LoopStrengthReduce.cpp revision 4d6ccb5f68cd7c6418a209f1fa4dbade569e4493
//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have the loop induction variable as one or more of their components;
// it rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
//       instead of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

static cl::opt<bool> EnableNested(
  "enable-lsr-nested", cl::Hidden, cl::desc("Enable LSR on nested loops"));

static cl::opt<bool> EnableRetry(
  "enable-lsr-retry", cl::Hidden, cl::desc("Enable LSR retry"));

// Temporary flag to cleanup congruent phis after LSR phi expansion.
// It's currently disabled until we can determine whether it's truly useful or
// not. The flag should be removed after the v3.0 release.
// This is now needed for ivchains.
static cl::opt<bool> EnablePhiElim(
  "enable-lsr-phielim", cl::Hidden, cl::init(true),
  cl::desc("Enable LSR phi elimination"));

#ifndef NDEBUG
// Stress test IV chain generation.
static cl::opt<bool> StressIVChain(
  "stress-ivchain", cl::Hidden, cl::init(false),
  cl::desc("Stress test LSR IV chains"));
#else
static bool StressIVChain = false;
#endif

namespace {

/// RegSortData - This class holds data which is used to order reuse
/// candidates.
class RegSortData {
public:
  /// UsedByIndices - This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  RegSortData() {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// RegUseTracker - Map register candidates to information about how they are
/// used.
class RegUseTracker {
  typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;

  RegUsesTy RegUsesMap;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void CountRegister(const SCEV *Reg, size_t LUIdx);
  void DropRegister(const SCEV *Reg, size_t LUIdx);
  void SwapAndDropUse(size_t LUIdx, size_t LastLUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  typedef SmallVectorImpl<const SCEV *>::iterator iterator;
  typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator;
  iterator begin() { return RegSequence.begin(); }
  iterator end()   { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const   { return RegSequence.end(); }
};

}

void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
    RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

void
RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) {
  RegUsesTy::iterator It = RegUsesMap.find(Reg);
  assert(It != RegUsesMap.end());
  RegSortData &RSD = It->second;
  assert(RSD.UsedByIndices.size() > LUIdx);
  RSD.UsedByIndices.reset(LUIdx);
}

void
RegUseTracker::SwapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
  assert(LUIdx <= LastLUIdx);

  // Update RegUses. The data structure is not optimized for this purpose;
  // we must iterate through it and update each of the bit vectors.
  for (RegUsesTy::iterator I = RegUsesMap.begin(), E = RegUsesMap.end();
       I != E; ++I) {
    SmallBitVector &UsedByIndices = I->second.UsedByIndices;
    if (LUIdx < UsedByIndices.size())
      UsedByIndices[LUIdx] =
        LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : 0;
    UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
  }
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  if (I == RegUsesMap.end())
    return false;
  const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  assert(I != RegUsesMap.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUsesMap.clear();
  RegSequence.clear();
}

namespace {

/// Formula - This class holds information that describes a formula for
/// computing a value for a use. It may include broken-out immediates and
/// scaled registers.
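/// For example (an illustrative sketch, not from the original source), an
/// address computation such as (%base + 4*%iv + 16) could be described by a
/// Formula with BaseRegs = { %base }, ScaledReg = %iv, AM.Scale = 4, and
/// AM.BaseOffs = 16; which formulae are actually generated depends on the
/// target's addressing modes.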
struct Formula {
  /// AM - This is used to represent complex addressing, as well as other kinds
  /// of interesting uses.
  TargetLowering::AddrMode AM;

  /// BaseRegs - The list of "base" registers for this use. When this is
  /// non-empty, AM.HasBaseReg should be set to true.
  SmallVector<const SCEV *, 2> BaseRegs;

  /// ScaledReg - The 'scaled' register for this use. This should be non-null
  /// when AM.Scale is not zero.
  const SCEV *ScaledReg;

  /// UnfoldedOffset - An additional constant offset which is added near the
  /// use. This requires a temporary register, but the offset itself can
  /// live in an add immediate field rather than a register.
  int64_t UnfoldedOffset;

  Formula() : ScaledReg(0), UnfoldedOffset(0) {}

  void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);

  unsigned getNumRegs() const;
  Type *getType() const;

  void DeleteBaseReg(const SCEV *&S);

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE) {
  // Collect expressions which properly dominate the loop header.
  if (SE.properlyDominates(S, L->getHeader())) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      DoInitialMatch(*I, L, Good, Bad, SE);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                      AR->getStepRecurrence(SE),
                                      // FIXME: AR->getNoWrapFlags()
                                      AR->getLoop(), SCEV::FlagAnyWrap),
                     L, Good, Bad, SE);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
           E = MyGood.end(); I != E; ++I)
        Good.push_back(SE.getMulExpr(NegOne, *I));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
           E = MyBad.end(); I != E; ++I)
        Bad.push_back(SE.getMulExpr(NegOne, *I));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}

/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
/// attempting to keep all loop-invariant and loop-computable values in a
/// single base register.
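/// For example, given S = {(%a + 8),+,4}<%L>, the loop-invariant start
/// (%a + 8) properly dominates the loop header and becomes one base register,
/// while the remaining addrec {0,+,4}<%L> becomes another.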
void Formula::InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE);
  if (!Good.empty()) {
    const SCEV *Sum = SE.getAddExpr(Good);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    AM.HasBaseReg = true;
  }
  if (!Bad.empty()) {
    const SCEV *Sum = SE.getAddExpr(Bad);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    AM.HasBaseReg = true;
  }
}

/// getNumRegs - Return the total number of register operands used by this
/// formula. This does not include register uses implied by non-constant
/// addrec strides.
unsigned Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// getType - Return the type of this formula, if it has one, or null
/// otherwise. This type is meaningless except for the bit size.
Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         AM.BaseGV ? AM.BaseGV->getType() :
         0;
}

/// DeleteBaseReg - Delete the given base reg from the BaseRegs list.
void Formula::DeleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())
    std::swap(S, BaseRegs.back());
  BaseRegs.pop_back();
}

/// referencesReg - Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg ||
         std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
}

/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
/// which are used by uses other than the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I)
    if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
      return true;
  return false;
}

void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (AM.BaseGV) {
    if (!First) OS << " + "; else First = false;
    WriteAsOperand(OS, AM.BaseGV, /*PrintType=*/false);
  }
  if (AM.BaseOffs != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.BaseOffs;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << **I << ')';
  }
  if (AM.HasBaseReg && BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: HasBaseReg**";
  } else if (!AM.HasBaseReg && !BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: !HasBaseReg**";
  }
  if (AM.Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
  if (UnfoldedOffset != 0) {
    if (!First) OS << " + "; else First = false;
    OS << "imm(" << UnfoldedOffset << ')';
  }
}

void Formula::dump() const {
  print(errs()); errs() << '\n';
}

/// isAddRecSExtable - Return true if the given addrec can be sign-extended
/// without changing its value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}

/// isAddSExtable - Return true if the given add can be sign-extended
/// without changing its value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// isMulSExtable - Return true if the given mul can be sign-extended
/// without changing its value.
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}

/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
/// and if the remainder is known to be zero, or null otherwise. If
/// IgnoreSignificantBits is true, expressions like (X * Y) /s Y are simplified
/// to X, ignoring that the multiplication may overflow, which is useful when
/// the result will be used in a context where the most significant bits are
/// ignored.
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getConstant(LHS->getType(), 1);

  // Handle a few RHS special cases.
  const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
  if (RC) {
    const APInt &RA = RC->getValue()->getValue();
    // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
    // some folding.
    if (RA.isAllOnesValue())
      return SE.getMulExpr(LHS, RC);
    // Handle x /s 1 as x.
    if (RA == 1)
      return LHS;
  }

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    if (!RC)
      return 0;
    const APInt &LA = C->getValue()->getValue();
    const APInt &RA = RC->getValue()->getValue();
    if (LA.srem(RA) != 0)
      return 0;
    return SE.getConstant(LA.sdiv(RA));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return 0;
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return 0;
      // FlagNW is independent of the start value, step direction, and is
      // preserved with smaller magnitude steps.
      // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
      return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
    }
    return 0;
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
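  // For example, ((8 + 4*%x) /s 4) can fold to (2 + %x), since each add
  // operand divides exactly by 4.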
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
           I != E; ++I) {
        const SCEV *Op = getExactSDiv(*I, RHS, SE,
                                      IgnoreSignificantBits);
        if (!Op) return 0;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
    return 0;
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
           I != E; ++I) {
        const SCEV *S = *I;
        if (!Found)
          if (const SCEV *Q = getExactSDiv(S, RHS, SE,
                                           IgnoreSignificantBits)) {
            S = Q;
            Found = true;
          }
        Ops.push_back(S);
      }
      return Found ? SE.getMulExpr(Ops) : 0;
    }
    return 0;
  }

  // Otherwise we don't know.
  return 0;
}

/// ExtractImmediate - If S involves the addition of a constant integer value,
/// return that integer value, and mutate S to point to a new SCEV with that
/// value excluded.
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getValue()->getValue().getMinSignedBits() <= 64) {
      S = SE.getConstant(C->getType(), 0);
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}

/// ExtractSymbol - If S involves the addition of a GlobalValue address,
/// return that symbol, and mutate S to point to a new SCEV with that
/// value excluded.
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getConstant(GV->getType(), 0);
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    if (Result)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    if (Result)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}

/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
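/// For example, in "store i32 %v, i32* %p", %p is used as an address, while
/// %v is not.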
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::prefetch:
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      if (II->getArgOperand(0) == OperandVal)
        isAddress = true;
      break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static Type *getAccessType(const Instruction *Inst) {
  Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getArgOperand(0)->getType();
      break;
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
  if (PointerType *PTy = dyn_cast<PointerType>(AccessTy))
    AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                PTy->getAddressSpace());

  return AccessTy;
}

/// isExistingPhi - Return true if this AddRec is already a phi in its loop.
static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I) {
    if (SE.isSCEVable(PN->getType()) &&
        (SE.getEffectiveSCEVType(PN->getType()) ==
         SE.getEffectiveSCEVType(AR->getType())) &&
        SE.getSCEV(PN) == AR)
      return true;
  }
  return false;
}

/// Check if expanding this expression is likely to incur significant cost.
/// This is tricky because SCEV doesn't track which expressions are actually
/// computed by the current IR.
///
/// We currently allow expansion of IV increments that involve adds,
/// multiplication by constants, and AddRecs from existing phis.
///
/// TODO: Allow UDivExpr if we can find an existing IV increment that is an
/// obvious multiple of the UDivExpr.
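///
/// For example, an AddRec whose value is already produced by a phi in its
/// loop is considered free, while an AddRec with no existing phi would
/// require materializing a new phi and add, and is considered high cost.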
static bool isHighCostExpansion(const SCEV *S,
                                SmallPtrSet<const SCEV*, 8> &Processed,
                                ScalarEvolution &SE) {
  // Zero/One operand expressions
  switch (S->getSCEVType()) {
  case scUnknown:
  case scConstant:
    return false;
  case scTruncate:
    return isHighCostExpansion(cast<SCEVTruncateExpr>(S)->getOperand(),
                               Processed, SE);
  case scZeroExtend:
    return isHighCostExpansion(cast<SCEVZeroExtendExpr>(S)->getOperand(),
                               Processed, SE);
  case scSignExtend:
    return isHighCostExpansion(cast<SCEVSignExtendExpr>(S)->getOperand(),
                               Processed, SE);
  }

  if (!Processed.insert(S))
    return false;

  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I) {
      if (isHighCostExpansion(*I, Processed, SE))
        return true;
    }
    return false;
  }

  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    if (Mul->getNumOperands() == 2) {
      // Multiplication by a constant is ok
      if (isa<SCEVConstant>(Mul->getOperand(0)))
        return isHighCostExpansion(Mul->getOperand(1), Processed, SE);

      // If we have the value of one operand, check if an existing
      // multiplication already generates this expression.
      if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Mul->getOperand(1))) {
        Value *UVal = U->getValue();
        for (Value::use_iterator UI = UVal->use_begin(), UE = UVal->use_end();
             UI != UE; ++UI) {
          Instruction *User = cast<Instruction>(*UI);
          if (User->getOpcode() == Instruction::Mul
              && SE.isSCEVable(User->getType())) {
            return SE.getSCEV(User) == Mul;
          }
        }
      }
    }
  }

  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    if (isExistingPhi(AR, SE))
      return false;
  }

  // For now, consider any other type of expression (div/mul/min/max) high
  // cost.
  return true;
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
static bool
DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val());

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

namespace {

/// Cost - This class is used to measure and compare candidate formulae.
class Cost {
  /// TODO: Some of these could be merged. Also, a lexical ordering
  /// isn't always optimal.
  unsigned NumRegs;
  unsigned AddRecCost;
  unsigned NumIVMuls;
  unsigned NumBaseAdds;
  unsigned ImmCost;
  unsigned SetupCost;

public:
  Cost()
    : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
      SetupCost(0) {}

  bool operator<(const Cost &Other) const;

  void Loose();

#ifndef NDEBUG
  // Once any of the metrics loses, they must all remain losers.
  bool isValid() {
    return ((NumRegs | AddRecCost | NumIVMuls | NumBaseAdds
             | ImmCost | SetupCost) != ~0u)
      || ((NumRegs & AddRecCost & NumIVMuls & NumBaseAdds
           & ImmCost & SetupCost) == ~0u);
  }
#endif

  bool isLoser() {
    assert(isValid() && "invalid cost");
    return NumRegs == ~0u;
  }

  void RateFormula(const Formula &F,
                   SmallPtrSet<const SCEV *, 16> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const Loop *L,
                   const SmallVectorImpl<int64_t> &Offsets,
                   ScalarEvolution &SE, DominatorTree &DT,
                   SmallPtrSet<const SCEV *, 16> *LoserRegs = 0);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const SCEV *Reg,
                    SmallPtrSet<const SCEV *, 16> &Regs,
                    const Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);
  void RatePrimaryRegister(const SCEV *Reg,
                           SmallPtrSet<const SCEV *, 16> &Regs,
                           const Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT,
                           SmallPtrSet<const SCEV *, 16> *LoserRegs);
};

}

/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
                        SmallPtrSet<const SCEV *, 16> &Regs,
                        const Loop *L,
                        ScalarEvolution &SE, DominatorTree &DT) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    if (AR->getLoop() == L)
      AddRecCost += 1; /// TODO: This should be a function of the stride.

    // If this is an addrec for another loop, don't second-guess its addrec phi
    // nodes. LSR isn't currently smart enough to reason about more than one
    // loop at a time. LSR either has already run on inner loops or will not
    // run on other loops, and cannot be expected to change sibling loops. If
    // the AddRec exists, consider its register free and leave it alone.
    // Otherwise, do not consider this formula at all.
    else if (!EnableNested || L->contains(AR->getLoop()) ||
             (!AR->getLoop()->contains(L) &&
              DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))) {
      if (isExistingPhi(AR, SE))
        return;

      // For !EnableNested, never rewrite IVs in other loops.
      if (!EnableNested) {
        Loose();
        return;
      }
      // If this isn't one of the addrecs that the loop already has, it
      // would require a costly new phi and add. TODO: This isn't
      // precisely modeled right now.
      ++NumBaseAdds;
      if (!Regs.count(AR->getStart())) {
        RateRegister(AR->getStart(), Regs, L, SE, DT);
        if (isLoser())
          return;
      }
    }

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) {
      if (!Regs.count(AR->getOperand(1))) {
        RateRegister(AR->getOperand(1), Regs, L, SE, DT);
        if (isLoser())
          return;
      }
    }
  }
  ++NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  if (!isa<SCEVUnknown>(Reg) &&
      !isa<SCEVConstant>(Reg) &&
      !(isa<SCEVAddRecExpr>(Reg) &&
        (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
         isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
    ++SetupCost;

  NumIVMuls += isa<SCEVMulExpr>(Reg) &&
               SE.hasComputableLoopEvolution(Reg, L);
}

/// RatePrimaryRegister - Record this register in the set. If we haven't seen
/// it before, rate it. Optional LoserRegs provides a way to declare any
/// formula that refers to one of those regs an instant loser.
void Cost::RatePrimaryRegister(const SCEV *Reg,
                               SmallPtrSet<const SCEV *, 16> &Regs,
                               const Loop *L,
                               ScalarEvolution &SE, DominatorTree &DT,
                               SmallPtrSet<const SCEV *, 16> *LoserRegs) {
  if (LoserRegs && LoserRegs->count(Reg)) {
    Loose();
    return;
  }
  if (Regs.insert(Reg)) {
    RateRegister(Reg, Regs, L, SE, DT);
    // LoserRegs is optional (it defaults to null in RateFormula), so guard
    // the insertion against a null pointer.
    if (LoserRegs && isLoser())
      LoserRegs->insert(Reg);
  }
}

void Cost::RateFormula(const Formula &F,
                       SmallPtrSet<const SCEV *, 16> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const Loop *L,
                       const SmallVectorImpl<int64_t> &Offsets,
                       ScalarEvolution &SE, DominatorTree &DT,
                       SmallPtrSet<const SCEV *, 16> *LoserRegs) {
  // Tally up the registers.
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(ScaledReg, Regs, L, SE, DT, LoserRegs);
    if (isLoser())
      return;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (VisitedRegs.count(BaseReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(BaseReg, Regs, L, SE, DT, LoserRegs);
    if (isLoser())
      return;
  }

  // Determine how many (unfolded) adds we'll need inside the loop.
  size_t NumBaseParts = F.BaseRegs.size() + (F.UnfoldedOffset != 0);
  if (NumBaseParts > 1)
    NumBaseAdds += NumBaseParts - 1;

  // Tally up the non-zero immediates.
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    int64_t Offset = (uint64_t)*I + F.AM.BaseOffs;
    if (F.AM.BaseGV)
      ImmCost += 64; // Handle symbolic values conservatively.
                     // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      ImmCost += APInt(64, Offset, true).getMinSignedBits();
  }
  assert(isValid() && "invalid cost");
}

/// Loose - Set this cost to a losing value.
void Cost::Loose() {
  NumRegs = ~0u;
  AddRecCost = ~0u;
  NumIVMuls = ~0u;
  NumBaseAdds = ~0u;
  ImmCost = ~0u;
  SetupCost = ~0u;
}

/// operator< - Choose the lower cost.
bool Cost::operator<(const Cost &Other) const {
  if (NumRegs != Other.NumRegs)
    return NumRegs < Other.NumRegs;
  if (AddRecCost != Other.AddRecCost)
    return AddRecCost < Other.AddRecCost;
  if (NumIVMuls != Other.NumIVMuls)
    return NumIVMuls < Other.NumIVMuls;
  if (NumBaseAdds != Other.NumBaseAdds)
    return NumBaseAdds < Other.NumBaseAdds;
  if (ImmCost != Other.ImmCost)
    return ImmCost < Other.ImmCost;
  if (SetupCost != Other.SetupCost)
    return SetupCost < Other.SetupCost;
  return false;
}

void Cost::print(raw_ostream &OS) const {
  OS << NumRegs << " reg" << (NumRegs == 1 ? "" : "s");
  if (AddRecCost != 0)
    OS << ", with addrec cost " << AddRecCost;
  if (NumIVMuls != 0)
    OS << ", plus " << NumIVMuls << " IV mul" << (NumIVMuls == 1 ? "" : "s");
  if (NumBaseAdds != 0)
    OS << ", plus " << NumBaseAdds << " base add"
       << (NumBaseAdds == 1 ? "" : "s");
  if (ImmCost != 0)
    OS << ", plus " << ImmCost << " imm cost";
  if (SetupCost != 0)
    OS << ", plus " << SetupCost << " setup cost";
}

void Cost::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// LSRFixup - An operand value in an instruction which is to be replaced
/// with some equivalent, possibly strength-reduced, replacement.
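/// For example, if a load's pointer operand is to be rewritten in terms of a
/// strength-reduced IV, the load is the UserInst and the pointer operand is
/// the OperandValToReplace.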
struct LSRFixup {
  /// UserInst - The instruction which will be updated.
  Instruction *UserInst;

  /// OperandValToReplace - The operand of the instruction which will
  /// be replaced. The operand may be used more than once; every instance
  /// will be replaced.
  Value *OperandValToReplace;

  /// PostIncLoops - If this user is to use the post-incremented value of an
  /// induction variable, this variable is non-null and holds the loop
  /// associated with the induction variable.
  PostIncLoopSet PostIncLoops;

  /// LUIdx - The index of the LSRUse describing the expression which
  /// this fixup needs, minus an offset (below).
  size_t LUIdx;

  /// Offset - A constant offset to be added to the LSRUse expression.
  /// This allows multiple fixups to share the same LSRUse with different
  /// offsets, for example in an unrolled loop.
  int64_t Offset;

  bool isUseFullyOutsideLoop(const Loop *L) const;

  LSRFixup();

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

LSRFixup::LSRFixup()
  : UserInst(0), OperandValToReplace(0), LUIdx(~size_t(0)), Offset(0) {}

/// isUseFullyOutsideLoop - Test whether this fixup always uses its
/// value outside of the given loop.
bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const {
  // PHI nodes use their value in their incoming blocks.
  if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (PN->getIncomingValue(i) == OperandValToReplace &&
          L->contains(PN->getIncomingBlock(i)))
        return false;
    return true;
  }

  return !L->contains(UserInst);
}

void LSRFixup::print(raw_ostream &OS) const {
  OS << "UserInst=";
  // Store is common and interesting enough to be worth special-casing.
  if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
    OS << "store ";
    WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false);
  } else if (UserInst->getType()->isVoidTy())
    OS << UserInst->getOpcodeName();
  else
    WriteAsOperand(OS, UserInst, /*PrintType=*/false);

  OS << ", OperandValToReplace=";
  WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false);

  for (PostIncLoopSet::const_iterator I = PostIncLoops.begin(),
       E = PostIncLoops.end(); I != E; ++I) {
    OS << ", PostIncLoop=";
    WriteAsOperand(OS, (*I)->getHeader(), /*PrintType=*/false);
  }

  if (LUIdx != ~size_t(0))
    OS << ", LUIdx=" << LUIdx;

  if (Offset != 0)
    OS << ", Offset=" << Offset;
}

void LSRFixup::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*.
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 2> getEmptyKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 2> getTombstoneKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 2> &V) {
    unsigned Result = 0;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = V.begin(),
         E = V.end(); I != E; ++I)
      Result ^= DenseMapInfo<const SCEV *>::getHashValue(*I);
    return Result;
  }

  static bool isEqual(const SmallVector<const SCEV *, 2> &LHS,
                      const SmallVector<const SCEV *, 2> &RHS) {
    return LHS == RHS;
  }
};

/// LSRUse - This class holds the state that LSR keeps for each use in
/// IVUsers, as well as uses invented by LSR itself. It includes information
/// about what kinds of things can be folded into the user, information about
/// the user itself, and information about how the use may be satisfied.
/// TODO: Represent multiple users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 2>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// KindType - An enum for a kind of use, indicating what types of
  /// scaled and immediate operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  KindType Kind;
  Type *AccessTy;

  SmallVector<int64_t, 8> Offsets;
  int64_t MinOffset;
  int64_t MaxOffset;

  /// AllFixupsOutsideLoop - This records whether all of the fixups using this
  /// LSRUse are outside of the loop, in which case some special-case
  /// heuristics may be used.
  bool AllFixupsOutsideLoop;

  /// WidestFixupType - This records the widest use type for any fixup using
  /// this LSRUse. FindUseWithSimilarFormula can't consider uses with different
  /// max fixup widths to be equivalent, because the narrower one may be
  /// relying on the implicit truncation to truncate away bogus bits.
  Type *WidestFixupType;

  /// Formulae - A list of ways to build a value that can satisfy this user.
  /// After the list is populated, one of these is selected heuristically and
  /// used to formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// Regs - The set of register candidates used by all formulae in this
  /// LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, Type *T) : Kind(K), AccessTy(T),
                                MinOffset(INT64_MAX),
                                MaxOffset(INT64_MIN),
                                AllFixupsOutsideLoop(true),
                                WidestFixupType(0) {}

  bool HasFormulaWithSameRegs(const Formula &F) const;
  bool InsertFormula(const Formula &F);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses);

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// HasFormulaWithSameRegs - Test whether this use has a formula with the same
/// registers as the given formula.
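/// The uniquifying key is the formula's whole set of registers (its BaseRegs
/// plus its ScaledReg, if any), sorted by host pointer order.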
bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());
  return Uniquifier.count(Key);
}

/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRUse::InsertFormula(const Formula &F) {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (SmallVectorImpl<const SCEV *>::const_iterator I =
       F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
    assert(!(*I)->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());

  return true;
}

/// DeleteFormula - Remove the given formula from this use's list.
void LSRUse::DeleteFormula(Formula &F) {
  if (&F != &Formulae.back())
    std::swap(F, Formulae.back());
  Formulae.pop_back();
}

/// RecomputeRegs - Recompute the Regs field, and update RegUses.
void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
  // Now that we've filtered out some formulae, recompute the Regs set.
  SmallPtrSet<const SCEV *, 4> OldRegs = Regs;
  Regs.clear();
  for (SmallVectorImpl<Formula>::const_iterator I = Formulae.begin(),
       E = Formulae.end(); I != E; ++I) {
    const Formula &F = *I;
    if (F.ScaledReg) Regs.insert(F.ScaledReg);
    Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  }

  // Update the RegTracker.
  for (SmallPtrSet<const SCEV *, 4>::iterator I = OldRegs.begin(),
       E = OldRegs.end(); I != E; ++I)
    if (!Regs.count(*I))
      RegUses.DropRegister(*I, LUIdx);
}

void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (AccessTy->isPointerTy())
      OS << "pointer"; // the full pointer type could be really verbose
    else
      OS << *AccessTy;
  }

  OS << ", Offsets={";
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    OS << *I;
    if (llvm::next(I) != E)
      OS << ',';
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";

  if (WidestFixupType)
    OS << ", widest fixup type: " << *WidestFixupType;
}

void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}

/// isLegalUse - Test whether the use described by AM is "legal", meaning it
/// can be completely folded into the user instruction at isel time. This
/// includes address-mode folding and special icmp tricks.
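///
/// For example, on x86 an address of the form (reg + 4*reg + imm) can
/// typically be folded into a memory operand, so an Address-kind use with
/// AM.Scale = 4, AM.HasBaseReg = true, and a small AM.BaseOffs would be
/// legal there.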
static bool isLegalUse(const TargetLowering::AddrMode &AM,
                       LSRUse::KindType Kind, Type *AccessTy,
                       const TargetLowering *TLI) {
  switch (Kind) {
  case LSRUse::Address:
    // If we have low-level target information, ask the target if it can
    // completely fold this address.
    if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);

    // Otherwise, just guess that reg+reg addressing is legal.
    return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal to
    // fold a GV into an ICmp.
    if (AM.BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (AM.Scale != 0 && AM.HasBaseReg && AM.BaseOffs != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale
    // by putting the scaled register in the other operand of the icmp.
    if (AM.Scale != 0 && AM.Scale != -1)
      return false;

    // If we have low-level target information, ask the target if it can fold
    // an integer immediate on an icmp.
    if (AM.BaseOffs != 0) {
      if (TLI) return TLI->isLegalICmpImmediate(-(uint64_t)AM.BaseOffs);
      return false;
    }

    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !AM.BaseGV && AM.Scale == 0 && AM.BaseOffs == 0;

  case LSRUse::Special:
    // Only handle -1 scales, or no scale.
    return AM.Scale == 0 || AM.Scale == -1;
  }

  llvm_unreachable("Invalid LSRUse Kind!");
}

static bool isLegalUse(TargetLowering::AddrMode AM,
                       int64_t MinOffset, int64_t MaxOffset,
                       LSRUse::KindType Kind, Type *AccessTy,
                       const TargetLowering *TLI) {
  // Check for overflow.
  if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
      (MinOffset > 0))
    return false;
  AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
  if (isLegalUse(AM, Kind, AccessTy, TLI)) {
    AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
    // Check for overflow.
    if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
        (MaxOffset > 0))
      return false;
    AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
    return isLegalUse(AM, Kind, AccessTy, TLI);
  }
  return false;
}

static bool isAlwaysFoldable(int64_t BaseOffs,
                             GlobalValue *BaseGV,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, Type *AccessTy,
                             const TargetLowering *TLI) {
  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  // Canonicalize a scale of 1 to a base register if the formula doesn't
  // already have a base register.
  if (!AM.HasBaseReg && AM.Scale == 1) {
    AM.Scale = 0;
    AM.HasBaseReg = true;
  }

  return isLegalUse(AM, Kind, AccessTy, TLI);
}

static bool isAlwaysFoldable(const SCEV *S,
                             int64_t MinOffset, int64_t MaxOffset,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, Type *AccessTy,
                             const TargetLowering *TLI,
                             ScalarEvolution &SE) {
  // Fast-path: zero is always foldable.
  if (S->isZero()) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t BaseOffs = ExtractImmediate(S, SE);
  GlobalValue *BaseGV = ExtractSymbol(S, SE);

  // If there's anything else involved, it's not foldable.
  if (!S->isZero()) return false;

  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
}

namespace {

/// UseMapDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of pairs of const SCEV* and LSRUse::Kind.
struct UseMapDenseMapInfo {
  static std::pair<const SCEV *, LSRUse::KindType> getEmptyKey() {
    return std::make_pair(reinterpret_cast<const SCEV *>(-1), LSRUse::Basic);
  }

  static std::pair<const SCEV *, LSRUse::KindType> getTombstoneKey() {
    return std::make_pair(reinterpret_cast<const SCEV *>(-2), LSRUse::Basic);
  }

  static unsigned
  getHashValue(const std::pair<const SCEV *, LSRUse::KindType> &V) {
    unsigned Result = DenseMapInfo<const SCEV *>::getHashValue(V.first);
    Result ^= DenseMapInfo<unsigned>::getHashValue(unsigned(V.second));
    return Result;
  }

  static bool isEqual(const std::pair<const SCEV *, LSRUse::KindType> &LHS,
                      const std::pair<const SCEV *, LSRUse::KindType> &RHS) {
    return LHS == RHS;
  }
};

/// IVInc - An individual increment in a Chain of IV increments.
/// Relate an IV user to an expression that computes the IV it uses from the IV
/// used by the previous link in the Chain.
///
/// For the head of a chain, IncExpr holds the absolute SCEV expression for the
/// original IVOperand. The head of the chain's IVOperand is only valid during
/// chain collection, before LSR replaces IV users. During chain generation,
/// IncExpr can be used to find the new IVOperand that computes the same
/// expression.
struct IVInc {
  Instruction *UserInst;
  Value* IVOperand;
  const SCEV *IncExpr;

  IVInc(Instruction *U, Value *O, const SCEV *E):
    UserInst(U), IVOperand(O), IncExpr(E) {}
};

// IVChain - The list of IV increments in program order.
// We typically add the head of a chain without finding subsequent links.
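// For example, in
//   %i1 = add i64 %i0, 4
//   %i2 = add i64 %i1, 4
// each increment computes its IV from the previous one, so the two adds can
// form a chain whose non-head links have IncExpr 4.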
typedef SmallVector<IVInc,1> IVChain;

/// ChainUsers - Helper for CollectChains to track multiple IV increment uses.
/// Distinguish between FarUsers that definitely cross IV increments and
/// NearUsers that may be used between IV increments.
struct ChainUsers {
  SmallPtrSet<Instruction*, 4> FarUsers;
  SmallPtrSet<Instruction*, 4> NearUsers;
};

/// LSRInstance - This class holds state for the main loop strength reduction
/// logic.
class LSRInstance {
  IVUsers &IU;
  ScalarEvolution &SE;
  DominatorTree &DT;
  LoopInfo &LI;
  const TargetLowering *const TLI;
  Loop *const L;
  bool Changed;

  /// IVIncInsertPos - This is the insert position at which the current loop's
  /// induction variable increment should be placed. In simple loops, this is
  /// the latch block's terminator. But in more complicated cases, this is a
  /// position which will dominate all the in-loop post-increment users.
  Instruction *IVIncInsertPos;

  /// Factors - Interesting factors between use strides.
  SmallSetVector<int64_t, 8> Factors;

  /// Types - Interesting use types, to facilitate truncation reuse.
  SmallSetVector<Type *, 4> Types;

  /// Fixups - The list of operands which are to be replaced.
  SmallVector<LSRFixup, 16> Fixups;

  /// Uses - The list of interesting uses.
  SmallVector<LSRUse, 16> Uses;

  /// RegUses - Track which uses use which register candidates.
  RegUseTracker RegUses;

  // Limit the number of chains to avoid quadratic behavior. We don't expect to
  // have more than a few IV increment chains in a loop. Missing a Chain falls
  // back to normal LSR behavior for those uses.
  static const unsigned MaxChains = 8;

  /// IVChainVec - IV users can form a chain of IV increments.
  SmallVector<IVChain, MaxChains> IVChainVec;

  /// IVIncSet - IV users that belong to profitable IVChains.
  SmallPtrSet<Use*, MaxChains> IVIncSet;

  void OptimizeShadowIV();
  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
  ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
  void OptimizeLoopTermCond();

  void ChainInstruction(Instruction *UserInst, Instruction *IVOper,
                        SmallVectorImpl<ChainUsers> &ChainUsersVec);
  void FinalizeChain(IVChain &Chain);
  void CollectChains();
  void GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
                       SmallVectorImpl<WeakVH> &DeadInsts);

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  // Support for sharing of LSRUses between LSRFixups.
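  // For example, the addresses (%p + %i) and (%p + %i + 4) produced by an
  // unrolled loop body can share a single LSRUse, with per-fixup offsets
  // 0 and 4.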
  typedef DenseMap<std::pair<const SCEV *, LSRUse::KindType>,
                   size_t,
                   UseMapDenseMapInfo> UseMapTy;
  UseMapTy UseMap;

  bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                          LSRUse::KindType Kind, Type *AccessTy);

  std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
                                    LSRUse::KindType Kind,
                                    Type *AccessTy);

  void DeleteUse(LSRUse &LU, size_t LUIdx);

  LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);

  void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();

  size_t EstimateSearchSpaceComplexity() const;
  void NarrowSearchSpaceByDetectingSupersets();
  void NarrowSearchSpaceByCollapsingUnrolledCode();
  void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  void NarrowSearchSpaceByPickingWinnerRegs();
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    Cost &SolutionCost,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

  BasicBlock::iterator
    HoistInsertPosition(BasicBlock::iterator IP,
                        const SmallVectorImpl<Instruction *> &Inputs) const;
  BasicBlock::iterator
    AdjustInsertPositionForExpand(BasicBlock::iterator IP,
                                  const LSRFixup &LF,
                                  const LSRUse &LU,
                                  SCEVExpander &Rewriter) const;

  Value *Expand(const LSRFixup &LF,
                const Formula &F,
                BasicBlock::iterator IP,
                SCEVExpander &Rewriter,
                SmallVectorImpl<WeakVH> &DeadInsts) const;
  void RewriteForPHI(PHINode *PN, const LSRFixup &LF,
                     const Formula &F,
                     SCEVExpander &Rewriter,
                     SmallVectorImpl<WeakVH> &DeadInsts,
                     Pass *P) const;
  void Rewrite(const LSRFixup &LF,
               const Formula &F,
               SCEVExpander &Rewriter,
               SmallVectorImpl<WeakVH> &DeadInsts,
               Pass *P) const;
  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                         Pass *P);

public:
  LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);

  bool getChanged() const { return Changed; }

  void print_factors_and_types(raw_ostream &OS) const;
  void print_fixups(raw_ostream &OS) const;
  void print_uses(raw_ostream &OS) const;
  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// OptimizeShadowIV - If IV is used in an int-to-float cast
1615/// inside the loop then try to eliminate the cast operation.
1616void LSRInstance::OptimizeShadowIV() {
1617 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
1618 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
1619 return;
1620
1621 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
1622 UI != E; /* empty */) {
1623 IVUsers::const_iterator CandidateUI = UI;
1624 ++UI;
1625 Instruction *ShadowUse = CandidateUI->getUser();
1626 Type *DestTy = NULL;
1627 bool IsSigned = false;
1628
1629 /* If the shadow use is an int->float cast, insert a second IV
1630 to eliminate this cast.
1631
1632 for (unsigned i = 0; i < n; ++i)
1633 foo((double)i);
1634
1635 is transformed into
1636
1637 double d = 0.0;
1638 for (unsigned i = 0; i < n; ++i, ++d)
1639 foo(d);
1640 */
1641 if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) {
1642 IsSigned = false;
1643 DestTy = UCast->getDestTy();
1644 }
1645 else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) {
1646 IsSigned = true;
1647 DestTy = SCast->getDestTy();
1648 }
1649 if (!DestTy) continue;
1650
1651 if (TLI) {
1652 // If the target does not support DestTy natively, do not apply
1653 // this transformation.
1654 EVT DVT = TLI->getValueType(DestTy);
1655 if (!TLI->isTypeLegal(DVT)) continue;
1656 }
1657
1658 PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
1659 if (!PH) continue;
1660 if (PH->getNumIncomingValues() != 2) continue;
1661
1662 Type *SrcTy = PH->getType();
1663 int Mantissa = DestTy->getFPMantissaWidth();
1664 if (Mantissa == -1) continue;
1665 if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
1666 continue;
1667
1668 unsigned Entry, Latch;
1669 if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
1670 Entry = 0;
1671 Latch = 1;
1672 } else {
1673 Entry = 1;
1674 Latch = 0;
1675 }
1676
1677 ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
1678 if (!Init) continue;
1679 Constant *NewInit = ConstantFP::get(DestTy, IsSigned ?
1680 (double)Init->getSExtValue() :
1681 (double)Init->getZExtValue());
1682
1683 BinaryOperator *Incr =
1684 dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
1685 if (!Incr) continue;
1686 if (Incr->getOpcode() != Instruction::Add
1687 && Incr->getOpcode() != Instruction::Sub)
1688 continue;
1689
1690 /* Initialize the new IV; double d = 0.0 in the above example. */
1691 ConstantInt *C = NULL;
1692 if (Incr->getOperand(0) == PH)
1693 C = dyn_cast<ConstantInt>(Incr->getOperand(1));
1694 else if (Incr->getOperand(1) == PH)
1695 C = dyn_cast<ConstantInt>(Incr->getOperand(0));
1696 else
1697 continue;
1698
1699 if (!C) continue;
1700
1701 // Ignore negative constants, as the code below doesn't handle them
1702 // correctly. TODO: Remove this restriction.
1703 if (!C->getValue().isStrictlyPositive()) continue;
1704
1705 /* Add the new PHINode. */
1706 PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH);
1707
1708 /* Create the new increment; '++d' in the above example. */
1709 Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
1710 BinaryOperator *NewIncr =
1711 BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
1712 Instruction::FAdd : Instruction::FSub,
1713 NewPH, CFP, "IV.S.next.", Incr);
1714
1715 NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
1716 NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));
1717
1718 /* Remove the cast operation. */
1719 ShadowUse->replaceAllUsesWith(NewPH);
1720 ShadowUse->eraseFromParent();
1721 Changed = true;
1722 break;
1723 }
1724}
1725
1726/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
1727/// set the IV user and stride information and return true; otherwise return
1728/// false.
1729bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
1730 for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
1731 if (UI->getUser() == Cond) {
1732 // NOTE: we could handle setcc instructions with multiple uses here, but
1733 // InstCombine does it as well for simple uses; it's not clear that it
1734 // occurs enough in real life to be worth handling.
1735 CondUse = UI;
1736 return true;
1737 }
1738 return false;
1739}
1740
1741/// OptimizeMax - Rewrite the loop's terminating condition if it uses
1742/// a max computation.
1743///
1744/// This is a narrow solution to a specific, but acute, problem. For loops
1745/// like this:
1746///
1747/// i = 0;
1748/// do {
1749/// p[i] = 0.0;
1750/// } while (++i < n);
1751///
1752/// the trip count isn't just 'n', because 'n' might not be positive. And
1753/// unfortunately this can come up even for loops where the user didn't use
1754/// a C do-while loop. For example, seemingly well-behaved top-test loops
1755/// will commonly be lowered like this:
1756///
1757/// if (n > 0) {
1758/// i = 0;
1759/// do {
1760/// p[i] = 0.0;
1761/// } while (++i < n);
1762/// }
1763///
1764/// and then it's possible for subsequent optimization to obscure the if
1765/// test in such a way that indvars can't find it.
1766///
1767/// When indvars can't find the if test in loops like this, it creates a
1768/// max expression, which allows it to give the loop a canonical
1769/// induction variable:
1770///
1771/// i = 0;
1772/// max = n < 1 ? 1 : n;
1773/// do {
1774/// p[i] = 0.0;
1775/// } while (++i != max);
1776///
1777/// Canonical induction variables are necessary because the loop passes
1778/// are designed around them. The most obvious example of this is the
1779/// LoopInfo analysis, which doesn't remember trip count values. It
1780/// expects to be able to rediscover the trip count each time it is
1781/// needed, and it does this using a simple analysis that only succeeds if
1782/// the loop has a canonical induction variable.
1783///
1784/// However, when it comes time to generate code, the maximum operation
1785/// can be quite costly, especially if it's inside of an outer loop.
1786///
1787/// This function solves this problem by detecting loops of this form and
1788/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
1789/// the instructions for the maximum computation.
1790///
1791ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
1792 // Check that the loop matches the pattern we're looking for.
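 // A sketch of the shape being matched (hypothetical IR, names invented):
 //   %max = select i1 %cmp, i64 1, i64 %n     ; smax(1, %n) from indvars
 //   ...
 //   %exitcond = icmp ne i64 %iv.next, %max
 // The checks below verify each piece of this pattern before rewriting.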
1793 if (Cond->getPredicate() != CmpInst::ICMP_EQ && 1794 Cond->getPredicate() != CmpInst::ICMP_NE) 1795 return Cond; 1796 1797 SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1)); 1798 if (!Sel || !Sel->hasOneUse()) return Cond; 1799 1800 const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L); 1801 if (isa<SCEVCouldNotCompute>(BackedgeTakenCount)) 1802 return Cond; 1803 const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1); 1804 1805 // Add one to the backedge-taken count to get the trip count. 1806 const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount); 1807 if (IterationCount != SE.getSCEV(Sel)) return Cond; 1808 1809 // Check for a max calculation that matches the pattern. There's no check 1810 // for ICMP_ULE here because the comparison would be with zero, which 1811 // isn't interesting. 1812 CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; 1813 const SCEVNAryExpr *Max = 0; 1814 if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) { 1815 Pred = ICmpInst::ICMP_SLE; 1816 Max = S; 1817 } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) { 1818 Pred = ICmpInst::ICMP_SLT; 1819 Max = S; 1820 } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) { 1821 Pred = ICmpInst::ICMP_ULT; 1822 Max = U; 1823 } else { 1824 // No match; bail. 1825 return Cond; 1826 } 1827 1828 // To handle a max with more than two operands, this optimization would 1829 // require additional checking and setup. 1830 if (Max->getNumOperands() != 2) 1831 return Cond; 1832 1833 const SCEV *MaxLHS = Max->getOperand(0); 1834 const SCEV *MaxRHS = Max->getOperand(1); 1835 1836 // ScalarEvolution canonicalizes constants to the left. For < and >, look 1837 // for a comparison with 1. For <= and >=, a comparison with zero. 1838 if (!MaxLHS || 1839 (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One))) 1840 return Cond; 1841 1842 // Check the relevant induction variable for conformance to 1843 // the pattern. 1844 const SCEV *IV = SE.getSCEV(Cond->getOperand(0)); 1845 const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV); 1846 if (!AR || !AR->isAffine() || 1847 AR->getStart() != One || 1848 AR->getStepRecurrence(SE) != One) 1849 return Cond; 1850 1851 assert(AR->getLoop() == L && 1852 "Loop condition operand is an addrec in a different loop!"); 1853 1854 // Check the right operand of the select, and remember it, as it will 1855 // be used in the new comparison instruction. 1856 Value *NewRHS = 0; 1857 if (ICmpInst::isTrueWhenEqual(Pred)) { 1858 // Look for n+1, and grab n. 1859 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1))) 1860 if (isa<ConstantInt>(BO->getOperand(1)) && 1861 cast<ConstantInt>(BO->getOperand(1))->isOne() && 1862 SE.getSCEV(BO->getOperand(0)) == MaxRHS) 1863 NewRHS = BO->getOperand(0); 1864 if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2))) 1865 if (isa<ConstantInt>(BO->getOperand(1)) && 1866 cast<ConstantInt>(BO->getOperand(1))->isOne() && 1867 SE.getSCEV(BO->getOperand(0)) == MaxRHS) 1868 NewRHS = BO->getOperand(0); 1869 if (!NewRHS) 1870 return Cond; 1871 } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS) 1872 NewRHS = Sel->getOperand(1); 1873 else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS) 1874 NewRHS = Sel->getOperand(2); 1875 else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS)) 1876 NewRHS = SU->getValue(); 1877 else 1878 // Max doesn't match expected pattern. 1879 return Cond; 1880 1881 // Determine the new comparison opcode. 
It may be signed or unsigned, 1882 // and the original comparison may be either equality or inequality. 1883 if (Cond->getPredicate() == CmpInst::ICMP_EQ) 1884 Pred = CmpInst::getInversePredicate(Pred); 1885 1886 // Ok, everything looks ok to change the condition into an SLT or SGE and 1887 // delete the max calculation. 1888 ICmpInst *NewCond = 1889 new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp"); 1890 1891 // Delete the max calculation instructions. 1892 Cond->replaceAllUsesWith(NewCond); 1893 CondUse->setUser(NewCond); 1894 Instruction *Cmp = cast<Instruction>(Sel->getOperand(0)); 1895 Cond->eraseFromParent(); 1896 Sel->eraseFromParent(); 1897 if (Cmp->use_empty()) 1898 Cmp->eraseFromParent(); 1899 return NewCond; 1900} 1901 1902/// OptimizeLoopTermCond - Change loop terminating condition to use the 1903/// postinc iv when possible. 1904void 1905LSRInstance::OptimizeLoopTermCond() { 1906 SmallPtrSet<Instruction *, 4> PostIncs; 1907 1908 BasicBlock *LatchBlock = L->getLoopLatch(); 1909 SmallVector<BasicBlock*, 8> ExitingBlocks; 1910 L->getExitingBlocks(ExitingBlocks); 1911 1912 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) { 1913 BasicBlock *ExitingBlock = ExitingBlocks[i]; 1914 1915 // Get the terminating condition for the loop if possible. If we 1916 // can, we want to change it to use a post-incremented version of its 1917 // induction variable, to allow coalescing the live ranges for the IV into 1918 // one register value. 1919 1920 BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator()); 1921 if (!TermBr) 1922 continue; 1923 // FIXME: Overly conservative, termination condition could be an 'or' etc.. 1924 if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition())) 1925 continue; 1926 1927 // Search IVUsesByStride to find Cond's IVUse if there is one. 1928 IVStrideUse *CondUse = 0; 1929 ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition()); 1930 if (!FindIVUserForCond(Cond, CondUse)) 1931 continue; 1932 1933 // If the trip count is computed in terms of a max (due to ScalarEvolution 1934 // being unable to find a sufficient guard, for example), change the loop 1935 // comparison to use SLT or ULT instead of NE. 1936 // One consequence of doing this now is that it disrupts the count-down 1937 // optimization. That's not always a bad thing though, because in such 1938 // cases it may still be worthwhile to avoid a max. 1939 Cond = OptimizeMax(Cond, CondUse); 1940 1941 // If this exiting block dominates the latch block, it may also use 1942 // the post-inc value if it won't be shared with other uses. 1943 // Check for dominance. 1944 if (!DT.dominates(ExitingBlock, LatchBlock)) 1945 continue; 1946 1947 // Conservatively avoid trying to use the post-inc value in non-latch 1948 // exits if there may be pre-inc users in intervening blocks. 1949 if (LatchBlock != ExitingBlock) 1950 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) 1951 // Test if the use is reachable from the exiting block. This dominator 1952 // query is a conservative approximation of reachability. 1953 if (&*UI != CondUse && 1954 !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) { 1955 // Conservatively assume there may be reuse if the quotient of their 1956 // strides could be a legal scale. 
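 // For example (hypothetical strides): if the condition's IV strides by 4
 // and another use strides by 8, the quotient 2 is a legal scale on x86
 // (e.g. [base + 2*index]), so the pre-inc value may profitably be shared
 // and we decline to use the post-inc value here.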
1957 const SCEV *A = IU.getStride(*CondUse, L);
1958 const SCEV *B = IU.getStride(*UI, L);
1959 if (!A || !B) continue;
1960 if (SE.getTypeSizeInBits(A->getType()) !=
1961 SE.getTypeSizeInBits(B->getType())) {
1962 if (SE.getTypeSizeInBits(A->getType()) >
1963 SE.getTypeSizeInBits(B->getType()))
1964 B = SE.getSignExtendExpr(B, A->getType());
1965 else
1966 A = SE.getSignExtendExpr(A, B->getType());
1967 }
1968 if (const SCEVConstant *D =
1969 dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
1970 const ConstantInt *C = D->getValue();
1971 // Stride of one or negative one can have reuse with non-addresses.
1972 if (C->isOne() || C->isAllOnesValue())
1973 goto decline_post_inc;
1974 // Avoid weird situations.
1975 if (C->getValue().getMinSignedBits() >= 64 ||
1976 C->getValue().isMinSignedValue())
1977 goto decline_post_inc;
1978 // Without TLI, assume that any stride might be valid, and so any
1979 // use might be shared.
1980 if (!TLI)
1981 goto decline_post_inc;
1982 // Check for possible scaled-address reuse.
1983 Type *AccessTy = getAccessType(UI->getUser());
1984 TargetLowering::AddrMode AM;
1985 AM.Scale = C->getSExtValue();
1986 if (TLI->isLegalAddressingMode(AM, AccessTy))
1987 goto decline_post_inc;
1988 AM.Scale = -AM.Scale;
1989 if (TLI->isLegalAddressingMode(AM, AccessTy))
1990 goto decline_post_inc;
1991 }
1992 }
1993
1994 DEBUG(dbgs() << " Change loop exiting icmp to use postinc iv: "
1995 << *Cond << '\n');
1996
1997 // It's possible for the setcc instruction to be anywhere in the loop, and
1998 // possible for it to have multiple users. If it is not immediately before
1999 // the exiting block branch, move it.
2000 if (&*++BasicBlock::iterator(Cond) != TermBr) {
2001 if (Cond->hasOneUse()) {
2002 Cond->moveBefore(TermBr);
2003 } else {
2004 // Clone the terminating condition and insert it into the loop end.
2005 ICmpInst *OldCond = Cond;
2006 Cond = cast<ICmpInst>(Cond->clone());
2007 Cond->setName(L->getHeader()->getName() + ".termcond");
2008 ExitingBlock->getInstList().insert(TermBr, Cond);
2009
2010 // Clone the IVUse, as the old use still exists!
2011 CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace());
2012 TermBr->replaceUsesOfWith(OldCond, Cond);
2013 }
2014 }
2015
2016 // If we get to here, we know that we can transform the setcc instruction to
2017 // use the post-incremented version of the IV, allowing us to coalesce the
2018 // live ranges for the IV correctly.
2019 CondUse->transformToPostInc(L);
2020 Changed = true;
2021
2022 PostIncs.insert(Cond);
2023 decline_post_inc:;
2024 }
2025
2026 // Determine an insertion point for the loop induction variable increment. It
2027 // must dominate all the post-inc comparisons we just set up, and it must
2028 // dominate the loop latch edge.
2029 IVIncInsertPos = L->getLoopLatch()->getTerminator();
2030 for (SmallPtrSet<Instruction *, 4>::const_iterator I = PostIncs.begin(),
2031 E = PostIncs.end(); I != E; ++I) {
2032 BasicBlock *BB =
2033 DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
2034 (*I)->getParent());
2035 if (BB == (*I)->getParent())
2036 IVIncInsertPos = *I;
2037 else if (BB != IVIncInsertPos->getParent())
2038 IVIncInsertPos = BB->getTerminator();
2039 }
2040}
2041
2042/// reconcileNewOffset - Determine if the given use can accommodate a fixup
2043/// at the given offset and other details. If so, update the use and
2044/// return true.
2045bool
2046LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
2047 LSRUse::KindType Kind, Type *AccessTy) {
2048 int64_t NewMinOffset = LU.MinOffset;
2049 int64_t NewMaxOffset = LU.MaxOffset;
2050 Type *NewAccessTy = AccessTy;
2051
2052 // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
2053 // something conservative; however, this can pessimize in the case that one of
2054 // the uses will have all its uses outside the loop, for example.
2055 if (LU.Kind != Kind)
2056 return false;
2057 // Conservatively assume HasBaseReg is true for now.
2058 if (NewOffset < LU.MinOffset) {
2059 if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, HasBaseReg,
2060 Kind, AccessTy, TLI))
2061 return false;
2062 NewMinOffset = NewOffset;
2063 } else if (NewOffset > LU.MaxOffset) {
2064 if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, HasBaseReg,
2065 Kind, AccessTy, TLI))
2066 return false;
2067 NewMaxOffset = NewOffset;
2068 }
2069 // Check for a mismatched access type, and fall back conservatively as needed.
2070 // TODO: Be less conservative when the type is similar and can use the same
2071 // addressing modes.
2072 if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
2073 NewAccessTy = Type::getVoidTy(AccessTy->getContext());
2074
2075 // Update the use.
2076 LU.MinOffset = NewMinOffset;
2077 LU.MaxOffset = NewMaxOffset;
2078 LU.AccessTy = NewAccessTy;
2079 if (NewOffset != LU.Offsets.back())
2080 LU.Offsets.push_back(NewOffset);
2081 return true;
2082}
2083
2084/// getUse - Return an LSRUse index and an offset value for a fixup which
2085/// needs the given expression, with the given kind and optional access type.
2086/// Either reuse an existing use or create a new one, as needed.
2087std::pair<size_t, int64_t>
2088LSRInstance::getUse(const SCEV *&Expr,
2089 LSRUse::KindType Kind, Type *AccessTy) {
2090 const SCEV *Copy = Expr;
2091 int64_t Offset = ExtractImmediate(Expr, SE);
2092
2093 // Basic uses can't accept any offset, for example.
2094 if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, TLI)) {
2095 Expr = Copy;
2096 Offset = 0;
2097 }
2098
2099 std::pair<UseMapTy::iterator, bool> P =
2100 UseMap.insert(std::make_pair(std::make_pair(Expr, Kind), 0));
2101 if (!P.second) {
2102 // A use already existed with this base.
2103 size_t LUIdx = P.first->second;
2104 LSRUse &LU = Uses[LUIdx];
2105 if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
2106 // Reuse this use.
2107 return std::make_pair(LUIdx, Offset);
2108 }
2109
2110 // Create a new use.
2111 size_t LUIdx = Uses.size();
2112 P.first->second = LUIdx;
2113 Uses.push_back(LSRUse(Kind, AccessTy));
2114 LSRUse &LU = Uses[LUIdx];
2115
2116 // Tracking redundant offsets would be harmless, but there's no need to go
2117 // out of our way here to avoid them.
2118 if (LU.Offsets.empty() || Offset != LU.Offsets.back())
2119 LU.Offsets.push_back(Offset);
2120
2121 LU.MinOffset = Offset;
2122 LU.MaxOffset = Offset;
2123 return std::make_pair(LUIdx, Offset);
2124}
2125
2126/// DeleteUse - Delete the given use from the Uses list.
2127void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
2128 if (&LU != &Uses.back())
2129 std::swap(LU, Uses.back());
2130 Uses.pop_back();
2131
2132 // Update RegUses.
2133 RegUses.SwapAndDropUse(LUIdx, Uses.size());
2134}
2135
2136/// FindUseWithSimilarFormula - Look for a use distinct from OrigLU which has
2137/// a formula that has the same registers as the given formula.
2138LSRUse *
2139LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
2140 const LSRUse &OrigLU) {
2141 // Search all uses for the formula. This could be more clever.
2142 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
2143 LSRUse &LU = Uses[LUIdx];
2144 // Check whether this use is close enough to OrigLU, to see whether it's
2145 // worthwhile looking through its formulae.
2146 // Ignore ICmpZero uses because they may contain formulae generated by
2147 // GenerateICmpZeroScales, in which case adding fixup offsets may
2148 // be invalid.
2149 if (&LU != &OrigLU &&
2150 LU.Kind != LSRUse::ICmpZero &&
2151 LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
2152 LU.WidestFixupType == OrigLU.WidestFixupType &&
2153 LU.HasFormulaWithSameRegs(OrigF)) {
2154 // Scan through this use's formulae.
2155 for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
2156 E = LU.Formulae.end(); I != E; ++I) {
2157 const Formula &F = *I;
2158 // Check to see if this formula has the same registers and symbols
2159 // as OrigF.
2160 if (F.BaseRegs == OrigF.BaseRegs &&
2161 F.ScaledReg == OrigF.ScaledReg &&
2162 F.AM.BaseGV == OrigF.AM.BaseGV &&
2163 F.AM.Scale == OrigF.AM.Scale &&
2164 F.UnfoldedOffset == OrigF.UnfoldedOffset) {
2165 if (F.AM.BaseOffs == 0)
2166 return &LU;
2167 // This is the formula where all the registers and symbols matched;
2168 // there aren't going to be any others. Since we declined it, we
2169 // can skip the rest of the formulae and proceed to the next LSRUse.
2170 break;
2171 }
2172 }
2173 }
2174 }
2175
2176 // Nothing looked good.
2177 return 0;
2178}
2179
2180void LSRInstance::CollectInterestingTypesAndFactors() {
2181 SmallSetVector<const SCEV *, 4> Strides;
2182
2183 // Collect interesting types and strides.
2184 SmallVector<const SCEV *, 4> Worklist;
2185 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
2186 const SCEV *Expr = IU.getExpr(*UI);
2187
2188 // Collect interesting types.
2189 Types.insert(SE.getEffectiveSCEVType(Expr->getType()));
2190
2191 // Add strides for mentioned loops.
2192 Worklist.push_back(Expr);
2193 do {
2194 const SCEV *S = Worklist.pop_back_val();
2195 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
2196 if (EnableNested || AR->getLoop() == L)
2197 Strides.insert(AR->getStepRecurrence(SE));
2198 Worklist.push_back(AR->getStart());
2199 } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
2200 Worklist.append(Add->op_begin(), Add->op_end());
2201 }
2202 } while (!Worklist.empty());
2203 }
2204
2205 // Compute interesting factors from the set of interesting strides.
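 // For illustration (hypothetical strides): if the loop has strides {4, 8},
 // the exact quotient 8 /s 4 == 2 is recorded as a factor; for strides
 // {%x, 3 * %x} the symbolic division yields the constant 3. These factors
 // feed GenerateICmpZeroScales and GenerateScales below.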
2206 for (SmallSetVector<const SCEV *, 4>::const_iterator
2207 I = Strides.begin(), E = Strides.end(); I != E; ++I)
2208 for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
2209 llvm::next(I); NewStrideIter != E; ++NewStrideIter) {
2210 const SCEV *OldStride = *I;
2211 const SCEV *NewStride = *NewStrideIter;
2212
2213 if (SE.getTypeSizeInBits(OldStride->getType()) !=
2214 SE.getTypeSizeInBits(NewStride->getType())) {
2215 if (SE.getTypeSizeInBits(OldStride->getType()) >
2216 SE.getTypeSizeInBits(NewStride->getType()))
2217 NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
2218 else
2219 OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
2220 }
2221 if (const SCEVConstant *Factor =
2222 dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
2223 SE, true))) {
2224 if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
2225 Factors.insert(Factor->getValue()->getValue().getSExtValue());
2226 } else if (const SCEVConstant *Factor =
2227 dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
2228 NewStride,
2229 SE, true))) {
2230 if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
2231 Factors.insert(Factor->getValue()->getValue().getSExtValue());
2232 }
2233 }
2234
2235 // If all uses use the same type, don't bother looking for truncation-based
2236 // reuse.
2237 if (Types.size() == 1)
2238 Types.clear();
2239
2240 DEBUG(print_factors_and_types(dbgs()));
2241}
2242
2243/// findIVOperand - Helper for CollectChains that finds an IV operand (computed
2244/// by an AddRec in this loop) within [OI,OE) or returns OE. If IVUsers mapped
2245/// Instructions to IVStrideUses, we could partially skip this.
2246static User::op_iterator
2247findIVOperand(User::op_iterator OI, User::op_iterator OE,
2248 Loop *L, ScalarEvolution &SE) {
2249 for (; OI != OE; ++OI) {
2250 if (Instruction *Oper = dyn_cast<Instruction>(*OI)) {
2251 if (!SE.isSCEVable(Oper->getType()))
2252 continue;
2253
2254 if (const SCEVAddRecExpr *AR =
2255 dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Oper))) {
2256 if (AR->getLoop() == L)
2257 break;
2258 }
2259 }
2260 }
2261 return OI;
2262}
2263
2264/// getWideOperand - IVChain logic must consistently peek through TruncInst
2265/// operands, so wrap it in a convenient helper.
2266static Value *getWideOperand(Value *Oper) {
2267 if (TruncInst *Trunc = dyn_cast<TruncInst>(Oper))
2268 return Trunc->getOperand(0);
2269 return Oper;
2270}
2271
2272/// isCompatibleIVType - Return true if we allow an IV chain to include both
2273/// types.
2274static bool isCompatibleIVType(Value *LVal, Value *RVal) {
2275 Type *LType = LVal->getType();
2276 Type *RType = RVal->getType();
2277 return (LType == RType) || (LType->isPointerTy() && RType->isPointerTy());
2278}
2279
2280/// getExprBase - Return an approximation of this SCEV expression's "base", or
2281/// NULL for any constant. Returning the expression itself is
2282/// conservative. Returning a deeper subexpression is more precise and valid as
2283/// long as it isn't less complex than another subexpression. For expressions
2284/// involving multiple unscaled values, we need to return the pointer-type
2285/// SCEVUnknown. This avoids forming chains across objects, such as:
2286/// PrevOper==a[i], IVOper==b[i], IVInc==b-a.
2287///
2288/// Since SCEVUnknown is the rightmost type, and pointers are the rightmost
2289/// SCEVUnknown, we simply return the rightmost SCEV operand.
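/// For illustration (hypothetical SCEVs): getExprBase((4 * %n) + %p + 8)
/// skips the scaled operand and the constant and returns %p, the rightmost
/// unscaled operand; getExprBase({%p,+,8}<%L>) recurses into the start
/// value and likewise returns %p.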
2290static const SCEV *getExprBase(const SCEV *S) {
2291 switch (S->getSCEVType()) {
2292 default: // including scUnknown.
2293 return S;
2294 case scConstant:
2295 return 0;
2296 case scTruncate:
2297 return getExprBase(cast<SCEVTruncateExpr>(S)->getOperand());
2298 case scZeroExtend:
2299 return getExprBase(cast<SCEVZeroExtendExpr>(S)->getOperand());
2300 case scSignExtend:
2301 return getExprBase(cast<SCEVSignExtendExpr>(S)->getOperand());
2302 case scAddExpr: {
2303 // Skip over scaled operands (scMulExpr) to follow add operands as long as
2304 // there's nothing more complex.
2305 // FIXME: not sure if we want to recognize negation.
2306 const SCEVAddExpr *Add = cast<SCEVAddExpr>(S);
2307 for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(Add->op_end()),
2308 E(Add->op_begin()); I != E; ++I) {
2309 const SCEV *SubExpr = *I;
2310 if (SubExpr->getSCEVType() == scAddExpr)
2311 return getExprBase(SubExpr);
2312
2313 if (SubExpr->getSCEVType() != scMulExpr)
2314 return SubExpr;
2315 }
2316 return S; // all operands are scaled, be conservative.
2317 }
2318 case scAddRecExpr:
2319 return getExprBase(cast<SCEVAddRecExpr>(S)->getStart());
2320 }
2321}
2322
2323/// Return the chain increment if it is profitable to expand into a loop
2324/// invariant value, which may require its own register, or null otherwise. A
2325/// profitable chain increment will be an offset relative to the same base.
2326/// We allow such offsets to potentially be used as a chain increment as long
2327/// as it's not obviously expensive to expand using real instructions.
2328static const SCEV *
2329getProfitableChainIncrement(Value *NextIV, Value *PrevIV,
2330 const IVChain &Chain, Loop *L,
2331 ScalarEvolution &SE, const TargetLowering *TLI) {
2332 // Prune the solution space aggressively by checking that both IV operands
2333 // are expressions that operate on the same unscaled SCEVUnknown. This
2334 // "base" will be canceled by the subsequent getMinusSCEV call. Checking first
2335 // avoids creating extra SCEV expressions.
2336 const SCEV *OperExpr = SE.getSCEV(NextIV);
2337 const SCEV *PrevExpr = SE.getSCEV(PrevIV);
2338 if (getExprBase(OperExpr) != getExprBase(PrevExpr) && !StressIVChain)
2339 return 0;
2340
2341 const SCEV *IncExpr = SE.getMinusSCEV(OperExpr, PrevExpr);
2342 if (!SE.isLoopInvariant(IncExpr, L))
2343 return 0;
2344
2345 // We are not able to expand an increment unless it is loop invariant;
2346 // however, the following checks are purely for profitability.
2347 if (StressIVChain)
2348 return IncExpr;
2349
2350 // Do not replace a constant offset from IV head with a nonconstant IV
2351 // increment.
2352 if (!isa<SCEVConstant>(IncExpr)) {
2353 const SCEV *HeadExpr = SE.getSCEV(getWideOperand(Chain[0].IVOperand));
2354 if (isa<SCEVConstant>(SE.getMinusSCEV(OperExpr, HeadExpr)))
2355 return 0;
2356 }
2357
2358 SmallPtrSet<const SCEV*, 8> Processed;
2359 if (isHighCostExpansion(IncExpr, Processed, SE))
2360 return 0;
2361
2362 return IncExpr;
2363}
2364
2365/// Return true if the number of registers needed for the chain is estimated to
2366/// be less than the number required for the individual IV users. First prohibit
2367/// any IV users that keep the IV live across increments (the Users set should
2368/// be empty). Next count the number and type of increments in the chain.
2369///
2370/// Chaining IVs can lead to considerable code bloat if ISEL doesn't
2371/// effectively use postinc addressing modes. Only consider it profitable if the
2372/// increments can be computed in fewer registers when chained.
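/// A worked example of the heuristic below (hypothetical chain): a chain of
/// three address users incremented by constants starts at cost 1, takes -1
/// for having more than one constant increment, and -1 more if the chain is
/// completed by the header phi, giving cost -1 < 0: profitable.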
2373///
2374/// TODO: Consider IVInc free if it's already used in another chain.
2375static bool
2376isProfitableChain(IVChain &Chain, SmallPtrSet<Instruction*, 4> &Users,
2377 ScalarEvolution &SE, const TargetLowering *TLI) {
2378 if (StressIVChain)
2379 return true;
2380
2381 if (Chain.size() <= 2)
2382 return false;
2383
2384 if (!Users.empty()) {
2385 DEBUG(dbgs() << "Chain: " << *Chain[0].UserInst << " users:\n";
2386 for (SmallPtrSet<Instruction*, 4>::const_iterator I = Users.begin(),
2387 E = Users.end(); I != E; ++I) {
2388 dbgs() << " " << **I << "\n";
2389 });
2390 return false;
2391 }
2392 assert(!Chain.empty() && "empty IV chains are not allowed");
2393
2394 // The chain itself may require a register, so initialize cost to 1.
2395 int cost = 1;
2396
2397 // A complete chain likely eliminates the need for keeping the original IV in
2398 // a register. LSR does not currently know how to form a complete chain unless
2399 // the header phi already exists.
2400 if (isa<PHINode>(Chain.back().UserInst)
2401 && SE.getSCEV(Chain.back().UserInst) == Chain[0].IncExpr) {
2402 --cost;
2403 }
2404 const SCEV *LastIncExpr = 0;
2405 unsigned NumConstIncrements = 0;
2406 unsigned NumVarIncrements = 0;
2407 unsigned NumReusedIncrements = 0;
2408 for (IVChain::const_iterator I = llvm::next(Chain.begin()), E = Chain.end();
2409 I != E; ++I) {
2410
2411 if (I->IncExpr->isZero())
2412 continue;
2413
2414 // Incrementing by zero or some constant is neutral. We assume constants can
2415 // be folded into an addressing mode or an add's immediate operand.
2416 if (isa<SCEVConstant>(I->IncExpr)) {
2417 ++NumConstIncrements;
2418 continue;
2419 }
2420
2421 if (I->IncExpr == LastIncExpr)
2422 ++NumReusedIncrements;
2423 else
2424 ++NumVarIncrements;
2425
2426 LastIncExpr = I->IncExpr;
2427 }
2428 // An IV chain with a single increment is handled by LSR's postinc
2429 // uses. However, a chain with multiple increments requires keeping the IV's
2430 // value live longer than it needs to be if chained.
2431 if (NumConstIncrements > 1)
2432 --cost;
2433
2434 // Materializing increment expressions in the preheader that didn't exist in
2435 // the original code may cost a register. For example, sign-extended array
2436 // indices can produce ridiculous increments like this:
2437 // IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64)))
2438 cost += NumVarIncrements;
2439
2440 // Reusing variable increments likely saves a register to hold the multiple of
2441 // the stride.
2442 cost -= NumReusedIncrements;
2443
2444 DEBUG(dbgs() << "Chain: " << *Chain[0].UserInst << " Cost: " << cost << "\n");
2445
2446 return cost < 0;
2447}
2448
2449/// ChainInstruction - Add this IV user to an existing chain or make it the head
2450/// of a new chain.
2451void LSRInstance::ChainInstruction(Instruction *UserInst, Instruction *IVOper,
2452 SmallVectorImpl<ChainUsers> &ChainUsersVec) {
2453 // When IVs are used as types of varying widths, they are generally converted
2454 // to a wider type with some uses remaining narrow under a (free) trunc.
2455 Value *NextIV = getWideOperand(IVOper);
2456
2457 // Visit all existing chains. Check whether this IVOper can be computed as a
2458 // profitable loop invariant increment from the last link in the Chain.
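 // For example (hypothetical users): loads at %p, %p+8, and %p+16 visited in
 // that order form one chain whose links each add the loop-invariant
 // increment 8 to the previous IV operand.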
2459 unsigned ChainIdx = 0, NChains = IVChainVec.size();
2460 const SCEV *LastIncExpr = 0;
2461 for (; ChainIdx < NChains; ++ChainIdx) {
2462 Value *PrevIV = getWideOperand(IVChainVec[ChainIdx].back().IVOperand);
2463 if (!isCompatibleIVType(PrevIV, NextIV))
2464 continue;
2465
2466 // A phi node terminates a chain.
2467 if (isa<PHINode>(UserInst)
2468 && isa<PHINode>(IVChainVec[ChainIdx].back().UserInst))
2469 continue;
2470
2471 if (const SCEV *IncExpr =
2472 getProfitableChainIncrement(NextIV, PrevIV, IVChainVec[ChainIdx],
2473 L, SE, TLI)) {
2474 LastIncExpr = IncExpr;
2475 break;
2476 }
2477 }
2478 // If we haven't found a chain, create a new one, unless we hit the max. Don't
2479 // bother for phi nodes, because they must be last in the chain.
2480 if (ChainIdx == NChains) {
2481 if (isa<PHINode>(UserInst))
2482 return;
2483 if (NChains >= MaxChains && !StressIVChain) {
2484 DEBUG(dbgs() << "IV Chain Limit\n");
2485 return;
2486 }
2487 LastIncExpr = SE.getSCEV(NextIV);
2488 // IVUsers may have skipped over sign/zero extensions. We don't currently
2489 // attempt to form chains involving extensions unless they can be hoisted
2490 // into this loop's AddRec.
2491 if (!isa<SCEVAddRecExpr>(LastIncExpr))
2492 return;
2493 ++NChains;
2494 IVChainVec.resize(NChains);
2495 ChainUsersVec.resize(NChains);
2496 DEBUG(dbgs() << "IV Head: (" << *UserInst << ") IV=" << *LastIncExpr
2497 << "\n");
2498 }
2499 else
2500 DEBUG(dbgs() << "IV Inc: (" << *UserInst << ") IV+" << *LastIncExpr
2501 << "\n");
2502
2503 // Add this IV user to the end of the chain.
2504 IVChainVec[ChainIdx].push_back(IVInc(UserInst, IVOper, LastIncExpr));
2505
2506 SmallPtrSet<Instruction*,4> &NearUsers = ChainUsersVec[ChainIdx].NearUsers;
2507 // This chain's NearUsers become FarUsers.
2508 if (!LastIncExpr->isZero()) {
2509 ChainUsersVec[ChainIdx].FarUsers.insert(NearUsers.begin(),
2510 NearUsers.end());
2511 NearUsers.clear();
2512 }
2513
2514 // All other uses of IVOperand become near uses of the chain.
2515 // We currently ignore intermediate values within SCEV expressions, assuming
2516 // they will eventually be used by the current chain, or can be computed
2517 // from one of the chain increments. To be more precise we could
2518 // transitively follow its user and only add leaf IV users to the set.
2519 for (Value::use_iterator UseIter = IVOper->use_begin(),
2520 UseEnd = IVOper->use_end(); UseIter != UseEnd; ++UseIter) {
2521 Instruction *OtherUse = dyn_cast<Instruction>(*UseIter);
2522 if (!OtherUse || OtherUse == UserInst) // Check for null before using it.
2523 continue;
2524 if (SE.isSCEVable(OtherUse->getType())
2525 && !isa<SCEVUnknown>(SE.getSCEV(OtherUse))
2526 && IU.isIVUserOrOperand(OtherUse))
2527 continue;
2528 NearUsers.insert(OtherUse);
2529 }
2530
2531 // Since this user is part of the chain, it's no longer considered a use
2532 // of the chain.
2533 ChainUsersVec[ChainIdx].FarUsers.erase(UserInst);
2534}
2535
2536/// CollectChains - Populate the vector of Chains.
2537///
2538/// This decreases ILP at the architecture level. Targets with ample registers,
2539/// multiple memory ports, and no register renaming probably don't want
2540/// this. However, such targets should probably disable LSR altogether.
2541///
2542/// The job of LSR is to make a reasonable choice of induction variables across
2543/// the loop. Subsequent passes can easily "unchain" computation exposing more
2544/// ILP *within the loop* if the target wants it.
2545///
2546/// Finding the best IV chain is potentially a scheduling problem.
Since LSR 2547/// will not reorder memory operations, it will recognize this as a chain, but 2548/// will generate redundant IV increments. Ideally this would be corrected later 2549/// by a smart scheduler: 2550/// = A[i] 2551/// = A[i+x] 2552/// A[i] = 2553/// A[i+x] = 2554/// 2555/// TODO: Walk the entire domtree within this loop, not just the path to the 2556/// loop latch. This will discover chains on side paths, but requires 2557/// maintaining multiple copies of the Chains state. 2558void LSRInstance::CollectChains() { 2559 SmallVector<ChainUsers, 8> ChainUsersVec; 2560 2561 SmallVector<BasicBlock *,8> LatchPath; 2562 BasicBlock *LoopHeader = L->getHeader(); 2563 for (DomTreeNode *Rung = DT.getNode(L->getLoopLatch()); 2564 Rung->getBlock() != LoopHeader; Rung = Rung->getIDom()) { 2565 LatchPath.push_back(Rung->getBlock()); 2566 } 2567 LatchPath.push_back(LoopHeader); 2568 2569 // Walk the instruction stream from the loop header to the loop latch. 2570 for (SmallVectorImpl<BasicBlock *>::reverse_iterator 2571 BBIter = LatchPath.rbegin(), BBEnd = LatchPath.rend(); 2572 BBIter != BBEnd; ++BBIter) { 2573 for (BasicBlock::iterator I = (*BBIter)->begin(), E = (*BBIter)->end(); 2574 I != E; ++I) { 2575 // Skip instructions that weren't seen by IVUsers analysis. 2576 if (isa<PHINode>(I) || !IU.isIVUserOrOperand(I)) 2577 continue; 2578 2579 // Ignore users that are part of a SCEV expression. This way we only 2580 // consider leaf IV Users. This effectively rediscovers a portion of 2581 // IVUsers analysis but in program order this time. 2582 if (SE.isSCEVable(I->getType()) && !isa<SCEVUnknown>(SE.getSCEV(I))) 2583 continue; 2584 2585 // Remove this instruction from any NearUsers set it may be in. 2586 for (unsigned ChainIdx = 0, NChains = IVChainVec.size(); 2587 ChainIdx < NChains; ++ChainIdx) { 2588 ChainUsersVec[ChainIdx].NearUsers.erase(I); 2589 } 2590 // Search for operands that can be chained. 2591 SmallPtrSet<Instruction*, 4> UniqueOperands; 2592 User::op_iterator IVOpEnd = I->op_end(); 2593 User::op_iterator IVOpIter = findIVOperand(I->op_begin(), IVOpEnd, L, SE); 2594 while (IVOpIter != IVOpEnd) { 2595 Instruction *IVOpInst = cast<Instruction>(*IVOpIter); 2596 if (UniqueOperands.insert(IVOpInst)) 2597 ChainInstruction(I, IVOpInst, ChainUsersVec); 2598 IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE); 2599 } 2600 } // Continue walking down the instructions. 2601 } // Continue walking down the domtree. 2602 // Visit phi backedges to determine if the chain can generate the IV postinc. 2603 for (BasicBlock::iterator I = L->getHeader()->begin(); 2604 PHINode *PN = dyn_cast<PHINode>(I); ++I) { 2605 if (!SE.isSCEVable(PN->getType())) 2606 continue; 2607 2608 Instruction *IncV = 2609 dyn_cast<Instruction>(PN->getIncomingValueForBlock(L->getLoopLatch())); 2610 if (IncV) 2611 ChainInstruction(PN, IncV, ChainUsersVec); 2612 } 2613 // Remove any unprofitable chains. 2614 unsigned ChainIdx = 0; 2615 for (unsigned UsersIdx = 0, NChains = IVChainVec.size(); 2616 UsersIdx < NChains; ++UsersIdx) { 2617 if (!isProfitableChain(IVChainVec[UsersIdx], 2618 ChainUsersVec[UsersIdx].FarUsers, SE, TLI)) 2619 continue; 2620 // Preserve the chain at UsesIdx. 
2621 if (ChainIdx != UsersIdx)
2622 IVChainVec[ChainIdx] = IVChainVec[UsersIdx];
2623 FinalizeChain(IVChainVec[ChainIdx]);
2624 ++ChainIdx;
2625 }
2626 IVChainVec.resize(ChainIdx);
2627}
2628
2629void LSRInstance::FinalizeChain(IVChain &Chain) {
2630 assert(!Chain.empty() && "empty IV chains are not allowed");
2631 DEBUG(dbgs() << "Final Chain: " << *Chain[0].UserInst << "\n");
2632
2633 for (IVChain::const_iterator I = llvm::next(Chain.begin()), E = Chain.end();
2634 I != E; ++I) {
2635 DEBUG(dbgs() << " Inc: " << *I->UserInst << "\n");
2636 User::op_iterator UseI =
2637 std::find(I->UserInst->op_begin(), I->UserInst->op_end(), I->IVOperand);
2638 assert(UseI != I->UserInst->op_end() && "cannot find IV operand");
2639 IVIncSet.insert(UseI);
2640 }
2641}
2642
2643/// Return true if the IVInc can be folded into an addressing mode.
2644static bool canFoldIVIncExpr(const SCEV *IncExpr, Instruction *UserInst,
2645 Value *Operand, const TargetLowering *TLI) {
2646 const SCEVConstant *IncConst = dyn_cast<SCEVConstant>(IncExpr);
2647 if (!IncConst || !isAddressUse(UserInst, Operand))
2648 return false;
2649
2650 if (IncConst->getValue()->getValue().getMinSignedBits() > 64)
2651 return false;
2652
2653 int64_t IncOffset = IncConst->getValue()->getSExtValue();
2654 if (!isAlwaysFoldable(IncOffset, /*BaseGV=*/0, /*HasBaseReg=*/false,
2655 LSRUse::Address, getAccessType(UserInst), TLI))
2656 return false;
2657
2658 return true;
2659}
2660
2661/// GenerateIVChain - Generate an add or subtract for each IVInc in a chain to
2662/// materialize the IV user's operand from the previous IV user's operand.
2663void LSRInstance::GenerateIVChain(const IVChain &Chain, SCEVExpander &Rewriter,
2664 SmallVectorImpl<WeakVH> &DeadInsts) {
2665 // Find the new IVOperand for the head of the chain. It may have been replaced
2666 // by LSR.
2667 const IVInc &Head = Chain[0];
2668 User::op_iterator IVOpEnd = Head.UserInst->op_end();
2669 User::op_iterator IVOpIter = findIVOperand(Head.UserInst->op_begin(),
2670 IVOpEnd, L, SE);
2671 Value *IVSrc = 0;
2672 while (IVOpIter != IVOpEnd) {
2673 IVSrc = getWideOperand(*IVOpIter);
2674
2675 // If this operand computes the expression that the chain needs, we may use
2676 // it. (Check this after setting IVSrc which is used below.)
2677 //
2678 // Note that if Head.IncExpr is wider than IVSrc, then this phi is too
2679 // narrow for the chain, so we can no longer use it. We do allow using a
2680 // wider phi, assuming LSR checked for free truncation. In that case we
2681 // should already have a truncate on this operand such that
2682 // getSCEV(IVSrc) == IncExpr.
2683 if (SE.getSCEV(*IVOpIter) == Head.IncExpr
2684 || SE.getSCEV(IVSrc) == Head.IncExpr) {
2685 break;
2686 }
2687 IVOpIter = findIVOperand(llvm::next(IVOpIter), IVOpEnd, L, SE);
2688 }
2689 if (IVOpIter == IVOpEnd) {
2690 // Gracefully give up on this chain.
2691 DEBUG(dbgs() << "Concealed chain head: " << *Head.UserInst << "\n");
2692 return;
2693 }
2694
2695 DEBUG(dbgs() << "Generate chain at: " << *IVSrc << "\n");
2696 Type *IVTy = IVSrc->getType();
2697 Type *IntTy = SE.getEffectiveSCEVType(IVTy);
2698 const SCEV *LeftOverExpr = 0;
2699 for (IVChain::const_iterator IncI = llvm::next(Chain.begin()),
2700 IncE = Chain.end(); IncI != IncE; ++IncI) {
2701
2702 Instruction *InsertPt = IncI->UserInst;
2703 if (isa<PHINode>(InsertPt))
2704 InsertPt = L->getLoopLatch()->getTerminator();
2705
2706 // IVOper will replace the current IV User's operand. IVSrc is the IV
2707 // value currently held in a register.
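 // Sketch of the rewrite (hypothetical values): with chain %p, %p+8, %p+16,
 // each link below computes IVOper = IVSrc + the accumulated increment; when
 // an increment can't be folded into the user's addressing mode, IVOper
 // becomes the new IVSrc, otherwise the offset simply keeps accumulating.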
2708 Value *IVOper = IVSrc; 2709 if (!IncI->IncExpr->isZero()) { 2710 // IncExpr was the result of subtraction of two narrow values, so must 2711 // be signed. 2712 const SCEV *IncExpr = SE.getNoopOrSignExtend(IncI->IncExpr, IntTy); 2713 LeftOverExpr = LeftOverExpr ? 2714 SE.getAddExpr(LeftOverExpr, IncExpr) : IncExpr; 2715 } 2716 if (LeftOverExpr && !LeftOverExpr->isZero()) { 2717 // Expand the IV increment. 2718 Rewriter.clearPostInc(); 2719 Value *IncV = Rewriter.expandCodeFor(LeftOverExpr, IntTy, InsertPt); 2720 const SCEV *IVOperExpr = SE.getAddExpr(SE.getUnknown(IVSrc), 2721 SE.getUnknown(IncV)); 2722 IVOper = Rewriter.expandCodeFor(IVOperExpr, IVTy, InsertPt); 2723 2724 // If an IV increment can't be folded, use it as the next IV value. 2725 if (!canFoldIVIncExpr(LeftOverExpr, IncI->UserInst, IncI->IVOperand, 2726 TLI)) { 2727 assert(IVTy == IVOper->getType() && "inconsistent IV increment type"); 2728 IVSrc = IVOper; 2729 LeftOverExpr = 0; 2730 } 2731 } 2732 Type *OperTy = IncI->IVOperand->getType(); 2733 if (IVTy != OperTy) { 2734 assert(SE.getTypeSizeInBits(IVTy) >= SE.getTypeSizeInBits(OperTy) && 2735 "cannot extend a chained IV"); 2736 IRBuilder<> Builder(InsertPt); 2737 IVOper = Builder.CreateTruncOrBitCast(IVOper, OperTy, "lsr.chain"); 2738 } 2739 IncI->UserInst->replaceUsesOfWith(IncI->IVOperand, IVOper); 2740 DeadInsts.push_back(IncI->IVOperand); 2741 } 2742 // If LSR created a new, wider phi, we may also replace its postinc. We only 2743 // do this if we also found a wide value for the head of the chain. 2744 if (isa<PHINode>(Chain.back().UserInst)) { 2745 for (BasicBlock::iterator I = L->getHeader()->begin(); 2746 PHINode *Phi = dyn_cast<PHINode>(I); ++I) { 2747 if (!isCompatibleIVType(Phi, IVSrc)) 2748 continue; 2749 Instruction *PostIncV = dyn_cast<Instruction>( 2750 Phi->getIncomingValueForBlock(L->getLoopLatch())); 2751 if (!PostIncV || (SE.getSCEV(PostIncV) != SE.getSCEV(IVSrc))) 2752 continue; 2753 Value *IVOper = IVSrc; 2754 Type *PostIncTy = PostIncV->getType(); 2755 if (IVTy != PostIncTy) { 2756 assert(PostIncTy->isPointerTy() && "mixing int/ptr IV types"); 2757 IRBuilder<> Builder(L->getLoopLatch()->getTerminator()); 2758 Builder.SetCurrentDebugLocation(PostIncV->getDebugLoc()); 2759 IVOper = Builder.CreatePointerCast(IVSrc, PostIncTy, "lsr.chain"); 2760 } 2761 Phi->replaceUsesOfWith(PostIncV, IVOper); 2762 DeadInsts.push_back(PostIncV); 2763 } 2764 } 2765} 2766 2767void LSRInstance::CollectFixupsAndInitialFormulae() { 2768 for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) { 2769 Instruction *UserInst = UI->getUser(); 2770 // Skip IV users that are part of profitable IV Chains. 2771 User::op_iterator UseI = std::find(UserInst->op_begin(), UserInst->op_end(), 2772 UI->getOperandValToReplace()); 2773 assert(UseI != UserInst->op_end() && "cannot find IV operand"); 2774 if (IVIncSet.count(UseI)) 2775 continue; 2776 2777 // Record the uses. 2778 LSRFixup &LF = getNewFixup(); 2779 LF.UserInst = UserInst; 2780 LF.OperandValToReplace = UI->getOperandValToReplace(); 2781 LF.PostIncLoops = UI->getPostIncLoops(); 2782 2783 LSRUse::KindType Kind = LSRUse::Basic; 2784 Type *AccessTy = 0; 2785 if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) { 2786 Kind = LSRUse::Address; 2787 AccessTy = getAccessType(LF.UserInst); 2788 } 2789 2790 const SCEV *S = IU.getExpr(*UI); 2791 2792 // Equality (== and !=) ICmps are special. 
We can rewrite (i == N) as 2793 // (N - i == 0), and this allows (N - i) to be the expression that we work 2794 // with rather than just N or i, so we can consider the register 2795 // requirements for both N and i at the same time. Limiting this code to 2796 // equality icmps is not a problem because all interesting loops use 2797 // equality icmps, thanks to IndVarSimplify. 2798 if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst)) 2799 if (CI->isEquality()) { 2800 // Swap the operands if needed to put the OperandValToReplace on the 2801 // left, for consistency. 2802 Value *NV = CI->getOperand(1); 2803 if (NV == LF.OperandValToReplace) { 2804 CI->setOperand(1, CI->getOperand(0)); 2805 CI->setOperand(0, NV); 2806 NV = CI->getOperand(1); 2807 Changed = true; 2808 } 2809 2810 // x == y --> x - y == 0 2811 const SCEV *N = SE.getSCEV(NV); 2812 if (SE.isLoopInvariant(N, L)) { 2813 // S is normalized, so normalize N before folding it into S 2814 // to keep the result normalized. 2815 N = TransformForPostIncUse(Normalize, N, CI, 0, 2816 LF.PostIncLoops, SE, DT); 2817 Kind = LSRUse::ICmpZero; 2818 S = SE.getMinusSCEV(N, S); 2819 } 2820 2821 // -1 and the negations of all interesting strides (except the negation 2822 // of -1) are now also interesting. 2823 for (size_t i = 0, e = Factors.size(); i != e; ++i) 2824 if (Factors[i] != -1) 2825 Factors.insert(-(uint64_t)Factors[i]); 2826 Factors.insert(-1); 2827 } 2828 2829 // Set up the initial formula for this use. 2830 std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy); 2831 LF.LUIdx = P.first; 2832 LF.Offset = P.second; 2833 LSRUse &LU = Uses[LF.LUIdx]; 2834 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); 2835 if (!LU.WidestFixupType || 2836 SE.getTypeSizeInBits(LU.WidestFixupType) < 2837 SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) 2838 LU.WidestFixupType = LF.OperandValToReplace->getType(); 2839 2840 // If this is the first use of this LSRUse, give it a formula. 2841 if (LU.Formulae.empty()) { 2842 InsertInitialFormula(S, LU, LF.LUIdx); 2843 CountRegisters(LU.Formulae.back(), LF.LUIdx); 2844 } 2845 } 2846 2847 DEBUG(print_fixups(dbgs())); 2848} 2849 2850/// InsertInitialFormula - Insert a formula for the given expression into 2851/// the given use, separating out loop-variant portions from loop-invariant 2852/// and loop-computable portions. 2853void 2854LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) { 2855 Formula F; 2856 F.InitialMatch(S, L, SE); 2857 bool Inserted = InsertFormula(LU, LUIdx, F); 2858 assert(Inserted && "Initial formula already exists!"); (void)Inserted; 2859} 2860 2861/// InsertSupplementalFormula - Insert a simple single-register formula for 2862/// the given expression into the given use. 2863void 2864LSRInstance::InsertSupplementalFormula(const SCEV *S, 2865 LSRUse &LU, size_t LUIdx) { 2866 Formula F; 2867 F.BaseRegs.push_back(S); 2868 F.AM.HasBaseReg = true; 2869 bool Inserted = InsertFormula(LU, LUIdx, F); 2870 assert(Inserted && "Supplemental formula already exists!"); (void)Inserted; 2871} 2872 2873/// CountRegisters - Note which registers are used by the given formula, 2874/// updating RegUses. 
2875void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) { 2876 if (F.ScaledReg) 2877 RegUses.CountRegister(F.ScaledReg, LUIdx); 2878 for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(), 2879 E = F.BaseRegs.end(); I != E; ++I) 2880 RegUses.CountRegister(*I, LUIdx); 2881} 2882 2883/// InsertFormula - If the given formula has not yet been inserted, add it to 2884/// the list, and return true. Return false otherwise. 2885bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) { 2886 if (!LU.InsertFormula(F)) 2887 return false; 2888 2889 CountRegisters(F, LUIdx); 2890 return true; 2891} 2892 2893/// CollectLoopInvariantFixupsAndFormulae - Check for other uses of 2894/// loop-invariant values which we're tracking. These other uses will pin these 2895/// values in registers, making them less profitable for elimination. 2896/// TODO: This currently misses non-constant addrec step registers. 2897/// TODO: Should this give more weight to users inside the loop? 2898void 2899LSRInstance::CollectLoopInvariantFixupsAndFormulae() { 2900 SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end()); 2901 SmallPtrSet<const SCEV *, 8> Inserted; 2902 2903 while (!Worklist.empty()) { 2904 const SCEV *S = Worklist.pop_back_val(); 2905 2906 if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) 2907 Worklist.append(N->op_begin(), N->op_end()); 2908 else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) 2909 Worklist.push_back(C->getOperand()); 2910 else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) { 2911 Worklist.push_back(D->getLHS()); 2912 Worklist.push_back(D->getRHS()); 2913 } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) { 2914 if (!Inserted.insert(U)) continue; 2915 const Value *V = U->getValue(); 2916 if (const Instruction *Inst = dyn_cast<Instruction>(V)) { 2917 // Look for instructions defined outside the loop. 2918 if (L->contains(Inst)) continue; 2919 } else if (isa<UndefValue>(V)) 2920 // Undef doesn't have a live range, so it doesn't matter. 2921 continue; 2922 for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end(); 2923 UI != UE; ++UI) { 2924 const Instruction *UserInst = dyn_cast<Instruction>(*UI); 2925 // Ignore non-instructions. 2926 if (!UserInst) 2927 continue; 2928 // Ignore instructions in other functions (as can happen with 2929 // Constants). 2930 if (UserInst->getParent()->getParent() != L->getHeader()->getParent()) 2931 continue; 2932 // Ignore instructions not dominated by the loop. 2933 const BasicBlock *UseBB = !isa<PHINode>(UserInst) ? 2934 UserInst->getParent() : 2935 cast<PHINode>(UserInst)->getIncomingBlock( 2936 PHINode::getIncomingValueNumForOperand(UI.getOperandNo())); 2937 if (!DT.dominates(L->getHeader(), UseBB)) 2938 continue; 2939 // Ignore uses which are part of other SCEV expressions, to avoid 2940 // analyzing them multiple times. 2941 if (SE.isSCEVable(UserInst->getType())) { 2942 const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst)); 2943 // If the user is a no-op, look through to its uses. 2944 if (!isa<SCEVUnknown>(UserS)) 2945 continue; 2946 if (UserS == U) { 2947 Worklist.push_back( 2948 SE.getUnknown(const_cast<Instruction *>(UserInst))); 2949 continue; 2950 } 2951 } 2952 // Ignore icmp instructions which are already being analyzed. 
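 // For example (hypothetical IR): in 'icmp eq i64 %iv, %len', %len is the
 // invariant being scanned here, but the icmp's other operand %iv has a
 // computable evolution in L, so the comparison is already covered by the
 // ICmpZero handling in CollectFixupsAndInitialFormulae.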
2953 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) { 2954 unsigned OtherIdx = !UI.getOperandNo(); 2955 Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx)); 2956 if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L)) 2957 continue; 2958 } 2959 2960 LSRFixup &LF = getNewFixup(); 2961 LF.UserInst = const_cast<Instruction *>(UserInst); 2962 LF.OperandValToReplace = UI.getUse(); 2963 std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0); 2964 LF.LUIdx = P.first; 2965 LF.Offset = P.second; 2966 LSRUse &LU = Uses[LF.LUIdx]; 2967 LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L); 2968 if (!LU.WidestFixupType || 2969 SE.getTypeSizeInBits(LU.WidestFixupType) < 2970 SE.getTypeSizeInBits(LF.OperandValToReplace->getType())) 2971 LU.WidestFixupType = LF.OperandValToReplace->getType(); 2972 InsertSupplementalFormula(U, LU, LF.LUIdx); 2973 CountRegisters(LU.Formulae.back(), Uses.size() - 1); 2974 break; 2975 } 2976 } 2977 } 2978} 2979 2980/// CollectSubexprs - Split S into subexpressions which can be pulled out into 2981/// separate registers. If C is non-null, multiply each subexpression by C. 2982static void CollectSubexprs(const SCEV *S, const SCEVConstant *C, 2983 SmallVectorImpl<const SCEV *> &Ops, 2984 const Loop *L, 2985 ScalarEvolution &SE) { 2986 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) { 2987 // Break out add operands. 2988 for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end(); 2989 I != E; ++I) 2990 CollectSubexprs(*I, C, Ops, L, SE); 2991 return; 2992 } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) { 2993 // Split a non-zero base out of an addrec. 2994 if (!AR->getStart()->isZero()) { 2995 CollectSubexprs(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0), 2996 AR->getStepRecurrence(SE), 2997 AR->getLoop(), 2998 //FIXME: AR->getNoWrapFlags(SCEV::FlagNW) 2999 SCEV::FlagAnyWrap), 3000 C, Ops, L, SE); 3001 CollectSubexprs(AR->getStart(), C, Ops, L, SE); 3002 return; 3003 } 3004 } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) { 3005 // Break (C * (a + b + c)) into C*a + C*b + C*c. 3006 if (Mul->getNumOperands() == 2) 3007 if (const SCEVConstant *Op0 = 3008 dyn_cast<SCEVConstant>(Mul->getOperand(0))) { 3009 CollectSubexprs(Mul->getOperand(1), 3010 C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0, 3011 Ops, L, SE); 3012 return; 3013 } 3014 } 3015 3016 // Otherwise use the value itself, optionally with a scale applied. 3017 Ops.push_back(C ? SE.getMulExpr(C, S) : S); 3018} 3019 3020/// GenerateReassociations - Split out subexpressions from adds and the bases of 3021/// addrecs. 3022void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx, 3023 Formula Base, 3024 unsigned Depth) { 3025 // Arbitrarily cap recursion to protect compile time. 3026 if (Depth >= 3) return; 3027 3028 for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) { 3029 const SCEV *BaseReg = Base.BaseRegs[i]; 3030 3031 SmallVector<const SCEV *, 8> AddOps; 3032 CollectSubexprs(BaseReg, 0, AddOps, L, SE); 3033 3034 if (AddOps.size() == 1) continue; 3035 3036 for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(), 3037 JE = AddOps.end(); J != JE; ++J) { 3038 3039 // Loop-variant "unknown" values are uninteresting; we won't be able to 3040 // do anything meaningful with them. 3041 if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L)) 3042 continue; 3043 3044 // Don't pull a constant into a register if the constant could be folded 3045 // into an immediate field. 
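 // For instance (hypothetical formula): when reassociating reg(%a + %b + 4),
 // the subexpression 4 should stay an immediate in the addressing mode or
 // the unfolded offset, not become a base register of its own.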
3046 if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset, 3047 Base.getNumRegs() > 1, 3048 LU.Kind, LU.AccessTy, TLI, SE)) 3049 continue; 3050 3051 // Collect all operands except *J. 3052 SmallVector<const SCEV *, 8> InnerAddOps 3053 (((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J); 3054 InnerAddOps.append 3055 (llvm::next(J), ((const SmallVector<const SCEV *, 8> &)AddOps).end()); 3056 3057 // Don't leave just a constant behind in a register if the constant could 3058 // be folded into an immediate field. 3059 if (InnerAddOps.size() == 1 && 3060 isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset, 3061 Base.getNumRegs() > 1, 3062 LU.Kind, LU.AccessTy, TLI, SE)) 3063 continue; 3064 3065 const SCEV *InnerSum = SE.getAddExpr(InnerAddOps); 3066 if (InnerSum->isZero()) 3067 continue; 3068 Formula F = Base; 3069 3070 // Add the remaining pieces of the add back into the new formula. 3071 const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum); 3072 if (TLI && InnerSumSC && 3073 SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 && 3074 TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset + 3075 InnerSumSC->getValue()->getZExtValue())) { 3076 F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset + 3077 InnerSumSC->getValue()->getZExtValue(); 3078 F.BaseRegs.erase(F.BaseRegs.begin() + i); 3079 } else 3080 F.BaseRegs[i] = InnerSum; 3081 3082 // Add J as its own register, or an unfolded immediate. 3083 const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J); 3084 if (TLI && SC && SE.getTypeSizeInBits(SC->getType()) <= 64 && 3085 TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset + 3086 SC->getValue()->getZExtValue())) 3087 F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset + 3088 SC->getValue()->getZExtValue(); 3089 else 3090 F.BaseRegs.push_back(*J); 3091 3092 if (InsertFormula(LU, LUIdx, F)) 3093 // If that formula hadn't been seen before, recurse to find more like 3094 // it. 3095 GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1); 3096 } 3097 } 3098} 3099 3100/// GenerateCombinations - Generate a formula consisting of all of the 3101/// loop-dominating registers added into a single register. 3102void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx, 3103 Formula Base) { 3104 // This method is only interesting on a plurality of registers. 3105 if (Base.BaseRegs.size() <= 1) return; 3106 3107 Formula F = Base; 3108 F.BaseRegs.clear(); 3109 SmallVector<const SCEV *, 4> Ops; 3110 for (SmallVectorImpl<const SCEV *>::const_iterator 3111 I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) { 3112 const SCEV *BaseReg = *I; 3113 if (SE.properlyDominates(BaseReg, L->getHeader()) && 3114 !SE.hasComputableLoopEvolution(BaseReg, L)) 3115 Ops.push_back(BaseReg); 3116 else 3117 F.BaseRegs.push_back(BaseReg); 3118 } 3119 if (Ops.size() > 1) { 3120 const SCEV *Sum = SE.getAddExpr(Ops); 3121 // TODO: If Sum is zero, it probably means ScalarEvolution missed an 3122 // opportunity to fold something. For now, just ignore such cases 3123 // rather than proceed with zero in a register. 3124 if (!Sum->isZero()) { 3125 F.BaseRegs.push_back(Sum); 3126 (void)InsertFormula(LU, LUIdx, F); 3127 } 3128 } 3129} 3130 3131/// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets. 3132void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, 3133 Formula Base) { 3134 // We can't add a symbolic offset if the address already contains one. 
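 // For illustration (hypothetical formula): a base register computing
 // (@global + %i) can be split so that @global moves into AM.BaseGV and %i
 // remains the register, matching targets that fold a symbol into the
 // addressing mode; a formula that already has a BaseGV can't take another.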
  if (Base.AM.BaseGV) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];
    GlobalValue *GV = ExtractSymbol(G, SE);
    if (G->isZero() || !GV)
      continue;
    Formula F = Base;
    F.AM.BaseGV = GV;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}

/// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // TODO: For now, just add the min and max offset, because it usually isn't
  // worthwhile looking at everything in between.
  SmallVector<int64_t, 2> Worklist;
  Worklist.push_back(LU.MinOffset);
  if (LU.MaxOffset != LU.MinOffset)
    Worklist.push_back(LU.MaxOffset);

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];

    for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
         E = Worklist.end(); I != E; ++I) {
      Formula F = Base;
      F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
      if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
                     LU.Kind, LU.AccessTy, TLI)) {
        // Add the offset to the base register.
        const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G);
        // If it cancelled out, drop the base register, otherwise update it.
        if (NewG->isZero()) {
          std::swap(F.BaseRegs[i], F.BaseRegs.back());
          F.BaseRegs.pop_back();
        } else
          F.BaseRegs[i] = NewG;

        (void)InsertFormula(LU, LUIdx, F);
      }
    }

    int64_t Imm = ExtractImmediate(G, SE);
    if (G->isZero() || Imm == 0)
      continue;
    Formula F = Base;
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}

/// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up
/// the comparison. For example, x == y -> x*c == y*c.
void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
                                         Formula Base) {
  if (LU.Kind != LSRUse::ICmpZero) return;

  // Determine the integer type for the base formula.
  Type *IntTy = Base.getType();
  if (!IntTy) return;
  if (SE.getTypeSizeInBits(IntTy) > 64) return;

  // Don't do this if there is more than one offset.
  if (LU.MinOffset != LU.MaxOffset) return;

  assert(!Base.AM.BaseGV && "ICmpZero use is not legal!");

  // Check each interesting stride.
  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    int64_t Factor = *I;

    // Check that the multiplication doesn't overflow.
    if (Base.AM.BaseOffs == INT64_MIN && Factor == -1)
      continue;
    int64_t NewBaseOffs = (uint64_t)Base.AM.BaseOffs * Factor;
    if (NewBaseOffs / Factor != Base.AM.BaseOffs)
      continue;

    // Check that multiplying with the use offset doesn't overflow.
    int64_t Offset = LU.MinOffset;
    if (Offset == INT64_MIN && Factor == -1)
      continue;
    Offset = (uint64_t)Offset * Factor;
    if (Offset / Factor != LU.MinOffset)
      continue;

    Formula F = Base;
    F.AM.BaseOffs = NewBaseOffs;

    // Check that this scale is legal.
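    // For example, with Factor == 4, a use (reg + 4 == 0) becomes
    // (reg*4 + 16 == 0), which only helps if the target can handle the
    // scaled form.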
    if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
      continue;

    // Compensate for the use having MinOffset built into it.
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Offset - LU.MinOffset;

    const SCEV *FactorS = SE.getConstant(IntTy, Factor);

    // Check that multiplying with each base register doesn't overflow.
    for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
      F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
      if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
        goto next;
    }

    // Check that multiplying with the scaled register doesn't overflow.
    if (F.ScaledReg) {
      F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
      if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
        continue;
    }

    // Check that multiplying with the unfolded offset doesn't overflow.
    if (F.UnfoldedOffset != 0) {
      if (F.UnfoldedOffset == INT64_MIN && Factor == -1)
        continue;
      F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor;
      if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset)
        continue;
    }

    // If we make it here and it's legal, add it.
    (void)InsertFormula(LU, LUIdx, F);
  next:;
  }
}

/// GenerateScales - Generate stride factor reuse formulae by making use of
/// scaled-offset address modes, for example.
void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
  // Determine the integer type for the base formula.
  Type *IntTy = Base.getType();
  if (!IntTy) return;

  // If this Formula already has a scaled register, we can't add another one.
  if (Base.AM.Scale != 0) return;

  // Check each interesting stride.
  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    int64_t Factor = *I;

    Base.AM.Scale = Factor;
    Base.AM.HasBaseReg = Base.BaseRegs.size() > 1;
    // Check whether this scale is going to be legal.
    if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI)) {
      // As a special case, handle out-of-loop Basic users by switching them
      // to the Special use kind, for which this scale may be legal.
      // TODO: Reconsider this special case.
      if (LU.Kind == LSRUse::Basic &&
          isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
                     LSRUse::Special, LU.AccessTy, TLI) &&
          LU.AllFixupsOutsideLoop)
        LU.Kind = LSRUse::Special;
      else
        continue;
    }
    // For an ICmpZero, negating a solitary base register won't lead to
    // new solutions.
    if (LU.Kind == LSRUse::ICmpZero &&
        !Base.AM.HasBaseReg && Base.AM.BaseOffs == 0 && !Base.AM.BaseGV)
      continue;
    // For each addrec base reg, apply the scale, if possible.
    for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
      if (const SCEVAddRecExpr *AR =
            dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) {
        const SCEV *FactorS = SE.getConstant(IntTy, Factor);
        if (FactorS->isZero())
          continue;
        // Divide out the factor, ignoring high bits, since we'll be
        // scaling the value back up in the end.
        if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) {
          // TODO: This could be optimized to avoid all the copying.
          Formula F = Base;
          F.ScaledReg = Quotient;
          F.DeleteBaseReg(F.BaseRegs[i]);
          (void)InsertFormula(LU, LUIdx, F);
        }
      }
  }
}

/// GenerateTruncates - Generate reuse formulae from different IV types.
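/// For example, on a 64-bit target where truncating an i64 to i32 is free,
/// a use that only needs i32 values may be able to reuse a wider IV's
/// register instead of keeping a separate narrow one.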
3330void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) { 3331 // This requires TargetLowering to tell us which truncates are free. 3332 if (!TLI) return; 3333 3334 // Don't bother truncating symbolic values. 3335 if (Base.AM.BaseGV) return; 3336 3337 // Determine the integer type for the base formula. 3338 Type *DstTy = Base.getType(); 3339 if (!DstTy) return; 3340 DstTy = SE.getEffectiveSCEVType(DstTy); 3341 3342 for (SmallSetVector<Type *, 4>::const_iterator 3343 I = Types.begin(), E = Types.end(); I != E; ++I) { 3344 Type *SrcTy = *I; 3345 if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) { 3346 Formula F = Base; 3347 3348 if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I); 3349 for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(), 3350 JE = F.BaseRegs.end(); J != JE; ++J) 3351 *J = SE.getAnyExtendExpr(*J, SrcTy); 3352 3353 // TODO: This assumes we've done basic processing on all uses and 3354 // have an idea what the register usage is. 3355 if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses)) 3356 continue; 3357 3358 (void)InsertFormula(LU, LUIdx, F); 3359 } 3360 } 3361} 3362 3363namespace { 3364 3365/// WorkItem - Helper class for GenerateCrossUseConstantOffsets. It's used to 3366/// defer modifications so that the search phase doesn't have to worry about 3367/// the data structures moving underneath it. 3368struct WorkItem { 3369 size_t LUIdx; 3370 int64_t Imm; 3371 const SCEV *OrigReg; 3372 3373 WorkItem(size_t LI, int64_t I, const SCEV *R) 3374 : LUIdx(LI), Imm(I), OrigReg(R) {} 3375 3376 void print(raw_ostream &OS) const; 3377 void dump() const; 3378}; 3379 3380} 3381 3382void WorkItem::print(raw_ostream &OS) const { 3383 OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx 3384 << " , add offset " << Imm; 3385} 3386 3387void WorkItem::dump() const { 3388 print(errs()); errs() << '\n'; 3389} 3390 3391/// GenerateCrossUseConstantOffsets - Look for registers which are a constant 3392/// distance apart and try to form reuse opportunities between them. 3393void LSRInstance::GenerateCrossUseConstantOffsets() { 3394 // Group the registers by their value without any added constant offset. 3395 typedef std::map<int64_t, const SCEV *> ImmMapTy; 3396 typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy; 3397 RegMapTy Map; 3398 DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap; 3399 SmallVector<const SCEV *, 8> Sequence; 3400 for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end(); 3401 I != E; ++I) { 3402 const SCEV *Reg = *I; 3403 int64_t Imm = ExtractImmediate(Reg, SE); 3404 std::pair<RegMapTy::iterator, bool> Pair = 3405 Map.insert(std::make_pair(Reg, ImmMapTy())); 3406 if (Pair.second) 3407 Sequence.push_back(Reg); 3408 Pair.first->second.insert(std::make_pair(Imm, *I)); 3409 UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I); 3410 } 3411 3412 // Now examine each set of registers with the same base value. Build up 3413 // a list of work to do and do the work in a separate step so that we're 3414 // not adding formulae and register counts while we're searching. 3415 SmallVector<WorkItem, 32> WorkItems; 3416 SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems; 3417 for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(), 3418 E = Sequence.end(); I != E; ++I) { 3419 const SCEV *Reg = *I; 3420 const ImmMapTy &Imms = Map.find(Reg)->second; 3421 3422 // It's not worthwhile looking for reuse if there's only one offset. 
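    // (A register that only ever appears at a single constant offset has
    // no partner a constant distance away to share with.)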
    if (Imms.size() == 1)
      continue;

    DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
          for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
               J != JE; ++J)
            dbgs() << ' ' << J->first;
          dbgs() << '\n');

    // Examine each offset.
    for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
         J != JE; ++J) {
      const SCEV *OrigReg = J->second;

      int64_t JImm = J->first;
      const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);

      if (!isa<SCEVConstant>(OrigReg) &&
          UsedByIndicesMap[Reg].count() == 1) {
        DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n');
        continue;
      }

      // Conservatively examine offsets between this orig reg and a few
      // selected other orig regs.
      ImmMapTy::const_iterator OtherImms[] = {
        Imms.begin(), prior(Imms.end()),
        Imms.lower_bound((Imms.begin()->first + prior(Imms.end())->first) / 2)
      };
      for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
        ImmMapTy::const_iterator M = OtherImms[i];
        if (M == J || M == JE) continue;

        // Compute the difference between the two.
        int64_t Imm = (uint64_t)JImm - M->first;
        for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
             LUIdx = UsedByIndices.find_next(LUIdx))
          // Make a memo of this use, offset, and register tuple.
          if (UniqueItems.insert(std::make_pair(LUIdx, Imm)))
            WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
      }
    }
  }

  Map.clear();
  Sequence.clear();
  UsedByIndicesMap.clear();
  UniqueItems.clear();

  // Now iterate through the worklist and add new formulae.
  for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
       E = WorkItems.end(); I != E; ++I) {
    const WorkItem &WI = *I;
    size_t LUIdx = WI.LUIdx;
    LSRUse &LU = Uses[LUIdx];
    int64_t Imm = WI.Imm;
    const SCEV *OrigReg = WI.OrigReg;

    Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
    const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
    unsigned BitWidth = SE.getTypeSizeInBits(IntTy);

    // TODO: Use a more targeted data structure.
    for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
      const Formula &F = LU.Formulae[L];
      // Use the immediate in the scaled register.
      if (F.ScaledReg == OrigReg) {
        int64_t Offs = (uint64_t)F.AM.BaseOffs +
                       Imm * (uint64_t)F.AM.Scale;
        // Don't create 50 + reg(-50).
        if (F.referencesReg(SE.getSCEV(
                   ConstantInt::get(IntTy, -(uint64_t)Offs))))
          continue;
        Formula NewF = F;
        NewF.AM.BaseOffs = Offs;
        if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI))
          continue;
        NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);

        // If the new scale is a constant in a register, and adding the
        // constant value to the immediate would produce a value closer to
        // zero than the immediate itself, then the formula isn't worthwhile.
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
          if (C->getValue()->isNegative() !=
                (NewF.AM.BaseOffs < 0) &&
              (C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale))
                .ule(abs64(NewF.AM.BaseOffs)))
            continue;

        // OK, looks good.
        (void)InsertFormula(LU, LUIdx, NewF);
      } else {
        // Use the immediate in a base register.
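        // For example, if one use has reg({0,+,4}<%L>) and another has
        // reg({8,+,4}<%L>), the second can be rewritten as the first plus
        // a folded immediate of 8, letting both share one register.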
3517 for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) { 3518 const SCEV *BaseReg = F.BaseRegs[N]; 3519 if (BaseReg != OrigReg) 3520 continue; 3521 Formula NewF = F; 3522 NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm; 3523 if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset, 3524 LU.Kind, LU.AccessTy, TLI)) { 3525 if (!TLI || 3526 !TLI->isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm)) 3527 continue; 3528 NewF = F; 3529 NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm; 3530 } 3531 NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg); 3532 3533 // If the new formula has a constant in a register, and adding the 3534 // constant value to the immediate would produce a value closer to 3535 // zero than the immediate itself, then the formula isn't worthwhile. 3536 for (SmallVectorImpl<const SCEV *>::const_iterator 3537 J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end(); 3538 J != JE; ++J) 3539 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J)) 3540 if ((C->getValue()->getValue() + NewF.AM.BaseOffs).abs().slt( 3541 abs64(NewF.AM.BaseOffs)) && 3542 (C->getValue()->getValue() + 3543 NewF.AM.BaseOffs).countTrailingZeros() >= 3544 CountTrailingZeros_64(NewF.AM.BaseOffs)) 3545 goto skip_formula; 3546 3547 // Ok, looks good. 3548 (void)InsertFormula(LU, LUIdx, NewF); 3549 break; 3550 skip_formula:; 3551 } 3552 } 3553 } 3554 } 3555} 3556 3557/// GenerateAllReuseFormulae - Generate formulae for each use. 3558void 3559LSRInstance::GenerateAllReuseFormulae() { 3560 // This is split into multiple loops so that hasRegsUsedByUsesOtherThan 3561 // queries are more precise. 3562 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 3563 LSRUse &LU = Uses[LUIdx]; 3564 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 3565 GenerateReassociations(LU, LUIdx, LU.Formulae[i]); 3566 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 3567 GenerateCombinations(LU, LUIdx, LU.Formulae[i]); 3568 } 3569 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 3570 LSRUse &LU = Uses[LUIdx]; 3571 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 3572 GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]); 3573 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 3574 GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]); 3575 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 3576 GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]); 3577 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 3578 GenerateScales(LU, LUIdx, LU.Formulae[i]); 3579 } 3580 for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) { 3581 LSRUse &LU = Uses[LUIdx]; 3582 for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i) 3583 GenerateTruncates(LU, LUIdx, LU.Formulae[i]); 3584 } 3585 3586 GenerateCrossUseConstantOffsets(); 3587 3588 DEBUG(dbgs() << "\n" 3589 "After generating reuse formulae:\n"; 3590 print_uses(dbgs())); 3591} 3592 3593/// If there are multiple formulae with the same set of registers used 3594/// by other uses, pick the best one and delete the others. 3595void LSRInstance::FilterOutUndesirableDedicatedRegisters() { 3596 DenseSet<const SCEV *> VisitedRegs; 3597 SmallPtrSet<const SCEV *, 16> Regs; 3598 SmallPtrSet<const SCEV *, 16> LoserRegs; 3599#ifndef NDEBUG 3600 bool ChangedFormulae = false; 3601#endif 3602 3603 // Collect the best formula for each unique set of shared registers. This 3604 // is reset for each use. 
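  // (The key deliberately covers only the registers shared with other uses;
  // formulae that differ only in their private registers collide here, and
  // just the cheaper one survives.)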
  typedef DenseMap<SmallVector<const SCEV *, 2>, size_t, UniquifierDenseMapInfo>
    BestFormulaeTy;
  BestFormulaeTy BestFormulae;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n');

    bool Any = false;
    for (size_t FIdx = 0, NumForms = LU.Formulae.size();
         FIdx != NumForms; ++FIdx) {
      Formula &F = LU.Formulae[FIdx];

      // Some formulae are instant losers. For example, they may depend on
      // nonexistent AddRecs from other loops. These need to be filtered
      // immediately; otherwise heuristics could choose them over others,
      // leading to an unsatisfactory solution. Passing LoserRegs into
      // RateFormula here avoids the need to recompute this information
      // across formulae using the same bad AddRec. Passing LoserRegs is
      // also essential unless we remove the corresponding bad register from
      // the Regs set.
      Cost CostF;
      Regs.clear();
      CostF.RateFormula(F, Regs, VisitedRegs, L, LU.Offsets, SE, DT,
                        &LoserRegs);
      if (CostF.isLoser()) {
        // During initial formula generation, undesirable formulae are
        // generated by uses within other loops that have some non-trivial
        // address mode or use the postinc form of the IV. LSR needs to
        // provide these formulae as the basis of rediscovering the desired
        // formula that uses an AddRec corresponding to the existing phi.
        // Once all formulae have been generated, these initial losers may
        // be pruned.
        DEBUG(dbgs() << "  Filtering loser "; F.print(dbgs());
              dbgs() << "\n");
      }
      else {
        SmallVector<const SCEV *, 2> Key;
        for (SmallVectorImpl<const SCEV *>::const_iterator J =
             F.BaseRegs.begin(), JE = F.BaseRegs.end(); J != JE; ++J) {
          const SCEV *Reg = *J;
          if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
            Key.push_back(Reg);
        }
        if (F.ScaledReg &&
            RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
          Key.push_back(F.ScaledReg);
        // Unstable sort by host order is OK, because this is only used for
        // uniquifying.
        std::sort(Key.begin(), Key.end());

        std::pair<BestFormulaeTy::const_iterator, bool> P =
          BestFormulae.insert(std::make_pair(Key, FIdx));
        if (P.second)
          continue;

        Formula &Best = LU.Formulae[P.first->second];

        Cost CostBest;
        Regs.clear();
        CostBest.RateFormula(Best, Regs, VisitedRegs, L, LU.Offsets, SE, DT);
        if (CostF < CostBest)
          std::swap(F, Best);
        DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
              dbgs() << "\n"
                        "    in favor of formula "; Best.print(dbgs());
              dbgs() << '\n');
      }
#ifndef NDEBUG
      ChangedFormulae = true;
#endif
      LU.DeleteFormula(F);
      --FIdx;
      --NumForms;
      Any = true;
    }

    // Now that we've filtered out some formulae, recompute the Regs set.
    if (Any)
      LU.RecomputeRegs(LUIdx, RegUses);

    // Reset this to prepare for the next use.
    BestFormulae.clear();
  }

  DEBUG(if (ChangedFormulae) {
          dbgs() << "\n"
                    "After filtering out undesirable candidates:\n";
          print_uses(dbgs());
        });
}

// This is a rough guess that seems to work fairly well.
static const size_t ComplexityLimit = UINT16_MAX;

/// EstimateSearchSpaceComplexity - Estimate the worst-case number of
/// solutions the solver might have to consider.
/// It almost never considers this many solutions because it prunes the
/// search space, but the pruning isn't always sufficient.
size_t LSRInstance::EstimateSearchSpaceComplexity() const {
  size_t Power = 1;
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    size_t FSize = I->Formulae.size();
    if (FSize >= ComplexityLimit) {
      Power = ComplexityLimit;
      break;
    }
    Power *= FSize;
    if (Power >= ComplexityLimit)
      break;
  }
  return Power;
}

/// NarrowSearchSpaceByDetectingSupersets - When one formula uses a superset
/// of the registers of another formula, it won't help reduce register
/// pressure (though it won't necessarily hurt it either); remove it to
/// simplify the system.
void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    DEBUG(dbgs() << "The search space is too complex.\n");

    DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
                    "which use a superset of registers used by other "
                    "formulae.\n");

    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        // Look for a formula with a constant or GV in a register. If the use
        // also has a formula with that same value in an immediate field,
        // delete the one that uses a register.
        for (SmallVectorImpl<const SCEV *>::const_iterator
             I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) {
          if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) {
            Formula NewF = F;
            NewF.AM.BaseOffs += C->getValue()->getSExtValue();
            NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                                (I - F.BaseRegs.begin()));
            if (LU.HasFormulaWithSameRegs(NewF)) {
              DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
              LU.DeleteFormula(F);
              --i;
              --e;
              Any = true;
              break;
            }
          } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) {
            if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue()))
              if (!F.AM.BaseGV) {
                Formula NewF = F;
                NewF.AM.BaseGV = GV;
                NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                                    (I - F.BaseRegs.begin()));
                if (LU.HasFormulaWithSameRegs(NewF)) {
                  DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
                        dbgs() << '\n');
                  LU.DeleteFormula(F);
                  --i;
                  --e;
                  Any = true;
                  break;
                }
              }
          }
        }
      }
      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}

/// NarrowSearchSpaceByCollapsingUnrolledCode - When there are many registers
/// for expressions like A, A+1, A+2, etc., allocate a single register for
/// them.
void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    DEBUG(dbgs() << "The search space is too complex.\n");

    DEBUG(dbgs() << "Narrowing the search space by assuming that uses "
                    "separated by a constant offset will use the same "
                    "registers.\n");

    // This is especially useful for unrolled loops.
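    // For example, after 4x unrolling, a loop may address A, A+1, A+2, and
    // A+3 in separate uses; if the small offsets can be folded into the
    // using instructions, a single register for A serves all four.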
    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
           E = LU.Formulae.end(); I != E; ++I) {
        const Formula &F = *I;
        if (F.AM.BaseOffs != 0 && F.AM.Scale == 0) {
          if (LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU)) {
            if (reconcileNewOffset(*LUThatHas, F.AM.BaseOffs,
                                   /*HasBaseReg=*/false,
                                   LU.Kind, LU.AccessTy)) {
              DEBUG(dbgs() << "  Deleting use "; LU.print(dbgs());
                    dbgs() << '\n');

              LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;

              // Update the fixups to reference the new use.
              for (SmallVectorImpl<LSRFixup>::iterator I = Fixups.begin(),
                   E = Fixups.end(); I != E; ++I) {
                LSRFixup &Fixup = *I;
                if (Fixup.LUIdx == LUIdx) {
                  Fixup.LUIdx = LUThatHas - &Uses.front();
                  Fixup.Offset += F.AM.BaseOffs;
                  // Add the new offset to LUThatHas' offset list.
                  if (LUThatHas->Offsets.back() != Fixup.Offset) {
                    LUThatHas->Offsets.push_back(Fixup.Offset);
                    if (Fixup.Offset > LUThatHas->MaxOffset)
                      LUThatHas->MaxOffset = Fixup.Offset;
                    if (Fixup.Offset < LUThatHas->MinOffset)
                      LUThatHas->MinOffset = Fixup.Offset;
                  }
                  DEBUG(dbgs() << "New fixup has offset "
                               << Fixup.Offset << '\n');
                }
                if (Fixup.LUIdx == NumUses-1)
                  Fixup.LUIdx = LUIdx;
              }

              // Delete formulae from the new use which are no longer legal.
              bool Any = false;
              for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
                Formula &F = LUThatHas->Formulae[i];
                if (!isLegalUse(F.AM,
                                LUThatHas->MinOffset, LUThatHas->MaxOffset,
                                LUThatHas->Kind, LUThatHas->AccessTy, TLI)) {
                  DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
                        dbgs() << '\n');
                  LUThatHas->DeleteFormula(F);
                  --i;
                  --e;
                  Any = true;
                }
              }
              if (Any)
                LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);

              // Delete the old use.
              DeleteUse(LU, LUIdx);
              --LUIdx;
              --NumUses;
              break;
            }
          }
        }
      }
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}

/// NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters - Call
/// FilterOutUndesirableDedicatedRegisters again, if necessary, now that
/// we've done more filtering, as it may be able to find more formulae to
/// eliminate.
void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    DEBUG(dbgs() << "The search space is too complex.\n");

    DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
                    "undesirable dedicated registers.\n");

    FilterOutUndesirableDedicatedRegisters();

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}

/// NarrowSearchSpaceByPickingWinnerRegs - Pick a register which seems likely
/// to be profitable, and then in any use which has any reference to that
/// register, delete all formulae which do not reference that register.
void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
  // With all other options exhausted, loop until the system is simple
  // enough to handle.
  SmallPtrSet<const SCEV *, 4> Taken;
  while (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    // Ok, we have too many formulae on our hands to conveniently handle.
    // Use a rough heuristic to thin out the list.
    DEBUG(dbgs() << "The search space is too complex.\n");

    // Pick the register which is used by the most LSRUses, which is likely
    // to be a good reuse register candidate.
    const SCEV *Best = 0;
    unsigned BestNum = 0;
    for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
         I != E; ++I) {
      const SCEV *Reg = *I;
      if (Taken.count(Reg))
        continue;
      if (!Best) {
        // Record the first candidate's use count too, so that a later
        // candidate with fewer uses can't displace it.
        Best = Reg;
        BestNum = RegUses.getUsedByIndices(Reg).count();
      } else {
        unsigned Count = RegUses.getUsedByIndices(Reg).count();
        if (Count > BestNum) {
          Best = Reg;
          BestNum = Count;
        }
      }
    }

    DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
                 << " will yield profitable reuse.\n");
    Taken.insert(Best);

    // In any use with formulae which reference this register, delete
    // formulae which don't reference it.
    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      if (!LU.Regs.count(Best)) continue;

      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        if (!F.referencesReg(Best)) {
          DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
          LU.DeleteFormula(F);
          --e;
          --i;
          Any = true;
          assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?");
          continue;
        }
      }

      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}

/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
/// formulae to choose from, use some rough heuristics to prune down the number
/// of formulae. This keeps the main solver from taking an extraordinary amount
/// of time in some worst-case scenarios.
void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
  NarrowSearchSpaceByDetectingSupersets();
  NarrowSearchSpaceByCollapsingUnrolledCode();
  NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  NarrowSearchSpaceByPickingWinnerRegs();
}

/// SolveRecurse - This is the recursive solver.
void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                               Cost &SolutionCost,
                               SmallVectorImpl<const Formula *> &Workspace,
                               const Cost &CurCost,
                               const SmallPtrSet<const SCEV *, 16> &CurRegs,
                               DenseSet<const SCEV *> &VisitedRegs) const {
  // Some ideas:
  //  - prune more:
  //    - use more aggressive filtering
  //    - sort the formulae so that the most profitable solutions are found
  //      first
  //    - sort the uses too
  //  - search faster:
  //    - don't compute a cost, and then compare. compare while computing a
  //      cost and bail early.
  //    - track register sets with SmallBitVector

  const LSRUse &LU = Uses[Workspace.size()];

  // If this use references any register that's already a part of the
  // in-progress solution, consider it a requirement that a formula must
  // reference that register in order to be considered. This prunes out
  // unprofitable searching.
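  // (Registers already in the partial solution are effectively free for
  // this use, so formulae that fail to reuse them are unlikely to be part
  // of the best overall solution.)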
  SmallSetVector<const SCEV *, 4> ReqRegs;
  for (SmallPtrSet<const SCEV *, 16>::const_iterator I = CurRegs.begin(),
       E = CurRegs.end(); I != E; ++I)
    if (LU.Regs.count(*I))
      ReqRegs.insert(*I);

  bool AnySatisfiedReqRegs = false;
  SmallPtrSet<const SCEV *, 16> NewRegs;
  Cost NewCost;
retry:
  for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
       E = LU.Formulae.end(); I != E; ++I) {
    const Formula &F = *I;

    // Ignore formulae which do not use any of the required registers.
    for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(),
         JE = ReqRegs.end(); J != JE; ++J) {
      const SCEV *Reg = *J;
      if ((!F.ScaledReg || F.ScaledReg != Reg) &&
          std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) ==
          F.BaseRegs.end())
        goto skip;
    }
    AnySatisfiedReqRegs = true;

    // Evaluate the cost of the current formula. If it's already worse than
    // the current best, prune the search at that point.
    NewCost = CurCost;
    NewRegs = CurRegs;
    NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT);
    if (NewCost < SolutionCost) {
      Workspace.push_back(&F);
      if (Workspace.size() != Uses.size()) {
        SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
                     NewRegs, VisitedRegs);
        if (F.getNumRegs() == 1 && Workspace.size() == 1)
          VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
      } else {
        DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
              dbgs() << ".\n Regs:";
              for (SmallPtrSet<const SCEV *, 16>::const_iterator
                   I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I)
                dbgs() << ' ' << **I;
              dbgs() << '\n');

        SolutionCost = NewCost;
        Solution = Workspace;
      }
      Workspace.pop_back();
    }
  skip:;
  }

  if (!EnableRetry && !AnySatisfiedReqRegs)
    return;

  // If none of the formulae had all of the required registers, relax the
  // constraint so that we don't exclude all formulae.
  if (!AnySatisfiedReqRegs) {
    assert(!ReqRegs.empty() && "Solver failed even without required registers");
    ReqRegs.clear();
    goto retry;
  }
}

/// Solve - Choose one formula from each use. Return the results in the given
/// Solution vector.
void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
  SmallVector<const Formula *, 8> Workspace;
  Cost SolutionCost;
  SolutionCost.Loose();
  Cost CurCost;
  SmallPtrSet<const SCEV *, 16> CurRegs;
  DenseSet<const SCEV *> VisitedRegs;
  Workspace.reserve(Uses.size());

  // SolveRecurse does all the work.
  SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
               CurRegs, VisitedRegs);
  if (Solution.empty()) {
    DEBUG(dbgs() << "\nNo Satisfactory Solution\n");
    return;
  }

  // Ok, we've now made all our decisions.
  DEBUG(dbgs() << "\n"
                  "The chosen solution requires "; SolutionCost.print(dbgs());
        dbgs() << ":\n";
        for (size_t i = 0, e = Uses.size(); i != e; ++i) {
          dbgs() << "  ";
          Uses[i].print(dbgs());
          dbgs() << "\n"
                    "    ";
          Solution[i]->print(dbgs());
          dbgs() << '\n';
        });

  assert(Solution.size() == Uses.size() && "Malformed solution!");
}

/// HoistInsertPosition - Helper for AdjustInsertPositionForExpand. Climb up
/// the dominator tree as far as we can go while still being dominated by
/// the input positions.
/// This helps canonicalize the insert position, which
/// encourages sharing.
BasicBlock::iterator
LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
                                 const SmallVectorImpl<Instruction *> &Inputs)
                                 const {
  for (;;) {
    const Loop *IPLoop = LI.getLoopFor(IP->getParent());
    unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;

    BasicBlock *IDom;
    for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
      if (!Rung) return IP;
      Rung = Rung->getIDom();
      if (!Rung) return IP;
      IDom = Rung->getBlock();

      // Don't climb into a loop though.
      const Loop *IDomLoop = LI.getLoopFor(IDom);
      unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
      if (IDomDepth <= IPLoopDepth &&
          (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
        break;
    }

    bool AllDominate = true;
    Instruction *BetterPos = 0;
    Instruction *Tentative = IDom->getTerminator();
    for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
         E = Inputs.end(); I != E; ++I) {
      Instruction *Inst = *I;
      if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
        AllDominate = false;
        break;
      }
      // Attempt to find an insert position in the middle of the block,
      // instead of at the end, so that it can be used for other expansions.
      if (IDom == Inst->getParent() &&
          (!BetterPos || DT.dominates(BetterPos, Inst)))
        BetterPos = llvm::next(BasicBlock::iterator(Inst));
    }
    if (!AllDominate)
      break;
    if (BetterPos)
      IP = BetterPos;
    else
      IP = Tentative;
  }

  return IP;
}

/// AdjustInsertPositionForExpand - Determine an input position which will be
/// dominated by the operands and which will dominate the result.
BasicBlock::iterator
LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
                                           const LSRFixup &LF,
                                           const LSRUse &LU,
                                           SCEVExpander &Rewriter) const {
  // Collect some instructions which must be dominated by the
  // expanding replacement. These must be dominated by any operands that
  // will be required in the expansion.
  SmallVector<Instruction *, 4> Inputs;
  if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
    Inputs.push_back(I);
  if (LU.Kind == LSRUse::ICmpZero)
    if (Instruction *I =
          dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
      Inputs.push_back(I);
  if (LF.PostIncLoops.count(L)) {
    if (LF.isUseFullyOutsideLoop(L))
      Inputs.push_back(L->getLoopLatch()->getTerminator());
    else
      Inputs.push_back(IVIncInsertPos);
  }
  // The expansion must also be dominated by the increment positions of any
  // loops for which it is using post-inc mode.
  for (PostIncLoopSet::const_iterator I = LF.PostIncLoops.begin(),
       E = LF.PostIncLoops.end(); I != E; ++I) {
    const Loop *PIL = *I;
    if (PIL == L) continue;

    // Be dominated by the loop exit.
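    // (Specifically, by the nearest common dominator of all of the loop's
    // exiting blocks, computed below.)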
4168 SmallVector<BasicBlock *, 4> ExitingBlocks; 4169 PIL->getExitingBlocks(ExitingBlocks); 4170 if (!ExitingBlocks.empty()) { 4171 BasicBlock *BB = ExitingBlocks[0]; 4172 for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i) 4173 BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]); 4174 Inputs.push_back(BB->getTerminator()); 4175 } 4176 } 4177 4178 assert(!isa<PHINode>(LowestIP) && !isa<LandingPadInst>(LowestIP) 4179 && !isa<DbgInfoIntrinsic>(LowestIP) && 4180 "Insertion point must be a normal instruction"); 4181 4182 // Then, climb up the immediate dominator tree as far as we can go while 4183 // still being dominated by the input positions. 4184 BasicBlock::iterator IP = HoistInsertPosition(LowestIP, Inputs); 4185 4186 // Don't insert instructions before PHI nodes. 4187 while (isa<PHINode>(IP)) ++IP; 4188 4189 // Ignore landingpad instructions. 4190 while (isa<LandingPadInst>(IP)) ++IP; 4191 4192 // Ignore debug intrinsics. 4193 while (isa<DbgInfoIntrinsic>(IP)) ++IP; 4194 4195 // Set IP below instructions recently inserted by SCEVExpander. This keeps the 4196 // IP consistent across expansions and allows the previously inserted 4197 // instructions to be reused by subsequent expansion. 4198 while (Rewriter.isInsertedInstruction(IP) && IP != LowestIP) ++IP; 4199 4200 return IP; 4201} 4202 4203/// Expand - Emit instructions for the leading candidate expression for this 4204/// LSRUse (this is called "expanding"). 4205Value *LSRInstance::Expand(const LSRFixup &LF, 4206 const Formula &F, 4207 BasicBlock::iterator IP, 4208 SCEVExpander &Rewriter, 4209 SmallVectorImpl<WeakVH> &DeadInsts) const { 4210 const LSRUse &LU = Uses[LF.LUIdx]; 4211 4212 // Determine an input position which will be dominated by the operands and 4213 // which will dominate the result. 4214 IP = AdjustInsertPositionForExpand(IP, LF, LU, Rewriter); 4215 4216 // Inform the Rewriter if we have a post-increment use, so that it can 4217 // perform an advantageous expansion. 4218 Rewriter.setPostInc(LF.PostIncLoops); 4219 4220 // This is the type that the user actually needs. 4221 Type *OpTy = LF.OperandValToReplace->getType(); 4222 // This will be the type that we'll initially expand to. 4223 Type *Ty = F.getType(); 4224 if (!Ty) 4225 // No type known; just expand directly to the ultimate type. 4226 Ty = OpTy; 4227 else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy)) 4228 // Expand directly to the ultimate type if it's the right size. 4229 Ty = OpTy; 4230 // This is the type to do integer arithmetic in. 4231 Type *IntTy = SE.getEffectiveSCEVType(Ty); 4232 4233 // Build up a list of operands to add together to form the full base. 4234 SmallVector<const SCEV *, 8> Ops; 4235 4236 // Expand the BaseRegs portion. 4237 for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(), 4238 E = F.BaseRegs.end(); I != E; ++I) { 4239 const SCEV *Reg = *I; 4240 assert(!Reg->isZero() && "Zero allocated in a base register!"); 4241 4242 // If we're expanding for a post-inc user, make the post-inc adjustment. 4243 PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops); 4244 Reg = TransformForPostIncUse(Denormalize, Reg, 4245 LF.UserInst, LF.OperandValToReplace, 4246 Loops, SE, DT); 4247 4248 Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP))); 4249 } 4250 4251 // Flush the operand list to suppress SCEVExpander hoisting. 
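  // (Materializing the partial sum now and rewrapping the result as an
  // opaque SCEVUnknown keeps the expander from hoisting or reassociating
  // these pieces during later expansion steps.)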
4252 if (!Ops.empty()) { 4253 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP); 4254 Ops.clear(); 4255 Ops.push_back(SE.getUnknown(FullV)); 4256 } 4257 4258 // Expand the ScaledReg portion. 4259 Value *ICmpScaledV = 0; 4260 if (F.AM.Scale != 0) { 4261 const SCEV *ScaledS = F.ScaledReg; 4262 4263 // If we're expanding for a post-inc user, make the post-inc adjustment. 4264 PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops); 4265 ScaledS = TransformForPostIncUse(Denormalize, ScaledS, 4266 LF.UserInst, LF.OperandValToReplace, 4267 Loops, SE, DT); 4268 4269 if (LU.Kind == LSRUse::ICmpZero) { 4270 // An interesting way of "folding" with an icmp is to use a negated 4271 // scale, which we'll implement by inserting it into the other operand 4272 // of the icmp. 4273 assert(F.AM.Scale == -1 && 4274 "The only scale supported by ICmpZero uses is -1!"); 4275 ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP); 4276 } else { 4277 // Otherwise just expand the scaled register and an explicit scale, 4278 // which is expected to be matched as part of the address. 4279 ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP)); 4280 ScaledS = SE.getMulExpr(ScaledS, 4281 SE.getConstant(ScaledS->getType(), F.AM.Scale)); 4282 Ops.push_back(ScaledS); 4283 4284 // Flush the operand list to suppress SCEVExpander hoisting. 4285 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP); 4286 Ops.clear(); 4287 Ops.push_back(SE.getUnknown(FullV)); 4288 } 4289 } 4290 4291 // Expand the GV portion. 4292 if (F.AM.BaseGV) { 4293 Ops.push_back(SE.getUnknown(F.AM.BaseGV)); 4294 4295 // Flush the operand list to suppress SCEVExpander hoisting. 4296 Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP); 4297 Ops.clear(); 4298 Ops.push_back(SE.getUnknown(FullV)); 4299 } 4300 4301 // Expand the immediate portion. 4302 int64_t Offset = (uint64_t)F.AM.BaseOffs + LF.Offset; 4303 if (Offset != 0) { 4304 if (LU.Kind == LSRUse::ICmpZero) { 4305 // The other interesting way of "folding" with an ICmpZero is to use a 4306 // negated immediate. 4307 if (!ICmpScaledV) 4308 ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset); 4309 else { 4310 Ops.push_back(SE.getUnknown(ICmpScaledV)); 4311 ICmpScaledV = ConstantInt::get(IntTy, Offset); 4312 } 4313 } else { 4314 // Just add the immediate values. These again are expected to be matched 4315 // as part of the address. 4316 Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset))); 4317 } 4318 } 4319 4320 // Expand the unfolded offset portion. 4321 int64_t UnfoldedOffset = F.UnfoldedOffset; 4322 if (UnfoldedOffset != 0) { 4323 // Just add the immediate values. 4324 Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, 4325 UnfoldedOffset))); 4326 } 4327 4328 // Emit instructions summing all the operands. 4329 const SCEV *FullS = Ops.empty() ? 4330 SE.getConstant(IntTy, 0) : 4331 SE.getAddExpr(Ops); 4332 Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP); 4333 4334 // We're done expanding now, so reset the rewriter. 4335 Rewriter.clearPostInc(); 4336 4337 // An ICmpZero Formula represents an ICmp which we're handling as a 4338 // comparison against zero. Now that we've expanded an expression for that 4339 // form, update the ICmp's other operand. 
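  // For example, an original (icmp eq %i.next, %n) is modeled here as
  // (%i.next - %n) == 0; whatever part of that expression was not folded
  // into the main expansion is placed into the icmp's other operand below.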
4340 if (LU.Kind == LSRUse::ICmpZero) { 4341 ICmpInst *CI = cast<ICmpInst>(LF.UserInst); 4342 DeadInsts.push_back(CI->getOperand(1)); 4343 assert(!F.AM.BaseGV && "ICmp does not support folding a global value and " 4344 "a scale at the same time!"); 4345 if (F.AM.Scale == -1) { 4346 if (ICmpScaledV->getType() != OpTy) { 4347 Instruction *Cast = 4348 CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false, 4349 OpTy, false), 4350 ICmpScaledV, OpTy, "tmp", CI); 4351 ICmpScaledV = Cast; 4352 } 4353 CI->setOperand(1, ICmpScaledV); 4354 } else { 4355 assert(F.AM.Scale == 0 && 4356 "ICmp does not support folding a global value and " 4357 "a scale at the same time!"); 4358 Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy), 4359 -(uint64_t)Offset); 4360 if (C->getType() != OpTy) 4361 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false, 4362 OpTy, false), 4363 C, OpTy); 4364 4365 CI->setOperand(1, C); 4366 } 4367 } 4368 4369 return FullV; 4370} 4371 4372/// RewriteForPHI - Helper for Rewrite. PHI nodes are special because the use 4373/// of their operands effectively happens in their predecessor blocks, so the 4374/// expression may need to be expanded in multiple places. 4375void LSRInstance::RewriteForPHI(PHINode *PN, 4376 const LSRFixup &LF, 4377 const Formula &F, 4378 SCEVExpander &Rewriter, 4379 SmallVectorImpl<WeakVH> &DeadInsts, 4380 Pass *P) const { 4381 DenseMap<BasicBlock *, Value *> Inserted; 4382 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) 4383 if (PN->getIncomingValue(i) == LF.OperandValToReplace) { 4384 BasicBlock *BB = PN->getIncomingBlock(i); 4385 4386 // If this is a critical edge, split the edge so that we do not insert 4387 // the code on all predecessor/successor paths. We do this unless this 4388 // is the canonical backedge for this loop, which complicates post-inc 4389 // users. 4390 if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 && 4391 !isa<IndirectBrInst>(BB->getTerminator())) { 4392 BasicBlock *Parent = PN->getParent(); 4393 Loop *PNLoop = LI.getLoopFor(Parent); 4394 if (!PNLoop || Parent != PNLoop->getHeader()) { 4395 // Split the critical edge. 4396 BasicBlock *NewBB = 0; 4397 if (!Parent->isLandingPad()) { 4398 NewBB = SplitCriticalEdge(BB, Parent, P, 4399 /*MergeIdenticalEdges=*/true, 4400 /*DontDeleteUselessPhis=*/true); 4401 } else { 4402 SmallVector<BasicBlock*, 2> NewBBs; 4403 SplitLandingPadPredecessors(Parent, BB, "", "", P, NewBBs); 4404 NewBB = NewBBs[0]; 4405 } 4406 4407 // If PN is outside of the loop and BB is in the loop, we want to 4408 // move the block to be immediately before the PHI block, not 4409 // immediately after BB. 4410 if (L->contains(BB) && !L->contains(PN)) 4411 NewBB->moveBefore(PN->getParent()); 4412 4413 // Splitting the edge can reduce the number of PHI entries we have. 4414 e = PN->getNumIncomingValues(); 4415 BB = NewBB; 4416 i = PN->getBasicBlockIndex(BB); 4417 } 4418 } 4419 4420 std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair = 4421 Inserted.insert(std::make_pair(BB, static_cast<Value *>(0))); 4422 if (!Pair.second) 4423 PN->setIncomingValue(i, Pair.first->second); 4424 else { 4425 Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts); 4426 4427 // If this is reuse-by-noop-cast, insert the noop cast. 
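      // (Reuse across types is permitted when the conversion is a no-op,
      // e.g. between a pointer and an integer of the same width; the cast
      // re-establishes the type the PHI operand expects.)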
4428 Type *OpTy = LF.OperandValToReplace->getType(); 4429 if (FullV->getType() != OpTy) 4430 FullV = 4431 CastInst::Create(CastInst::getCastOpcode(FullV, false, 4432 OpTy, false), 4433 FullV, LF.OperandValToReplace->getType(), 4434 "tmp", BB->getTerminator()); 4435 4436 PN->setIncomingValue(i, FullV); 4437 Pair.first->second = FullV; 4438 } 4439 } 4440} 4441 4442/// Rewrite - Emit instructions for the leading candidate expression for this 4443/// LSRUse (this is called "expanding"), and update the UserInst to reference 4444/// the newly expanded value. 4445void LSRInstance::Rewrite(const LSRFixup &LF, 4446 const Formula &F, 4447 SCEVExpander &Rewriter, 4448 SmallVectorImpl<WeakVH> &DeadInsts, 4449 Pass *P) const { 4450 // First, find an insertion point that dominates UserInst. For PHI nodes, 4451 // find the nearest block which dominates all the relevant uses. 4452 if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) { 4453 RewriteForPHI(PN, LF, F, Rewriter, DeadInsts, P); 4454 } else { 4455 Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts); 4456 4457 // If this is reuse-by-noop-cast, insert the noop cast. 4458 Type *OpTy = LF.OperandValToReplace->getType(); 4459 if (FullV->getType() != OpTy) { 4460 Instruction *Cast = 4461 CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false), 4462 FullV, OpTy, "tmp", LF.UserInst); 4463 FullV = Cast; 4464 } 4465 4466 // Update the user. ICmpZero is handled specially here (for now) because 4467 // Expand may have updated one of the operands of the icmp already, and 4468 // its new value may happen to be equal to LF.OperandValToReplace, in 4469 // which case doing replaceUsesOfWith leads to replacing both operands 4470 // with the same value. TODO: Reorganize this. 4471 if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero) 4472 LF.UserInst->setOperand(0, FullV); 4473 else 4474 LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV); 4475 } 4476 4477 DeadInsts.push_back(LF.OperandValToReplace); 4478} 4479 4480/// ImplementSolution - Rewrite all the fixup locations with new values, 4481/// following the chosen solution. 4482void 4483LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution, 4484 Pass *P) { 4485 // Keep track of instructions we may have made dead, so that 4486 // we can remove them after we are done working. 4487 SmallVector<WeakVH, 16> DeadInsts; 4488 4489 SCEVExpander Rewriter(SE, "lsr"); 4490#ifndef NDEBUG 4491 Rewriter.setDebugType(DEBUG_TYPE); 4492#endif 4493 Rewriter.disableCanonicalMode(); 4494 Rewriter.enableLSRMode(); 4495 Rewriter.setIVIncInsertPos(L, IVIncInsertPos); 4496 4497 // Mark phi nodes that terminate chains so the expander tries to reuse them. 4498 for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(), 4499 ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) { 4500 if (PHINode *PN = dyn_cast<PHINode>(ChainI->back().UserInst)) 4501 Rewriter.setChainedPhi(PN); 4502 } 4503 4504 // Expand the new value definitions and update the users. 4505 for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(), 4506 E = Fixups.end(); I != E; ++I) { 4507 const LSRFixup &Fixup = *I; 4508 4509 Rewrite(Fixup, *Solution[Fixup.LUIdx], Rewriter, DeadInsts, P); 4510 4511 Changed = true; 4512 } 4513 4514 for (SmallVectorImpl<IVChain>::const_iterator ChainI = IVChainVec.begin(), 4515 ChainE = IVChainVec.end(); ChainI != ChainE; ++ChainI) { 4516 GenerateIVChain(*ChainI, Rewriter, DeadInsts); 4517 Changed = true; 4518 } 4519 // Clean up after ourselves. 
This must be done before deleting any 4520 // instructions. 4521 Rewriter.clear(); 4522 4523 Changed |= DeleteTriviallyDeadInstructions(DeadInsts); 4524} 4525 4526LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P) 4527 : IU(P->getAnalysis<IVUsers>()), 4528 SE(P->getAnalysis<ScalarEvolution>()), 4529 DT(P->getAnalysis<DominatorTree>()), 4530 LI(P->getAnalysis<LoopInfo>()), 4531 TLI(tli), L(l), Changed(false), IVIncInsertPos(0) { 4532 4533 // If LoopSimplify form is not available, stay out of trouble. 4534 if (!L->isLoopSimplifyForm()) 4535 return; 4536 4537 // All dominating loops must have preheaders, or SCEVExpander may not be able 4538 // to materialize an AddRecExpr whose Start is an outer AddRecExpr. 4539 // 4540 // FIXME: This is a little absurd. I think LoopSimplify should be taught 4541 // to create a preheader under any circumstance. 4542 for (DomTreeNode *Rung = DT.getNode(L->getLoopPreheader()); 4543 Rung; Rung = Rung->getIDom()) { 4544 BasicBlock *BB = Rung->getBlock(); 4545 const Loop *DomLoop = LI.getLoopFor(BB); 4546 if (DomLoop && DomLoop->getHeader() == BB) { 4547 if (!DomLoop->getLoopPreheader()) 4548 return; 4549 } 4550 } 4551 // If there's no interesting work to be done, bail early. 4552 if (IU.empty()) return; 4553 4554 DEBUG(dbgs() << "\nLSR on loop "; 4555 WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false); 4556 dbgs() << ":\n"); 4557 4558 // First, perform some low-level loop optimizations. 4559 OptimizeShadowIV(); 4560 OptimizeLoopTermCond(); 4561 4562 // If loop preparation eliminates all interesting IV users, bail. 4563 if (IU.empty()) return; 4564 4565 // Skip nested loops until we can model them better with formulae. 4566 if (!EnableNested && !L->empty()) { 4567 DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n"); 4568 return; 4569 } 4570 4571 // Start collecting data and preparing for the solver. 4572 CollectChains(); 4573 CollectInterestingTypesAndFactors(); 4574 CollectFixupsAndInitialFormulae(); 4575 CollectLoopInvariantFixupsAndFormulae(); 4576 4577 assert(!Uses.empty() && "IVUsers reported at least one use"); 4578 DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n"; 4579 print_uses(dbgs())); 4580 4581 // Now use the reuse data to generate a bunch of interesting ways 4582 // to formulate the values needed for the uses. 4583 GenerateAllReuseFormulae(); 4584 4585 FilterOutUndesirableDedicatedRegisters(); 4586 NarrowSearchSpaceUsingHeuristics(); 4587 4588 SmallVector<const Formula *, 8> Solution; 4589 Solve(Solution); 4590 4591 // Release memory that is no longer needed. 4592 Factors.clear(); 4593 Types.clear(); 4594 RegUses.clear(); 4595 4596 if (Solution.empty()) 4597 return; 4598 4599#ifndef NDEBUG 4600 // Formulae should be legal. 4601 for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(), 4602 E = Uses.end(); I != E; ++I) { 4603 const LSRUse &LU = *I; 4604 for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(), 4605 JE = LU.Formulae.end(); J != JE; ++J) 4606 assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset, 4607 LU.Kind, LU.AccessTy, TLI) && 4608 "Illegal formula generated!"); 4609 }; 4610#endif 4611 4612 // Now that we've decided what we want, make it so. 
4613 ImplementSolution(Solution, P); 4614} 4615 4616void LSRInstance::print_factors_and_types(raw_ostream &OS) const { 4617 if (Factors.empty() && Types.empty()) return; 4618 4619 OS << "LSR has identified the following interesting factors and types: "; 4620 bool First = true; 4621 4622 for (SmallSetVector<int64_t, 8>::const_iterator 4623 I = Factors.begin(), E = Factors.end(); I != E; ++I) { 4624 if (!First) OS << ", "; 4625 First = false; 4626 OS << '*' << *I; 4627 } 4628 4629 for (SmallSetVector<Type *, 4>::const_iterator 4630 I = Types.begin(), E = Types.end(); I != E; ++I) { 4631 if (!First) OS << ", "; 4632 First = false; 4633 OS << '(' << **I << ')'; 4634 } 4635 OS << '\n'; 4636} 4637 4638void LSRInstance::print_fixups(raw_ostream &OS) const { 4639 OS << "LSR is examining the following fixup sites:\n"; 4640 for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(), 4641 E = Fixups.end(); I != E; ++I) { 4642 dbgs() << " "; 4643 I->print(OS); 4644 OS << '\n'; 4645 } 4646} 4647 4648void LSRInstance::print_uses(raw_ostream &OS) const { 4649 OS << "LSR is examining the following uses:\n"; 4650 for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(), 4651 E = Uses.end(); I != E; ++I) { 4652 const LSRUse &LU = *I; 4653 dbgs() << " "; 4654 LU.print(OS); 4655 OS << '\n'; 4656 for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(), 4657 JE = LU.Formulae.end(); J != JE; ++J) { 4658 OS << " "; 4659 J->print(OS); 4660 OS << '\n'; 4661 } 4662 } 4663} 4664 4665void LSRInstance::print(raw_ostream &OS) const { 4666 print_factors_and_types(OS); 4667 print_fixups(OS); 4668 print_uses(OS); 4669} 4670 4671void LSRInstance::dump() const { 4672 print(errs()); errs() << '\n'; 4673} 4674 4675namespace { 4676 4677class LoopStrengthReduce : public LoopPass { 4678 /// TLI - Keep a pointer of a TargetLowering to consult for determining 4679 /// transformation profitability. 4680 const TargetLowering *const TLI; 4681 4682public: 4683 static char ID; // Pass ID, replacement for typeid 4684 explicit LoopStrengthReduce(const TargetLowering *tli = 0); 4685 4686private: 4687 bool runOnLoop(Loop *L, LPPassManager &LPM); 4688 void getAnalysisUsage(AnalysisUsage &AU) const; 4689}; 4690 4691} 4692 4693char LoopStrengthReduce::ID = 0; 4694INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce", 4695 "Loop Strength Reduction", false, false) 4696INITIALIZE_PASS_DEPENDENCY(DominatorTree) 4697INITIALIZE_PASS_DEPENDENCY(ScalarEvolution) 4698INITIALIZE_PASS_DEPENDENCY(IVUsers) 4699INITIALIZE_PASS_DEPENDENCY(LoopInfo) 4700INITIALIZE_PASS_DEPENDENCY(LoopSimplify) 4701INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce", 4702 "Loop Strength Reduction", false, false) 4703 4704 4705Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) { 4706 return new LoopStrengthReduce(TLI); 4707} 4708 4709LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli) 4710 : LoopPass(ID), TLI(tli) { 4711 initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry()); 4712 } 4713 4714void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const { 4715 // We split critical edges, so we change the CFG. However, we do update 4716 // many analyses if they are around. 
4717 AU.addPreservedID(LoopSimplifyID); 4718 4719 AU.addRequired<LoopInfo>(); 4720 AU.addPreserved<LoopInfo>(); 4721 AU.addRequiredID(LoopSimplifyID); 4722 AU.addRequired<DominatorTree>(); 4723 AU.addPreserved<DominatorTree>(); 4724 AU.addRequired<ScalarEvolution>(); 4725 AU.addPreserved<ScalarEvolution>(); 4726 // Requiring LoopSimplify a second time here prevents IVUsers from running 4727 // twice, since LoopSimplify was invalidated by running ScalarEvolution. 4728 AU.addRequiredID(LoopSimplifyID); 4729 AU.addRequired<IVUsers>(); 4730 AU.addPreserved<IVUsers>(); 4731} 4732 4733bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) { 4734 bool Changed = false; 4735 4736 // Run the main LSR transformation. 4737 Changed |= LSRInstance(TLI, L, this).getChanged(); 4738 4739 // Remove any extra phis created by processing inner loops. 4740 Changed |= DeleteDeadPHIs(L->getHeader()); 4741 if (EnablePhiElim) { 4742 SmallVector<WeakVH, 16> DeadInsts; 4743 SCEVExpander Rewriter(getAnalysis<ScalarEvolution>(), "lsr"); 4744#ifndef NDEBUG 4745 Rewriter.setDebugType(DEBUG_TYPE); 4746#endif 4747 unsigned numFolded = Rewriter. 4748 replaceCongruentIVs(L, &getAnalysis<DominatorTree>(), DeadInsts, TLI); 4749 if (numFolded) { 4750 Changed = true; 4751 DeleteTriviallyDeadInstructions(DeadInsts); 4752 DeleteDeadPHIs(L->getHeader()); 4753 } 4754 } 4755 return Changed; 4756} 4757