LoopStrengthReduce.cpp revision 968cb939e5a00cb06aefafc89581645790c590b3
//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have the loop induction variable as one or more of their components.
// It rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>, however
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
//       instead of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

namespace {

/// RegSortData - This class holds data which is used to order reuse candidates.
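/// Illustrative example (not asserting anything beyond the fields declared
/// below): if LSRUse indices 0 and 2 both reference register %x, then %x's
/// UsedByIndices bit vector has bits 0 and 2 set, and UsedByIndices.count()
/// is 2, which is what print() reports as NumUses.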
class RegSortData {
public:
  /// UsedByIndices - This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  RegSortData() {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// RegUseTracker - Map register candidates to information about how they are
/// used.
class RegUseTracker {
  typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;

  RegUsesTy RegUses;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void CountRegister(const SCEV *Reg, size_t LUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  typedef SmallVectorImpl<const SCEV *>::iterator iterator;
  typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator;
  iterator begin() { return RegSequence.begin(); }
  iterator end()   { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const   { return RegSequence.end(); }
};

}

void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
    RegUses.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  if (!RegUses.count(Reg)) return false;
  const SmallBitVector &UsedByIndices =
    RegUses.find(Reg)->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUses.find(Reg);
  assert(I != RegUses.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUses.clear();
  RegSequence.clear();
}

namespace {

/// Formula - This class holds information that describes a formula for
/// computing a value satisfying a use. It may include broken-out immediates
/// and scaled registers.
struct Formula {
  /// AM - This is used to represent complex addressing, as well as other kinds
  /// of interesting uses.
  TargetLowering::AddrMode AM;

  /// BaseRegs - The list of "base" registers for this use. When this is
  /// non-empty, AM.HasBaseReg should be set to true.
  SmallVector<const SCEV *, 2> BaseRegs;

  /// ScaledReg - The 'scaled' register for this use. This should be non-null
  /// when AM.Scale is not zero.
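  ///
  /// Illustrative example: a formula printed as "reg(%a) + 4*reg(%b)" has
  /// BaseRegs == {%a}, ScaledReg == %b, and AM.Scale == 4 (see print() below).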
  const SCEV *ScaledReg;

  Formula() : ScaledReg(0) {}

  void InitialMatch(const SCEV *S, Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);

  unsigned getNumRegs() const;
  const Type *getType() const;

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE, DominatorTree &DT) {
  // Collect expressions which properly dominate the loop header.
  if (S->properlyDominates(L->getHeader(), &DT)) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      DoInitialMatch(*I, L, Good, Bad, SE, DT);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE, DT);
      DoInitialMatch(SE.getAddRecExpr(SE.getIntegerSCEV(0, AR->getType()),
                                      AR->getStepRecurrence(SE),
                                      AR->getLoop()),
                     L, Good, Bad, SE, DT);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE, DT);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
           E = MyGood.end(); I != E; ++I)
        Good.push_back(SE.getMulExpr(NegOne, *I));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
           E = MyBad.end(); I != E; ++I)
        Bad.push_back(SE.getMulExpr(NegOne, *I));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}

/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
/// attempting to keep all loop-invariant and loop-computable values in a
/// single base register.
void Formula::InitialMatch(const SCEV *S, Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE, DT);
  if (!Good.empty()) {
    BaseRegs.push_back(SE.getAddExpr(Good));
    AM.HasBaseReg = true;
  }
  if (!Bad.empty()) {
    BaseRegs.push_back(SE.getAddExpr(Bad));
    AM.HasBaseReg = true;
  }
}

/// getNumRegs - Return the total number of register operands used by this
/// formula. This does not include register uses implied by non-constant
/// addrec strides.
unsigned Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// getType - Return the type of this formula, if it has one, or null
/// otherwise. This type is meaningless except for the bit size.
const Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         AM.BaseGV ? AM.BaseGV->getType() :
         0;
}

/// referencesReg - Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg ||
         std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
}

/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
/// which are used by uses other than the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I)
    if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
      return true;
  return false;
}

void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (AM.BaseGV) {
    if (!First) OS << " + "; else First = false;
    WriteAsOperand(OS, AM.BaseGV, /*PrintType=*/false);
  }
  if (AM.BaseOffs != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.BaseOffs;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << **I << ')';
  }
  if (AM.Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
}

void Formula::dump() const {
  print(errs()); errs() << '\n';
}

/// getSDiv - Return an expression for LHS /s RHS, if it can be determined,
/// or null otherwise. If IgnoreSignificantBits is true, expressions like
/// (X * Y) /s Y are simplified to X, ignoring that the multiplication may
/// overflow, which is useful when the result will be used in a context where
/// the most significant bits are ignored.
static const SCEV *getSDiv(const SCEV *LHS, const SCEV *RHS,
                           ScalarEvolution &SE,
                           bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getIntegerSCEV(1, LHS->getType());

  // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do some
  // folding.
  if (RHS->isAllOnesValue())
    return SE.getMulExpr(LHS, RHS);

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
    if (!RC)
      return 0;
    if (C->getValue()->getValue().srem(RC->getValue()->getValue()) != 0)
      return 0;
    return SE.getConstant(C->getValue()->getValue()
               .sdiv(RC->getValue()->getValue()));
  }

  // Distribute the sdiv over addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    const SCEV *Start = getSDiv(AR->getStart(), RHS, SE,
                                IgnoreSignificantBits);
    if (!Start) return 0;
    const SCEV *Step = getSDiv(AR->getStepRecurrence(SE), RHS, SE,
                               IgnoreSignificantBits);
    if (!Step) return 0;
    return SE.getAddRecExpr(Start, Step, AR->getLoop());
  }

  // Distribute the sdiv over add operands.
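  // Illustrative example: ((8 * %x) + {0,+,8}) /s 8 recurses into each
  // operand and yields (%x + {0,+,1}), assuming each operand divides evenly
  // (the multiply case below additionally requires no-signed-wrap or
  // IgnoreSignificantBits).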
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    SmallVector<const SCEV *, 8> Ops;
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I) {
      const SCEV *Op = getSDiv(*I, RHS, SE,
                               IgnoreSignificantBits);
      if (!Op) return 0;
      Ops.push_back(Op);
    }
    return SE.getAddExpr(Ops);
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS))
    if (IgnoreSignificantBits || Mul->hasNoSignedWrap()) {
      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
           I != E; ++I) {
        if (!Found)
          if (const SCEV *Q = getSDiv(*I, RHS, SE, IgnoreSignificantBits)) {
            Ops.push_back(Q);
            Found = true;
            continue;
          }
        Ops.push_back(*I);
      }
      return Found ? SE.getMulExpr(Ops) : 0;
    }

  // Otherwise we don't know.
  return 0;
}

/// ExtractImmediate - If S involves the addition of a constant integer value,
/// return that integer value, and mutate S to point to a new SCEV with that
/// value excluded.
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getValue()->getValue().getMinSignedBits() <= 64) {
      S = SE.getIntegerSCEV(0, C->getType());
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    S = SE.getAddRecExpr(NewOps, AR->getLoop());
    return Result;
  }
  return 0;
}

/// ExtractSymbol - If S involves the addition of a GlobalValue address,
/// return that symbol, and mutate S to point to a new SCEV with that
/// value excluded.
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getIntegerSCEV(0, GV->getType());
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    S = SE.getAddRecExpr(NewOps, AR->getLoop());
    return Result;
  }
  return 0;
}

/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::prefetch:
    case Intrinsic::x86_sse2_loadu_dq:
    case Intrinsic::x86_sse2_loadu_pd:
    case Intrinsic::x86_sse_loadu_ps:
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      if (II->getOperand(1) == OperandVal)
        isAddress = true;
      break;
    }
  }
  return isAddress;
}

/// getAccessType - Return the type of the memory being accessed.
static const Type *getAccessType(const Instruction *Inst) {
  const Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getOperand(1)->getType();
      break;
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
  if (const PointerType *PTy = dyn_cast<PointerType>(AccessTy))
    AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                PTy->getAddressSpace());

  return AccessTy;
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
static bool
DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(DeadInsts.pop_back_val());

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

namespace {

/// Cost - This class is used to measure and compare candidate formulae.
class Cost {
  /// TODO: Some of these could be merged. Also, a lexical ordering
  /// isn't always optimal.
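  ///
  /// As an illustration of the lexical-ordering caveat: operator< below
  /// compares NumRegs first, so a formula using 2 registers always beats one
  /// using 3 registers, even if the latter would have a far lower ImmCost.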
  unsigned NumRegs;
  unsigned AddRecCost;
  unsigned NumIVMuls;
  unsigned NumBaseAdds;
  unsigned ImmCost;
  unsigned SetupCost;

public:
  Cost()
    : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
      SetupCost(0) {}

  unsigned getNumRegs() const { return NumRegs; }

  bool operator<(const Cost &Other) const;

  void Loose();

  void RateFormula(const Formula &F,
                   SmallPtrSet<const SCEV *, 16> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const Loop *L,
                   const SmallVectorImpl<int64_t> &Offsets,
                   ScalarEvolution &SE, DominatorTree &DT);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const SCEV *Reg,
                    SmallPtrSet<const SCEV *, 16> &Regs,
                    const Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);
  void RatePrimaryRegister(const SCEV *Reg,
                           SmallPtrSet<const SCEV *, 16> &Regs,
                           const Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT);
};

}

/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
                        SmallPtrSet<const SCEV *, 16> &Regs,
                        const Loop *L,
                        ScalarEvolution &SE, DominatorTree &DT) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    if (AR->getLoop() == L)
      AddRecCost += 1; /// TODO: This should be a function of the stride.

    // If this is an addrec for a loop that's already been visited by LSR,
    // don't second-guess its addrec phi nodes. LSR isn't currently smart
    // enough to reason about more than one loop at a time. Consider these
    // registers free and leave them alone.
    else if (L->contains(AR->getLoop()) ||
             (!AR->getLoop()->contains(L) &&
              DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))) {
      for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
           PHINode *PN = dyn_cast<PHINode>(I); ++I)
        if (SE.isSCEVable(PN->getType()) &&
            (SE.getEffectiveSCEVType(PN->getType()) ==
             SE.getEffectiveSCEVType(AR->getType())) &&
            SE.getSCEV(PN) == AR)
          return;

      // If this isn't one of the addrecs that the loop already has, it
      // would require a costly new phi and add. TODO: This isn't
      // precisely modeled right now.
      ++NumBaseAdds;
      if (!Regs.count(AR->getStart()))
        RateRegister(AR->getStart(), Regs, L, SE, DT);
    }

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1)))
      if (!Regs.count(AR->getStart()))
        RateRegister(AR->getOperand(1), Regs, L, SE, DT);
  }
  ++NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  if (!isa<SCEVUnknown>(Reg) &&
      !isa<SCEVConstant>(Reg) &&
      !(isa<SCEVAddRecExpr>(Reg) &&
        (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
         isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
    ++SetupCost;
}

/// RatePrimaryRegister - Record this register in the set. If we haven't seen
/// it before, rate it.
void Cost::RatePrimaryRegister(const SCEV *Reg,
                               SmallPtrSet<const SCEV *, 16> &Regs,
                               const Loop *L,
                               ScalarEvolution &SE, DominatorTree &DT) {
  if (Regs.insert(Reg))
    RateRegister(Reg, Regs, L, SE, DT);
}

void Cost::RateFormula(const Formula &F,
                       SmallPtrSet<const SCEV *, 16> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const Loop *L,
                       const SmallVectorImpl<int64_t> &Offsets,
                       ScalarEvolution &SE, DominatorTree &DT) {
  // Tally up the registers.
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(ScaledReg, Regs, L, SE, DT);
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (VisitedRegs.count(BaseReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(BaseReg, Regs, L, SE, DT);

    NumIVMuls += isa<SCEVMulExpr>(BaseReg) &&
                 BaseReg->hasComputableLoopEvolution(L);
  }

  if (F.BaseRegs.size() > 1)
    NumBaseAdds += F.BaseRegs.size() - 1;

  // Tally up the non-zero immediates.
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    int64_t Offset = (uint64_t)*I + F.AM.BaseOffs;
    if (F.AM.BaseGV)
      ImmCost += 64; // Handle symbolic values conservatively.
                     // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      ImmCost += APInt(64, Offset, true).getMinSignedBits();
  }
}

/// Loose - Set this cost to a losing value.
void Cost::Loose() {
  NumRegs = ~0u;
  AddRecCost = ~0u;
  NumIVMuls = ~0u;
  NumBaseAdds = ~0u;
  ImmCost = ~0u;
  SetupCost = ~0u;
}

/// operator< - Choose the lower cost.
bool Cost::operator<(const Cost &Other) const {
  if (NumRegs != Other.NumRegs)
    return NumRegs < Other.NumRegs;
  if (AddRecCost != Other.AddRecCost)
    return AddRecCost < Other.AddRecCost;
  if (NumIVMuls != Other.NumIVMuls)
    return NumIVMuls < Other.NumIVMuls;
  if (NumBaseAdds != Other.NumBaseAdds)
    return NumBaseAdds < Other.NumBaseAdds;
  if (ImmCost != Other.ImmCost)
    return ImmCost < Other.ImmCost;
  if (SetupCost != Other.SetupCost)
    return SetupCost < Other.SetupCost;
  return false;
}

void Cost::print(raw_ostream &OS) const {
  OS << NumRegs << " reg" << (NumRegs == 1 ? "" : "s");
  if (AddRecCost != 0)
    OS << ", with addrec cost " << AddRecCost;
  if (NumIVMuls != 0)
    OS << ", plus " << NumIVMuls << " IV mul" << (NumIVMuls == 1 ? "" : "s");
  if (NumBaseAdds != 0)
    OS << ", plus " << NumBaseAdds << " base add"
       << (NumBaseAdds == 1 ? "" : "s");
  if (ImmCost != 0)
    OS << ", plus " << ImmCost << " imm cost";
  if (SetupCost != 0)
    OS << ", plus " << SetupCost << " setup cost";
}

void Cost::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// LSRFixup - An operand value in an instruction which is to be replaced
/// with some equivalent, possibly strength-reduced, replacement.
struct LSRFixup {
  /// UserInst - The instruction which will be updated.
  Instruction *UserInst;

  /// OperandValToReplace - The operand of the instruction which will
  /// be replaced. The operand may be used more than once; every instance
  /// will be replaced.
  Value *OperandValToReplace;

  /// PostIncLoop - If this user is to use the post-incremented value of an
  /// induction variable, this variable is non-null and holds the loop
  /// associated with the induction variable.
  const Loop *PostIncLoop;

  /// LUIdx - The index of the LSRUse describing the expression which
  /// this fixup needs, minus an offset (below).
  size_t LUIdx;

  /// Offset - A constant offset to be added to the LSRUse expression.
  /// This allows multiple fixups to share the same LSRUse with different
  /// offsets, for example in an unrolled loop.
  int64_t Offset;

  LSRFixup();

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

LSRFixup::LSRFixup()
  : UserInst(0), OperandValToReplace(0), PostIncLoop(0),
    LUIdx(~size_t(0)), Offset(0) {}

void LSRFixup::print(raw_ostream &OS) const {
  OS << "UserInst=";
  // Store is common and interesting enough to be worth special-casing.
  if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
    OS << "store ";
    WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false);
  } else if (UserInst->getType()->isVoidTy())
    OS << UserInst->getOpcodeName();
  else
    WriteAsOperand(OS, UserInst, /*PrintType=*/false);

  OS << ", OperandValToReplace=";
  WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false);

  if (PostIncLoop) {
    OS << ", PostIncLoop=";
    WriteAsOperand(OS, PostIncLoop->getHeader(), /*PrintType=*/false);
  }

  if (LUIdx != ~size_t(0))
    OS << ", LUIdx=" << LUIdx;

  if (Offset != 0)
    OS << ", Offset=" << Offset;
}

void LSRFixup::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

/// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*.
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 2> getEmptyKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 2> getTombstoneKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 2> &V) {
    unsigned Result = 0;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = V.begin(),
         E = V.end(); I != E; ++I)
      Result ^= DenseMapInfo<const SCEV *>::getHashValue(*I);
    return Result;
  }

  static bool isEqual(const SmallVector<const SCEV *, 2> &LHS,
                      const SmallVector<const SCEV *, 2> &RHS) {
    return LHS == RHS;
  }
};

/// LSRUse - This class holds the state that LSR keeps for each use in
/// IVUsers, as well as uses invented by LSR itself. It includes information
/// about what kinds of things can be folded into the user, information about
/// the user itself, and information about how the use may be satisfied.
/// TODO: Represent multiple users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 2>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// KindType - An enum for a kind of use, indicating what types of
  /// scaled and immediate operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering
    ICmpZero ///< An equality icmp with both operands folded into one.
             // TODO: Add a generic icmp too?
  };

  KindType Kind;
  const Type *AccessTy;

  SmallVector<int64_t, 8> Offsets;
  int64_t MinOffset;
  int64_t MaxOffset;

  /// AllFixupsOutsideLoop - This records whether all of the fixups using this
  /// LSRUse are outside of the loop, in which case some special-case
  /// heuristics may be used.
  bool AllFixupsOutsideLoop;

  /// Formulae - A list of ways to build a value that can satisfy this user.
  /// After the list is populated, one of these is selected heuristically and
  /// used to formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// Regs - The set of register candidates used by all formulae in this
  /// LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, const Type *T) : Kind(K), AccessTy(T),
                                      MinOffset(INT64_MAX),
                                      MaxOffset(INT64_MIN),
                                      AllFixupsOutsideLoop(true) {}

  bool InsertFormula(size_t LUIdx, const Formula &F);

  void check() const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRUse::InsertFormula(size_t LUIdx, const Formula &F) {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (SmallVectorImpl<const SCEV *>::const_iterator I =
       F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
    assert(!(*I)->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  if (F.ScaledReg) Regs.insert(F.ScaledReg);
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());

  return true;
}

void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (AccessTy->isPointerTy())
      OS << "pointer"; // the full pointer type could be really verbose
    else
      OS << *AccessTy;
  }

  OS << ", Offsets={";
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    OS << *I;
    if (next(I) != E)
      OS << ',';
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";
}

void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}

/// isLegalUse - Test whether the use described by AM is "legal", meaning it
/// can be completely folded into the user instruction at isel time. This
/// includes address-mode folding and special icmp tricks.
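///
/// An illustrative Address example: on x86, AM = {BaseGV=0, BaseOffs=16,
/// HasBaseReg=true, Scale=4} corresponds to an operand like 16(%base,%index,4),
/// which isLegalAddressingMode would typically accept.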
static bool isLegalUse(const TargetLowering::AddrMode &AM,
                       LSRUse::KindType Kind, const Type *AccessTy,
                       const TargetLowering *TLI) {
  switch (Kind) {
  case LSRUse::Address:
    // If we have low-level target information, ask the target if it can
    // completely fold this address.
    if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);

    // Otherwise, just guess that reg+reg addressing is legal.
    return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal to
    // fold a GV into an ICmp.
    if (AM.BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (AM.Scale != 0 && AM.HasBaseReg && AM.BaseOffs != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale
    // by putting the scaled register in the other operand of the icmp.
    if (AM.Scale != 0 && AM.Scale != -1)
      return false;

    // If we have low-level target information, ask the target if it can fold
    // an integer immediate on an icmp.
    if (AM.BaseOffs != 0) {
      if (TLI) return TLI->isLegalICmpImmediate(-AM.BaseOffs);
      return false;
    }

    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !AM.BaseGV && AM.Scale == 0 && AM.BaseOffs == 0;

  case LSRUse::Special:
    // Only handle -1 scales, or no scale.
    return AM.Scale == 0 || AM.Scale == -1;
  }

  return false;
}

static bool isLegalUse(TargetLowering::AddrMode AM,
                       int64_t MinOffset, int64_t MaxOffset,
                       LSRUse::KindType Kind, const Type *AccessTy,
                       const TargetLowering *TLI) {
  // Check for overflow.
  if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
      (MinOffset > 0))
    return false;
  AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
  if (isLegalUse(AM, Kind, AccessTy, TLI)) {
    AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
    // Check for overflow.
    if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
        (MaxOffset > 0))
      return false;
    AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
    return isLegalUse(AM, Kind, AccessTy, TLI);
  }
  return false;
}

static bool isAlwaysFoldable(int64_t BaseOffs,
                             GlobalValue *BaseGV,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, const Type *AccessTy,
                             const TargetLowering *TLI,
                             ScalarEvolution &SE) {
  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(AM, Kind, AccessTy, TLI);
}

static bool isAlwaysFoldable(const SCEV *S,
                             int64_t MinOffset, int64_t MaxOffset,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, const Type *AccessTy,
                             const TargetLowering *TLI,
                             ScalarEvolution &SE) {
  // Fast-path: zero is always foldable.
  if (S->isZero()) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t BaseOffs = ExtractImmediate(S, SE);
  GlobalValue *BaseGV = ExtractSymbol(S, SE);

  // If there's anything else involved, it's not foldable.
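  // (ExtractImmediate and ExtractSymbol mutate S in place, so any remaining
  // non-zero expression here is a part that could not be stripped.)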
  if (!S->isZero()) return false;

  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
}

/// FormulaSorter - This class implements an ordering for formulae which sorts
/// them by their standalone cost.
class FormulaSorter {
  /// These two sets are kept empty, so that we compute standalone costs.
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;
  Loop *L;
  LSRUse *LU;
  ScalarEvolution &SE;
  DominatorTree &DT;

public:
  FormulaSorter(Loop *l, LSRUse &lu, ScalarEvolution &se, DominatorTree &dt)
    : L(l), LU(&lu), SE(se), DT(dt) {}

  bool operator()(const Formula &A, const Formula &B) {
    Cost CostA;
    CostA.RateFormula(A, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
    Regs.clear();
    Cost CostB;
    CostB.RateFormula(B, Regs, VisitedRegs, L, LU->Offsets, SE, DT);
    Regs.clear();
    return CostA < CostB;
  }
};

/// LSRInstance - This class holds state for the main loop strength reduction
/// logic.
class LSRInstance {
  IVUsers &IU;
  ScalarEvolution &SE;
  DominatorTree &DT;
  const TargetLowering *const TLI;
  Loop *const L;
  bool Changed;

  /// IVIncInsertPos - This is the insert position at which the current loop's
  /// induction variable increment should be placed. In simple loops, this is
  /// the latch block's terminator. But in more complicated cases, this is a
  /// position which will dominate all the in-loop post-increment users.
  Instruction *IVIncInsertPos;

  /// Factors - Interesting factors between use strides.
  SmallSetVector<int64_t, 8> Factors;

  /// Types - Interesting use types, to facilitate truncation reuse.
  SmallSetVector<const Type *, 4> Types;

  /// Fixups - The list of operands which are to be replaced.
  SmallVector<LSRFixup, 16> Fixups;

  /// Uses - The list of interesting uses.
  SmallVector<LSRUse, 16> Uses;

  /// RegUses - Track which uses use which register candidates.
  RegUseTracker RegUses;

  void OptimizeShadowIV();
  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
  ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
  bool OptimizeLoopTermCond();

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  // Support for sharing of LSRUses between LSRFixups.
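  // Illustrative example: fixups needing {16,+,4} and {0,+,4} can share one
  // LSRUse keyed by the immediate-stripped expression {0,+,4}, recording
  // Offsets 16 and 0; see getUse() and reconcileNewOffset() below.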
  typedef DenseMap<const SCEV *, size_t> UseMapTy;
  UseMapTy UseMap;

  bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
                          LSRUse::KindType Kind, const Type *AccessTy);

  std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
                                    LSRUse::KindType Kind,
                                    const Type *AccessTy);

public:
  void InsertInitialFormula(const SCEV *S, Loop *L, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    Cost &SolutionCost,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

  Value *Expand(const LSRFixup &LF,
                const Formula &F,
                BasicBlock::iterator IP, Loop *L, Instruction *IVIncInsertPos,
                SCEVExpander &Rewriter,
                SmallVectorImpl<WeakVH> &DeadInsts,
                ScalarEvolution &SE, DominatorTree &DT) const;
  void RewriteForPHI(PHINode *PN, const LSRFixup &LF,
                     const Formula &F,
                     Loop *L, Instruction *IVIncInsertPos,
                     SCEVExpander &Rewriter,
                     SmallVectorImpl<WeakVH> &DeadInsts,
                     ScalarEvolution &SE, DominatorTree &DT,
                     Pass *P) const;
  void Rewrite(const LSRFixup &LF,
               const Formula &F,
               Loop *L, Instruction *IVIncInsertPos,
               SCEVExpander &Rewriter,
               SmallVectorImpl<WeakVH> &DeadInsts,
               ScalarEvolution &SE, DominatorTree &DT,
               Pass *P) const;
  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                         Pass *P);

  LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);

  bool getChanged() const { return Changed; }

  void print_factors_and_types(raw_ostream &OS) const;
  void print_fixups(raw_ostream &OS) const;
  void print_uses(raw_ostream &OS) const;
  void print(raw_ostream &OS) const;
  void dump() const;
};

}

/// OptimizeShadowIV - If IV is used in an int-to-float cast
/// inside the loop then try to eliminate the cast operation.
void LSRInstance::OptimizeShadowIV() {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
       UI != E; /* empty */) {
    IVUsers::const_iterator CandidateUI = UI;
    ++UI;
    Instruction *ShadowUse = CandidateUI->getUser();
    const Type *DestTy = NULL;

    /* If shadow use is an int->float cast then insert a second IV
       to eliminate this cast.

         for (unsigned i = 0; i < n; ++i)
           foo((double)i);

       is transformed into

         double d = 0.0;
         for (unsigned i = 0; i < n; ++i, ++d)
           foo(d);
    */
    if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser()))
      DestTy = UCast->getDestTy();
    else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser()))
      DestTy = SCast->getDestTy();
    if (!DestTy) continue;

    if (TLI) {
      // If target does not support DestTy natively then do not apply
      // this transformation.
      EVT DVT = TLI->getValueType(DestTy);
      if (!TLI->isTypeLegal(DVT)) continue;
    }

    PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
    if (!PH) continue;
    if (PH->getNumIncomingValues() != 2) continue;

    const Type *SrcTy = PH->getType();
    int Mantissa = DestTy->getFPMantissaWidth();
    if (Mantissa == -1) continue;
    if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
      continue;

    unsigned Entry, Latch;
    if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
      Entry = 0;
      Latch = 1;
    } else {
      Entry = 1;
      Latch = 0;
    }

    ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
    if (!Init) continue;
    Constant *NewInit = ConstantFP::get(DestTy, Init->getZExtValue());

    BinaryOperator *Incr =
      dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
    if (!Incr) continue;
    if (Incr->getOpcode() != Instruction::Add
        && Incr->getOpcode() != Instruction::Sub)
      continue;

    /* Initialize new IV, double d = 0.0 in above example. */
    ConstantInt *C = NULL;
    if (Incr->getOperand(0) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(1));
    else if (Incr->getOperand(1) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(0));
    else
      continue;

    if (!C) continue;

    // Ignore negative constants, as the code below doesn't handle them
    // correctly. TODO: Remove this restriction.
    if (!C->getValue().isStrictlyPositive()) continue;

    /* Add new PHINode. */
    PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);

    /* Create new increment. '++d' in above example. */
    Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
    BinaryOperator *NewIncr =
      BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
                               Instruction::FAdd : Instruction::FSub,
                             NewPH, CFP, "IV.S.next.", Incr);

    NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
    NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

    /* Remove cast operation */
    ShadowUse->replaceAllUsesWith(NewPH);
    ShadowUse->eraseFromParent();
    break;
  }
}

/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LSRInstance::FindIVUserForCond(ICmpInst *Cond,
                                    IVStrideUse *&CondUse) {
  for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
    if (UI->getUser() == Cond) {
      // NOTE: we could handle setcc instructions with multiple uses here, but
      // InstCombine does it as well for simple uses, it's not clear that it
      // occurs enough in real life to handle.
      CondUse = UI;
      return true;
    }
  return false;
}

/// OptimizeMax - Rewrite the loop's terminating condition if it uses
/// a max computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// the trip count isn't just 'n', because 'n' might not be positive. And
/// unfortunately this can come up even for loops where the user didn't use
/// a C do-while loop. For example, seemingly well-behaved top-test loops
/// will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   max = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != max);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting this type of loop and
/// rewriting its condition from ICMP_NE back to ICMP_SLT, and deleting
/// the instructions for the maximum computation.
///
ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  const SCEV *One = SE.getIntegerSCEV(1, BackedgeTakenCount->getType());

  // Add one to the backedge-taken count to get the trip count.
  const SCEV *IterationCount = SE.getAddExpr(BackedgeTakenCount, One);

  // Check for a max calculation that matches the pattern.
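  // (For the motivating example in the comment above, IterationCount would
  // be smax(1, %n), a two-operand SCEVSMaxExpr.)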
  if (!isa<SCEVSMaxExpr>(IterationCount) && !isa<SCEVUMaxExpr>(IterationCount))
    return Cond;
  const SCEVNAryExpr *Max = cast<SCEVNAryExpr>(IterationCount);
  if (Max != SE.getSCEV(Sel)) return Cond;

  // To handle a max with more than two operands, this optimization would
  // require additional checking and setup.
  if (Max->getNumOperands() != 2)
    return Cond;

  const SCEV *MaxLHS = Max->getOperand(0);
  const SCEV *MaxRHS = Max->getOperand(1);
  if (!MaxLHS || MaxLHS != One) return Cond;

  // Check the relevant induction variable for conformance to
  // the pattern.
  const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AR || !AR->isAffine() ||
      AR->getStart() != One ||
      AR->getStepRecurrence(SE) != One)
    return Cond;

  assert(AR->getLoop() == L &&
         "Loop condition operand is an addrec in a different loop!");

  // Check the right operand of the select, and remember it, as it will
  // be used in the new comparison instruction.
  Value *NewRHS = 0;
  if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
    NewRHS = Sel->getOperand(1);
  else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
    NewRHS = Sel->getOperand(2);
  if (!NewRHS) return Cond;

  // Determine the new comparison opcode. It may be signed or unsigned,
  // and the original comparison may be either equality or inequality.
  CmpInst::Predicate Pred =
    isa<SCEVSMaxExpr>(Max) ? CmpInst::ICMP_SLT : CmpInst::ICMP_ULT;
  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
    Pred = CmpInst::getInversePredicate(Pred);

  // Ok, everything looks ok to change the condition into an SLT or SGE and
  // delete the max calculation.
  ICmpInst *NewCond =
    new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");

  // Delete the max calculation instructions.
  Cond->replaceAllUsesWith(NewCond);
  CondUse->setUser(NewCond);
  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
  Cond->eraseFromParent();
  Sel->eraseFromParent();
  if (Cmp->use_empty())
    Cmp->eraseFromParent();
  return NewCond;
}

/// OptimizeLoopTermCond - Change loop terminating condition to use the
/// postinc iv when possible.
bool
LSRInstance::OptimizeLoopTermCond() {
  SmallPtrSet<Instruction *, 4> PostIncs;

  BasicBlock *LatchBlock = L->getLoopLatch();
  SmallVector<BasicBlock*, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitingBlock = ExitingBlocks[i];

    // Get the terminating condition for the loop if possible. If we
    // can, we want to change it to use a post-incremented version of its
    // induction variable, to allow coalescing the live ranges for the IV into
    // one register value.

    BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
    if (!TermBr)
      continue;
    // FIXME: Overly conservative, termination condition could be an 'or' etc..
    if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
      continue;

    // Search IVUsesByStride to find Cond's IVUse if there is one.
    IVStrideUse *CondUse = 0;
    ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
    if (!FindIVUserForCond(Cond, CondUse))
      continue;

    // If the trip count is computed in terms of a max (due to ScalarEvolution
    // being unable to find a sufficient guard, for example), change the loop
    // comparison to use SLT or ULT instead of NE.
    // One consequence of doing this now is that it disrupts the count-down
    // optimization. That's not always a bad thing though, because in such
    // cases it may still be worthwhile to avoid a max.
    Cond = OptimizeMax(Cond, CondUse);

    // If this exiting block dominates the latch block, it may also use
    // the post-inc value if it won't be shared with other uses.
    // Check for dominance.
    if (!DT.dominates(ExitingBlock, LatchBlock))
      continue;

    // Conservatively avoid trying to use the post-inc value in non-latch
    // exits if there may be pre-inc users in intervening blocks.
    if (LatchBlock != ExitingBlock)
      for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
        // Test if the use is reachable from the exiting block. This dominator
        // query is a conservative approximation of reachability.
        if (&*UI != CondUse &&
            !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
          // Conservatively assume there may be reuse if the quotient of their
          // strides could be a legal scale.
          const SCEV *A = CondUse->getStride();
          const SCEV *B = UI->getStride();
          if (SE.getTypeSizeInBits(A->getType()) !=
              SE.getTypeSizeInBits(B->getType())) {
            if (SE.getTypeSizeInBits(A->getType()) >
                SE.getTypeSizeInBits(B->getType()))
              B = SE.getSignExtendExpr(B, A->getType());
            else
              A = SE.getSignExtendExpr(A, B->getType());
          }
          if (const SCEVConstant *D =
                dyn_cast_or_null<SCEVConstant>(getSDiv(B, A, SE))) {
            // Stride of one or negative one can have reuse with non-addresses.
            if (D->getValue()->isOne() ||
                D->getValue()->isAllOnesValue())
              goto decline_post_inc;
            // Avoid weird situations.
            if (D->getValue()->getValue().getMinSignedBits() >= 64 ||
                D->getValue()->getValue().isMinSignedValue())
              goto decline_post_inc;
            // Without TLI, assume that any stride might be valid, and so any
            // use might be shared.
            if (!TLI)
              goto decline_post_inc;
            // Check for possible scaled-address reuse.
            const Type *AccessTy = getAccessType(UI->getUser());
            TargetLowering::AddrMode AM;
            AM.Scale = D->getValue()->getSExtValue();
            if (TLI->isLegalAddressingMode(AM, AccessTy))
              goto decline_post_inc;
            AM.Scale = -AM.Scale;
            if (TLI->isLegalAddressingMode(AM, AccessTy))
              goto decline_post_inc;
          }
        }

    DEBUG(dbgs() << "  Change loop exiting icmp to use postinc iv: "
                 << *Cond << '\n');

    // It's possible for the setcc instruction to be anywhere in the loop, and
    // possible for it to have multiple users. If it is not immediately before
    // the exiting block branch, move it.
    if (&*++BasicBlock::iterator(Cond) != TermBr) {
      if (Cond->hasOneUse()) {
        Cond->moveBefore(TermBr);
      } else {
        // Clone the terminating condition and insert into the loopend.
        ICmpInst *OldCond = Cond;
        Cond = cast<ICmpInst>(Cond->clone());
        Cond->setName(L->getHeader()->getName() + ".termcond");
        ExitingBlock->getInstList().insert(TermBr, Cond);

        // Clone the IVUse, as the old use still exists!
        CondUse = &IU.AddUser(CondUse->getStride(), CondUse->getOffset(),
                              Cond, CondUse->getOperandValToReplace());
        TermBr->replaceUsesOfWith(OldCond, Cond);
      }
    }

    // If we get to here, we know that we can transform the setcc instruction to
    // use the post-incremented version of the IV, allowing us to coalesce the
    // live ranges for the IV correctly.
    CondUse->setOffset(SE.getMinusSCEV(CondUse->getOffset(),
                                       CondUse->getStride()));
    CondUse->setIsUseOfPostIncrementedValue(true);
    Changed = true;

    PostIncs.insert(Cond);
  decline_post_inc:;
  }

  // Determine an insertion point for the loop induction variable increment. It
  // must dominate all the post-inc comparisons we just set up, and it must
  // dominate the loop latch edge.
  IVIncInsertPos = L->getLoopLatch()->getTerminator();
  for (SmallPtrSet<Instruction *, 4>::const_iterator I = PostIncs.begin(),
       E = PostIncs.end(); I != E; ++I) {
    BasicBlock *BB =
      DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
                                    (*I)->getParent());
    if (BB == (*I)->getParent())
      IVIncInsertPos = *I;
    else if (BB != IVIncInsertPos->getParent())
      IVIncInsertPos = BB->getTerminator();
  }

  return Changed;
}

bool
LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset,
                                LSRUse::KindType Kind, const Type *AccessTy) {
  int64_t NewMinOffset = LU.MinOffset;
  int64_t NewMaxOffset = LU.MaxOffset;
  const Type *NewAccessTy = AccessTy;

  // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
  // something conservative, however this can pessimize in the case that one of
  // the uses will have all its uses outside the loop, for example.
  if (LU.Kind != Kind)
    return false;
  // Conservatively assume HasBaseReg is true for now.
  if (NewOffset < LU.MinOffset) {
    if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, /*HasBaseReg=*/true,
                          Kind, AccessTy, TLI, SE))
      return false;
    NewMinOffset = NewOffset;
  } else if (NewOffset > LU.MaxOffset) {
    if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, /*HasBaseReg=*/true,
                          Kind, AccessTy, TLI, SE))
      return false;
    NewMaxOffset = NewOffset;
  }
  // Check for a mismatched access type, and fall back conservatively as
  // needed.
  if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
    NewAccessTy = Type::getVoidTy(AccessTy->getContext());

  // Update the use.
  LU.MinOffset = NewMinOffset;
  LU.MaxOffset = NewMaxOffset;
  LU.AccessTy = NewAccessTy;
  if (NewOffset != LU.Offsets.back())
    LU.Offsets.push_back(NewOffset);
  return true;
}

/// getUse - Return an LSRUse index and an offset value for a fixup which
/// needs the given expression, with the given kind and optional access type.
/// Either reuse an existing use or create a new one, as needed.
std::pair<size_t, int64_t>
LSRInstance::getUse(const SCEV *&Expr,
                    LSRUse::KindType Kind, const Type *AccessTy) {
  const SCEV *Copy = Expr;
  int64_t Offset = ExtractImmediate(Expr, SE);

  // Basic uses can't accept any offset, for example.
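  // (isLegalUse requires AM.BaseOffs == 0 for LSRUse::Basic, so in that case
  // the immediate is folded back into the expression here.)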
  if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true,
                        Kind, AccessTy, TLI, SE)) {
    Expr = Copy;
    Offset = 0;
  }

  std::pair<UseMapTy::iterator, bool> P =
    UseMap.insert(std::make_pair(Expr, 0));
  if (!P.second) {
    // A use already existed with this base.
    size_t LUIdx = P.first->second;
    LSRUse &LU = Uses[LUIdx];
    if (reconcileNewOffset(LU, Offset, Kind, AccessTy))
      // Reuse this use.
      return std::make_pair(LUIdx, Offset);
  }

  // Create a new use.
  size_t LUIdx = Uses.size();
  P.first->second = LUIdx;
  Uses.push_back(LSRUse(Kind, AccessTy));
  LSRUse &LU = Uses[LUIdx];

  // We don't want to track redundant offsets, but we don't need to go out
  // of our way here to avoid them.
  if (LU.Offsets.empty() || Offset != LU.Offsets.back())
    LU.Offsets.push_back(Offset);

  LU.MinOffset = Offset;
  LU.MaxOffset = Offset;
  return std::make_pair(LUIdx, Offset);
}

void LSRInstance::CollectInterestingTypesAndFactors() {
  SmallSetVector<const SCEV *, 4> Strides;

  // Collect interesting types and factors.
  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
    const SCEV *Stride = UI->getStride();

    // Collect interesting types.
    Types.insert(SE.getEffectiveSCEVType(Stride->getType()));

    // Collect interesting factors.
    for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
         Strides.begin(), SEnd = Strides.end(); NewStrideIter != SEnd;
         ++NewStrideIter) {
      const SCEV *OldStride = Stride;
      const SCEV *NewStride = *NewStrideIter;
      if (OldStride == NewStride)
        continue;

      if (SE.getTypeSizeInBits(OldStride->getType()) !=
          SE.getTypeSizeInBits(NewStride->getType())) {
        if (SE.getTypeSizeInBits(OldStride->getType()) >
            SE.getTypeSizeInBits(NewStride->getType()))
          NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
        else
          OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
      }
      if (const SCEVConstant *Factor =
            dyn_cast_or_null<SCEVConstant>(getSDiv(NewStride, OldStride,
                                                   SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      } else if (const SCEVConstant *Factor =
                   dyn_cast_or_null<SCEVConstant>(getSDiv(OldStride, NewStride,
                                                          SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      }
    }
    Strides.insert(Stride);
  }

  // If all uses use the same type, don't bother looking for truncation-based
  // reuse.
  if (Types.size() == 1)
    Types.clear();

  DEBUG(print_factors_and_types(dbgs()));
}

void LSRInstance::CollectFixupsAndInitialFormulae() {
  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
    // Record the uses.
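    // E.g. (hypothetical): for "store i32 0, i32* %p" where %p is computed
    // from the induction variable, UserInst is the store and
    // OperandValToReplace is %p.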
    LSRFixup &LF = getNewFixup();
    LF.UserInst = UI->getUser();
    LF.OperandValToReplace = UI->getOperandValToReplace();
    if (UI->isUseOfPostIncrementedValue())
      LF.PostIncLoop = L;

    LSRUse::KindType Kind = LSRUse::Basic;
    const Type *AccessTy = 0;
    if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) {
      Kind = LSRUse::Address;
      AccessTy = getAccessType(LF.UserInst);
    }

    const SCEV *S = IU.getCanonicalExpr(*UI);

    // Equality (== and !=) ICmps are special. We can rewrite (i == N) as
    // (N - i == 0), and this allows (N - i) to be the expression that we work
    // with rather than just N or i, so we can consider the register
    // requirements for both N and i at the same time. Limiting this code to
    // equality icmps is not a problem because all interesting loops use
    // equality icmps, thanks to IndVarSimplify.
    if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst))
      if (CI->isEquality()) {
        // Swap the operands if needed to put the OperandValToReplace on the
        // left, for consistency.
        Value *NV = CI->getOperand(1);
        if (NV == LF.OperandValToReplace) {
          CI->setOperand(1, CI->getOperand(0));
          CI->setOperand(0, NV);
        }

        // x == y  -->  x - y == 0
        const SCEV *N = SE.getSCEV(NV);
        if (N->isLoopInvariant(L)) {
          Kind = LSRUse::ICmpZero;
          S = SE.getMinusSCEV(N, S);
        }

        // -1 and the negations of all interesting strides (except the negation
        // of -1) are now also interesting.
        for (size_t i = 0, e = Factors.size(); i != e; ++i)
          if (Factors[i] != -1)
            Factors.insert(-(uint64_t)Factors[i]);
        Factors.insert(-1);
      }

    // Set up the initial formula for this use.
    std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy);
    LF.LUIdx = P.first;
    LF.Offset = P.second;
    LSRUse &LU = Uses[LF.LUIdx];
    LU.AllFixupsOutsideLoop &= !L->contains(LF.UserInst);

    // If this is the first use of this LSRUse, give it a formula.
    if (LU.Formulae.empty()) {
      InsertInitialFormula(S, L, LU, LF.LUIdx);
      CountRegisters(LU.Formulae.back(), LF.LUIdx);
    }
  }

  DEBUG(print_fixups(dbgs()));
}

void
LSRInstance::InsertInitialFormula(const SCEV *S, Loop *L,
                                  LSRUse &LU, size_t LUIdx) {
  Formula F;
  F.InitialMatch(S, L, SE, DT);
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Initial formula already exists!"); (void)Inserted;
}

void
LSRInstance::InsertSupplementalFormula(const SCEV *S,
                                       LSRUse &LU, size_t LUIdx) {
  Formula F;
  F.BaseRegs.push_back(S);
  F.AM.HasBaseReg = true;
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;
}

/// CountRegisters - Note which registers are used by the given formula,
/// updating RegUses.
void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
  if (F.ScaledReg)
    RegUses.CountRegister(F.ScaledReg, LUIdx);
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I)
    RegUses.CountRegister(*I, LUIdx);
}

/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
  if (!LU.InsertFormula(LUIdx, F))
    return false;

  CountRegisters(F, LUIdx);
  return true;
}

/// CollectLoopInvariantFixupsAndFormulae - Check for other uses of
/// loop-invariant values which we're tracking. These other uses will pin these
/// values in registers, making them less profitable for elimination.
/// TODO: This currently misses non-constant addrec step registers.
/// TODO: Should this give more weight to users inside the loop?
void
LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
  SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
  SmallPtrSet<const SCEV *, 8> Inserted;

  while (!Worklist.empty()) {
    const SCEV *S = Worklist.pop_back_val();

    if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
      Worklist.insert(Worklist.end(), N->op_begin(), N->op_end());
    else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
      Worklist.push_back(C->getOperand());
    else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      Worklist.push_back(D->getLHS());
      Worklist.push_back(D->getRHS());
    } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
      if (!Inserted.insert(U)) continue;
      const Value *V = U->getValue();
      if (const Instruction *Inst = dyn_cast<Instruction>(V))
        if (L->contains(Inst)) continue;
      for (Value::use_const_iterator UI = V->use_begin(), UE = V->use_end();
           UI != UE; ++UI) {
        const Instruction *UserInst = dyn_cast<Instruction>(*UI);
        // Ignore non-instructions.
        if (!UserInst)
          continue;
        // Ignore instructions in other functions (as can happen with
        // Constants).
        if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
          continue;
        // Ignore instructions not dominated by the loop.
        const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
          UserInst->getParent() :
          cast<PHINode>(UserInst)->getIncomingBlock(
            PHINode::getIncomingValueNumForOperand(UI.getOperandNo()));
        if (!DT.dominates(L->getHeader(), UseBB))
          continue;
        // Ignore uses which are part of other SCEV expressions, to avoid
        // analyzing them multiple times.
        if (SE.isSCEVable(UserInst->getType()) &&
            !isa<SCEVUnknown>(SE.getSCEV(const_cast<Instruction *>(UserInst))))
          continue;
        // Ignore icmp instructions which are already being analyzed.
        if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
          unsigned OtherIdx = !UI.getOperandNo();
          Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
          if (SE.getSCEV(OtherOp)->hasComputableLoopEvolution(L))
            continue;
        }

        LSRFixup &LF = getNewFixup();
        LF.UserInst = const_cast<Instruction *>(UserInst);
        LF.OperandValToReplace = UI.getUse();
        std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0);
        LF.LUIdx = P.first;
        LF.Offset = P.second;
        LSRUse &LU = Uses[LF.LUIdx];
        LU.AllFixupsOutsideLoop &= !L->contains(LF.UserInst);
        InsertSupplementalFormula(U, LU, LF.LUIdx);
        CountRegisters(LU.Formulae.back(), Uses.size() - 1);
        break;
      }
    }
  }
}

/// CollectSubexprs - Split S into subexpressions which can be pulled out into
/// separate registers. If C is non-null, multiply each subexpression by C.
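/// For example (an illustrative sketch): {(%x + 5),+,4}<%L> splits into
/// {0,+,4}<%L>, %x, and 5, while (2 * (%a + %b)) splits into (2 * %a) and
/// (2 * %b).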
static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
                            SmallVectorImpl<const SCEV *> &Ops,
                            ScalarEvolution &SE) {
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    // Break out add operands.
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      CollectSubexprs(*I, C, Ops, SE);
    return;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    // Split a non-zero base out of an addrec.
    if (!AR->getStart()->isZero()) {
      CollectSubexprs(SE.getAddRecExpr(SE.getIntegerSCEV(0, AR->getType()),
                                       AR->getStepRecurrence(SE),
                                       AR->getLoop()), C, Ops, SE);
      CollectSubexprs(AR->getStart(), C, Ops, SE);
      return;
    }
  } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    // Break (C * (a + b + c)) into C*a + C*b + C*c.
    if (Mul->getNumOperands() == 2)
      if (const SCEVConstant *Op0 =
            dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
        CollectSubexprs(Mul->getOperand(1),
                        C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0,
                        Ops, SE);
        return;
      }
  }

  // Otherwise use the value itself.
  Ops.push_back(C ? SE.getMulExpr(C, S) : S);
}

/// GenerateReassociations - Split out subexpressions from adds and the bases of
/// addrecs.
void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
                                         Formula Base,
                                         unsigned Depth) {
  // Arbitrarily cap recursion to protect compile time.
  if (Depth >= 3) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *BaseReg = Base.BaseRegs[i];

    SmallVector<const SCEV *, 8> AddOps;
    CollectSubexprs(BaseReg, 0, AddOps, SE);
    if (AddOps.size() == 1) continue;

    for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
         JE = AddOps.end(); J != JE; ++J) {
      // Don't pull a constant into a register if the constant could be folded
      // into an immediate field.
      if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
                           Base.getNumRegs() > 1,
                           LU.Kind, LU.AccessTy, TLI, SE))
        continue;

      // Collect all operands except *J.
      SmallVector<const SCEV *, 8> InnerAddOps;
      for (SmallVectorImpl<const SCEV *>::const_iterator K = AddOps.begin(),
           KE = AddOps.end(); K != KE; ++K)
        if (K != J)
          InnerAddOps.push_back(*K);

      // Don't leave just a constant behind in a register if the constant could
      // be folded into an immediate field.
      if (InnerAddOps.size() == 1 &&
          isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset,
                           Base.getNumRegs() > 1,
                           LU.Kind, LU.AccessTy, TLI, SE))
        continue;

      Formula F = Base;
      F.BaseRegs[i] = SE.getAddExpr(InnerAddOps);
      F.BaseRegs.push_back(*J);
      if (InsertFormula(LU, LUIdx, F))
        // If that formula hadn't been seen before, recurse to find more like
        // it.
        GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1);
    }
  }
}

/// GenerateCombinations - Generate a formula consisting of all of the
/// loop-dominating registers added into a single register.
void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
                                       Formula Base) {
  // This method is only interesting on a plurality of registers.
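  // A hypothetical example: with BaseRegs { %a, %b, {0,+,1}<%L> } where %a and
  // %b are loop-invariant values that dominate the header, this forms
  // { (%a + %b), {0,+,1}<%L> }, letting the invariant part share one register.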
  if (Base.BaseRegs.size() <= 1) return;

  Formula F = Base;
  F.BaseRegs.clear();
  SmallVector<const SCEV *, 4> Ops;
  for (SmallVectorImpl<const SCEV *>::const_iterator
       I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (BaseReg->properlyDominates(L->getHeader(), &DT) &&
        !BaseReg->hasComputableLoopEvolution(L))
      Ops.push_back(BaseReg);
    else
      F.BaseRegs.push_back(BaseReg);
  }
  if (Ops.size() > 1) {
    const SCEV *Sum = SE.getAddExpr(Ops);
    // TODO: If Sum is zero, it probably means ScalarEvolution missed an
    // opportunity to fold something. For now, just ignore such cases
    // rather than proceed with zero in a register.
    if (!Sum->isZero()) {
      F.BaseRegs.push_back(Sum);
      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}

/// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets.
void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // We can't add a symbolic offset if the address already contains one.
  if (Base.AM.BaseGV) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];
    GlobalValue *GV = ExtractSymbol(G, SE);
    if (G->isZero() || !GV)
      continue;
    Formula F = Base;
    F.AM.BaseGV = GV;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}

/// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // TODO: For now, just add the min and max offset, because it usually isn't
  // worthwhile looking at everything in between.
  SmallVector<int64_t, 4> Worklist;
  Worklist.push_back(LU.MinOffset);
  if (LU.MaxOffset != LU.MinOffset)
    Worklist.push_back(LU.MaxOffset);

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];

    for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
         E = Worklist.end(); I != E; ++I) {
      Formula F = Base;
      F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
      if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
                     LU.Kind, LU.AccessTy, TLI)) {
        F.BaseRegs[i] = SE.getAddExpr(G, SE.getIntegerSCEV(*I, G->getType()));

        (void)InsertFormula(LU, LUIdx, F);
      }
    }

    int64_t Imm = ExtractImmediate(G, SE);
    if (G->isZero() || Imm == 0)
      continue;
    Formula F = Base;
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}

/// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up
/// the comparison. For example, x == y -> x*c == y*c.
void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
                                         Formula Base) {
  if (LU.Kind != LSRUse::ICmpZero) return;

  // Determine the integer type for the base formula.
  const Type *IntTy = Base.getType();
  if (!IntTy) return;
  if (SE.getTypeSizeInBits(IntTy) > 64) return;

  // Don't do this if there is more than one offset.
  if (LU.MinOffset != LU.MaxOffset) return;

  assert(!Base.AM.BaseGV && "ICmpZero use is not legal!");

  // Check each interesting stride.
  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    int64_t Factor = *I;
    Formula F = Base;

    // Check that the multiplication doesn't overflow.
    if (F.AM.BaseOffs == INT64_MIN && Factor == -1)
      continue;
    F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs * Factor;
    if ((int64_t)F.AM.BaseOffs / Factor != Base.AM.BaseOffs)
      continue;

    // Check that multiplying with the use offset doesn't overflow.
    int64_t Offset = LU.MinOffset;
    if (Offset == INT64_MIN && Factor == -1)
      continue;
    Offset = (uint64_t)Offset * Factor;
    if ((int64_t)Offset / Factor != LU.MinOffset)
      continue;

    // Check that this scale is legal.
    if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
      continue;

    // Compensate for the use having MinOffset built into it.
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Offset - LU.MinOffset;

    const SCEV *FactorS = SE.getIntegerSCEV(Factor, IntTy);

    // Check that multiplying with each base register doesn't overflow.
    for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
      F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
      if (getSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
        goto next;
    }

    // Check that multiplying with the scaled register doesn't overflow.
    if (F.ScaledReg) {
      F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
      if (getSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
        continue;
    }

    // If we make it here and it's legal, add it.
    (void)InsertFormula(LU, LUIdx, F);
  next:;
  }
}

/// GenerateScales - Generate stride factor reuse formulae by making use of
/// scaled-offset address modes, for example.
void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx,
                                 Formula Base) {
  // Determine the integer type for the base formula.
  const Type *IntTy = Base.getType();
  if (!IntTy) return;

  // If this Formula already has a scaled register, we can't add another one.
  if (Base.AM.Scale != 0) return;

  // Check each interesting stride.
  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    int64_t Factor = *I;

    Base.AM.Scale = Factor;
    Base.AM.HasBaseReg = Base.BaseRegs.size() > 1;
    // Check whether this scale is going to be legal.
    if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI)) {
      // As a special case, handle out-of-loop Basic users by reclassifying
      // the use as Special.
      // TODO: Reconsider this special case.
      if (LU.Kind == LSRUse::Basic &&
          isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
                     LSRUse::Special, LU.AccessTy, TLI) &&
          LU.AllFixupsOutsideLoop)
        LU.Kind = LSRUse::Special;
      else
        continue;
    }
    // For an ICmpZero, negating a solitary base register won't lead to
    // new solutions.
    if (LU.Kind == LSRUse::ICmpZero &&
        !Base.AM.HasBaseReg && Base.AM.BaseOffs == 0 && !Base.AM.BaseGV)
      continue;
    // For each addrec base reg, apply the scale, if possible.
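    // E.g. (illustrative): with Factor == 4, a base reg {0,+,4}<%L> divides
    // down to {0,+,1}<%L>, which becomes the ScaledReg with AM.Scale == 4,
    // matching a [base + idx*4] style addressing mode where one exists.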
    for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
      if (const SCEVAddRecExpr *AR =
            dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) {
        const SCEV *FactorS = SE.getIntegerSCEV(Factor, IntTy);
        if (FactorS->isZero())
          continue;
        // Divide out the factor, ignoring high bits, since we'll be
        // scaling the value back up in the end.
        if (const SCEV *Quotient = getSDiv(AR, FactorS, SE, true)) {
          // TODO: This could be optimized to avoid all the copying.
          Formula F = Base;
          F.ScaledReg = Quotient;
          std::swap(F.BaseRegs[i], F.BaseRegs.back());
          F.BaseRegs.pop_back();
          (void)InsertFormula(LU, LUIdx, F);
        }
      }
  }
}

/// GenerateTruncates - Generate reuse formulae from different IV types.
void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx,
                                    Formula Base) {
  // This requires TargetLowering to tell us which truncates are free.
  if (!TLI) return;

  // Don't bother truncating symbolic values.
  if (Base.AM.BaseGV) return;

  // Determine the integer type for the base formula.
  const Type *DstTy = Base.getType();
  if (!DstTy) return;
  DstTy = SE.getEffectiveSCEVType(DstTy);

  for (SmallSetVector<const Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    const Type *SrcTy = *I;
    if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) {
      Formula F = Base;

      if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I);
      for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(),
           JE = F.BaseRegs.end(); J != JE; ++J)
        *J = SE.getAnyExtendExpr(*J, SrcTy);

      // TODO: This assumes we've done basic processing on all uses and
      // have an idea what the register usage is.
      if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
        continue;

      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}

namespace {

/// WorkItem - Helper class for GenerateCrossUseConstantOffsets. It's used to
/// defer modifications so that the search phase doesn't have to worry about
/// the data structures moving underneath it.
struct WorkItem {
  size_t LUIdx;
  int64_t Imm;
  const SCEV *OrigReg;

  WorkItem(size_t LI, int64_t I, const SCEV *R)
    : LUIdx(LI), Imm(I), OrigReg(R) {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void WorkItem::print(raw_ostream &OS) const {
  OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
     << ", add offset " << Imm;
}

void WorkItem::dump() const {
  print(errs()); errs() << '\n';
}

/// GenerateCrossUseConstantOffsets - Look for registers which are a constant
/// distance apart and try to form reuse opportunities between them.
void LSRInstance::GenerateCrossUseConstantOffsets() {
  // Group the registers by their value without any added constant offset.
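  // E.g. (hypothetical): the registers (%x + 4), (%x + 8), and %x all group
  // under the base %x, with 4, 8, and 0 recorded as their immediate offsets.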
  typedef std::map<int64_t, const SCEV *> ImmMapTy;
  typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy;
  RegMapTy Map;
  DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
  SmallVector<const SCEV *, 8> Sequence;
  for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
       I != E; ++I) {
    const SCEV *Reg = *I;
    int64_t Imm = ExtractImmediate(Reg, SE);
    std::pair<RegMapTy::iterator, bool> Pair =
      Map.insert(std::make_pair(Reg, ImmMapTy()));
    if (Pair.second)
      Sequence.push_back(Reg);
    Pair.first->second.insert(std::make_pair(Imm, *I));
    UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I);
  }

  // Now examine each set of registers with the same base value. Build up
  // a list of work to do and do the work in a separate step so that we're
  // not adding formulae and register counts while we're searching.
  SmallVector<WorkItem, 32> WorkItems;
  SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(),
       E = Sequence.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    const ImmMapTy &Imms = Map.find(Reg)->second;

    // It's not worthwhile looking for reuse if there's only one offset.
    if (Imms.size() == 1)
      continue;

    DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
          for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
               J != JE; ++J)
            dbgs() << ' ' << J->first;
          dbgs() << '\n');

    // Examine each offset.
    for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
         J != JE; ++J) {
      const SCEV *OrigReg = J->second;

      int64_t JImm = J->first;
      const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);

      if (!isa<SCEVConstant>(OrigReg) &&
          UsedByIndicesMap[Reg].count() == 1) {
        DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n');
        continue;
      }

      // Conservatively examine offsets between this orig reg and a few
      // selected other orig regs.
      ImmMapTy::const_iterator OtherImms[] = {
        Imms.begin(), prior(Imms.end()),
        Imms.upper_bound((Imms.begin()->first + prior(Imms.end())->first) / 2)
      };
      for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
        ImmMapTy::const_iterator M = OtherImms[i];
        if (M == J || M == JE) continue;

        // Compute the difference between the two.
        int64_t Imm = (uint64_t)JImm - M->first;
        for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
             LUIdx = UsedByIndices.find_next(LUIdx))
          // Make a memo of this use, offset, and register tuple.
          if (UniqueItems.insert(std::make_pair(LUIdx, Imm)))
            WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
      }
    }
  }

  Map.clear();
  Sequence.clear();
  UsedByIndicesMap.clear();
  UniqueItems.clear();

  // Now iterate through the worklist and add new formulae.
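  // E.g. (a hedged sketch): if one use's formula references (%x + 4) and
  // another's references (%x + 8), a WorkItem with Imm == 4 lets the latter
  // be rewritten as ((%x + 4) + 4), so both uses can share the (%x + 4)
  // register with the difference folded into the formula's immediate.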
  for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
       E = WorkItems.end(); I != E; ++I) {
    const WorkItem &WI = *I;
    size_t LUIdx = WI.LUIdx;
    LSRUse &LU = Uses[LUIdx];
    int64_t Imm = WI.Imm;
    const SCEV *OrigReg = WI.OrigReg;

    const Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
    const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
    unsigned BitWidth = SE.getTypeSizeInBits(IntTy);

    // TODO: Use a more targeted data structure.
    for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
      Formula F = LU.Formulae[L];
      // Use the immediate in the scaled register.
      if (F.ScaledReg == OrigReg) {
        int64_t Offs = (uint64_t)F.AM.BaseOffs +
                       Imm * (uint64_t)F.AM.Scale;
        // Don't create 50 + reg(-50).
        if (F.referencesReg(SE.getSCEV(
                   ConstantInt::get(IntTy, -(uint64_t)Offs))))
          continue;
        Formula NewF = F;
        NewF.AM.BaseOffs = Offs;
        if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI))
          continue;
        NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);

        // If the new scale is a constant in a register, and adding the constant
        // value to the immediate would produce a value closer to zero than the
        // immediate itself, then the formula isn't worthwhile.
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
          if (C->getValue()->getValue().isNegative() !=
                (NewF.AM.BaseOffs < 0) &&
              (C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale))
                .ule(APInt(BitWidth, NewF.AM.BaseOffs).abs()))
            continue;

        // OK, looks good.
        (void)InsertFormula(LU, LUIdx, NewF);
      } else {
        // Use the immediate in a base register.
        for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
          const SCEV *BaseReg = F.BaseRegs[N];
          if (BaseReg != OrigReg)
            continue;
          Formula NewF = F;
          NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm;
          if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
                          LU.Kind, LU.AccessTy, TLI))
            continue;
          NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);

          // If the new formula has a constant in a register, and adding the
          // constant value to the immediate would produce a value closer to
          // zero than the immediate itself, then the formula isn't worthwhile.
          for (SmallVectorImpl<const SCEV *>::const_iterator
               J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end();
               J != JE; ++J)
            if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
              if (C->getValue()->getValue().isNegative() !=
                    (NewF.AM.BaseOffs < 0) &&
                  C->getValue()->getValue().abs()
                    .ule(APInt(BitWidth, NewF.AM.BaseOffs).abs()))
                goto skip_formula;

          // OK, looks good.
          (void)InsertFormula(LU, LUIdx, NewF);
          break;
        skip_formula:;
        }
      }
    }
  }
}

/// GenerateAllReuseFormulae - Generate formulae for each use.
void
LSRInstance::GenerateAllReuseFormulae() {
  // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
  // queries are more precise.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateScales(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
  }

  GenerateCrossUseConstantOffsets();
}

/// FilterOutUndesirableDedicatedRegisters - If there are multiple formulae
/// with the same set of registers used by other uses, pick the best one and
/// delete the others.
void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
#ifndef NDEBUG
  bool Changed = false;
#endif

  // Collect the best formula for each unique set of shared registers. This
  // is reset for each use.
  typedef DenseMap<SmallVector<const SCEV *, 2>, size_t, UniquifierDenseMapInfo>
    BestFormulaeTy;
  BestFormulaeTy BestFormulae;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    FormulaSorter Sorter(L, LU, SE, DT);

    // Clear out the set of used regs; it will be recomputed.
    LU.Regs.clear();

    for (size_t FIdx = 0, NumForms = LU.Formulae.size();
         FIdx != NumForms; ++FIdx) {
      Formula &F = LU.Formulae[FIdx];

      SmallVector<const SCEV *, 2> Key;
      for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
           JE = F.BaseRegs.end(); J != JE; ++J) {
        const SCEV *Reg = *J;
        if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
          Key.push_back(Reg);
      }
      if (F.ScaledReg &&
          RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
        Key.push_back(F.ScaledReg);
      // Unstable sort by host order ok, because this is only used for
      // uniquifying.
      std::sort(Key.begin(), Key.end());

      std::pair<BestFormulaeTy::const_iterator, bool> P =
        BestFormulae.insert(std::make_pair(Key, FIdx));
      if (!P.second) {
        Formula &Best = LU.Formulae[P.first->second];
        if (Sorter.operator()(F, Best))
          std::swap(F, Best);
        DEBUG(dbgs() << "Filtering out "; F.print(dbgs());
              dbgs() << "\n"
                        "  in favor of "; Best.print(dbgs());
              dbgs() << '\n');
#ifndef NDEBUG
        Changed = true;
#endif
        std::swap(F, LU.Formulae.back());
        LU.Formulae.pop_back();
        --FIdx;
        --NumForms;
        continue;
      }
      if (F.ScaledReg) LU.Regs.insert(F.ScaledReg);
      LU.Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
    }
    BestFormulae.clear();
  }

  DEBUG(if (Changed) {
          dbgs() << "\n"
                    "After filtering out undesirable candidates:\n";
          print_uses(dbgs());
        });
}

/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
/// formulae to choose from, use some rough heuristics to prune down the number
/// of formulae. This keeps the main solver from taking an extraordinary amount
/// of time in some worst-case scenarios.
void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
  // This is a rough guess that seems to work fairly well.
  const size_t Limit = UINT16_MAX;

  SmallPtrSet<const SCEV *, 4> Taken;
  for (;;) {
    // Estimate the worst-case number of solutions we might consider. We almost
    // never consider this many solutions because we prune the search space,
    // but the pruning isn't always sufficient.
    uint32_t Power = 1;
    for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
         E = Uses.end(); I != E; ++I) {
      size_t FSize = I->Formulae.size();
      if (FSize >= Limit) {
        Power = Limit;
        break;
      }
      Power *= FSize;
      if (Power >= Limit)
        break;
    }
    if (Power < Limit)
      break;

    // OK, we have too many formulae on our hands to conveniently handle.
    // Use a rough heuristic to thin out the list.

    // Pick the register which is used by the most LSRUses, which is likely
    // to be a good reuse register candidate.
    const SCEV *Best = 0;
    unsigned BestNum = 0;
    for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
         I != E; ++I) {
      const SCEV *Reg = *I;
      if (Taken.count(Reg))
        continue;
      if (!Best) {
        Best = Reg;
        BestNum = RegUses.getUsedByIndices(Reg).count();
      } else {
        unsigned Count = RegUses.getUsedByIndices(Reg).count();
        if (Count > BestNum) {
          Best = Reg;
          BestNum = Count;
        }
      }
    }

    DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
                 << " will yield profitable reuse.\n");
    Taken.insert(Best);

    // In any use with formulae which reference this register, delete formulae
    // which don't reference it.
    for (SmallVectorImpl<LSRUse>::iterator I = Uses.begin(),
         E = Uses.end(); I != E; ++I) {
      LSRUse &LU = *I;
      if (!LU.Regs.count(Best)) continue;

      // Clear out the set of used regs; it will be recomputed.
      LU.Regs.clear();

      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        if (!F.referencesReg(Best)) {
          DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
          std::swap(LU.Formulae.back(), F);
          LU.Formulae.pop_back();
          --e;
          --i;
          continue;
        }

        if (F.ScaledReg) LU.Regs.insert(F.ScaledReg);
        LU.Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
      }
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}

/// SolveRecurse - This is the recursive solver.
void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                               Cost &SolutionCost,
                               SmallVectorImpl<const Formula *> &Workspace,
                               const Cost &CurCost,
                               const SmallPtrSet<const SCEV *, 16> &CurRegs,
                               DenseSet<const SCEV *> &VisitedRegs) const {
  // Some ideas:
  //  - prune more:
  //    - use more aggressive filtering
  //    - sort the formulae so that the most profitable solutions are found
  //      first
  //    - sort the uses too
  //  - search faster:
  //    - don't compute a cost, and then compare. Compare while computing a
  //      cost and bail early.
  //    - track register sets with SmallBitVector

  const LSRUse &LU = Uses[Workspace.size()];

  // If this use references any register that's already a part of the
  // in-progress solution, consider it a requirement that a formula must
  // reference that register in order to be considered. This prunes out
  // unprofitable searching.
  SmallSetVector<const SCEV *, 4> ReqRegs;
  for (SmallPtrSet<const SCEV *, 16>::const_iterator I = CurRegs.begin(),
       E = CurRegs.end(); I != E; ++I)
    if (LU.Regs.count(*I))
      ReqRegs.insert(*I);

  bool AnySatisfiedReqRegs = false;
  SmallPtrSet<const SCEV *, 16> NewRegs;
  Cost NewCost;
retry:
  for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
       E = LU.Formulae.end(); I != E; ++I) {
    const Formula &F = *I;

    // Ignore formulae which do not use any of the required registers.
    for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(),
         JE = ReqRegs.end(); J != JE; ++J) {
      const SCEV *Reg = *J;
      if ((!F.ScaledReg || F.ScaledReg != Reg) &&
          std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) ==
          F.BaseRegs.end())
        goto skip;
    }
    AnySatisfiedReqRegs = true;

    // Evaluate the cost of the current formula. If it's already worse than
    // the current best, prune the search at that point.
    NewCost = CurCost;
    NewRegs = CurRegs;
    NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT);
    if (NewCost < SolutionCost) {
      Workspace.push_back(&F);
      if (Workspace.size() != Uses.size()) {
        SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
                     NewRegs, VisitedRegs);
        if (F.getNumRegs() == 1 && Workspace.size() == 1)
          VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
      } else {
        DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
Regs:"; 2689 for (SmallPtrSet<const SCEV *, 16>::const_iterator 2690 I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I) 2691 dbgs() << ' ' << **I; 2692 dbgs() << '\n'); 2693 2694 SolutionCost = NewCost; 2695 Solution = Workspace; 2696 } 2697 Workspace.pop_back(); 2698 } 2699 skip:; 2700 } 2701 2702 // If none of the formulae had all of the required registers, relax the 2703 // constraint so that we don't exclude all formulae. 2704 if (!AnySatisfiedReqRegs) { 2705 ReqRegs.clear(); 2706 goto retry; 2707 } 2708} 2709 2710void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const { 2711 SmallVector<const Formula *, 8> Workspace; 2712 Cost SolutionCost; 2713 SolutionCost.Loose(); 2714 Cost CurCost; 2715 SmallPtrSet<const SCEV *, 16> CurRegs; 2716 DenseSet<const SCEV *> VisitedRegs; 2717 Workspace.reserve(Uses.size()); 2718 2719 SolveRecurse(Solution, SolutionCost, Workspace, CurCost, 2720 CurRegs, VisitedRegs); 2721 2722 // Ok, we've now made all our decisions. 2723 DEBUG(dbgs() << "\n" 2724 "The chosen solution requires "; SolutionCost.print(dbgs()); 2725 dbgs() << ":\n"; 2726 for (size_t i = 0, e = Uses.size(); i != e; ++i) { 2727 dbgs() << " "; 2728 Uses[i].print(dbgs()); 2729 dbgs() << "\n" 2730 " "; 2731 Solution[i]->print(dbgs()); 2732 dbgs() << '\n'; 2733 }); 2734} 2735 2736/// getImmediateDominator - A handy utility for the specific DominatorTree 2737/// query that we need here. 2738/// 2739static BasicBlock *getImmediateDominator(BasicBlock *BB, DominatorTree &DT) { 2740 DomTreeNode *Node = DT.getNode(BB); 2741 if (!Node) return 0; 2742 Node = Node->getIDom(); 2743 if (!Node) return 0; 2744 return Node->getBlock(); 2745} 2746 2747Value *LSRInstance::Expand(const LSRFixup &LF, 2748 const Formula &F, 2749 BasicBlock::iterator IP, 2750 Loop *L, Instruction *IVIncInsertPos, 2751 SCEVExpander &Rewriter, 2752 SmallVectorImpl<WeakVH> &DeadInsts, 2753 ScalarEvolution &SE, DominatorTree &DT) const { 2754 const LSRUse &LU = Uses[LF.LUIdx]; 2755 2756 // Then, collect some instructions which we will remain dominated by when 2757 // expanding the replacement. These must be dominated by any operands that 2758 // will be required in the expansion. 2759 SmallVector<Instruction *, 4> Inputs; 2760 if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace)) 2761 Inputs.push_back(I); 2762 if (LU.Kind == LSRUse::ICmpZero) 2763 if (Instruction *I = 2764 dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1))) 2765 Inputs.push_back(I); 2766 if (LF.PostIncLoop && !L->contains(LF.UserInst)) 2767 Inputs.push_back(L->getLoopLatch()->getTerminator()); 2768 2769 // Then, climb up the immediate dominator tree as far as we can go while 2770 // still being dominated by the input positions. 
  for (;;) {
    bool AllDominate = true;
    Instruction *BetterPos = 0;
    BasicBlock *IDom = getImmediateDominator(IP->getParent(), DT);
    if (!IDom) break;
    Instruction *Tentative = IDom->getTerminator();
    for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
         E = Inputs.end(); I != E; ++I) {
      Instruction *Inst = *I;
      if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
        AllDominate = false;
        break;
      }
      if (IDom == Inst->getParent() &&
          (!BetterPos || DT.dominates(BetterPos, Inst)))
        BetterPos = next(BasicBlock::iterator(Inst));
    }
    if (!AllDominate)
      break;
    if (BetterPos)
      IP = BetterPos;
    else
      IP = Tentative;
  }
  while (isa<PHINode>(IP)) ++IP;

  // Inform the Rewriter if we have a post-increment use, so that it can
  // perform an advantageous expansion.
  Rewriter.setPostInc(LF.PostIncLoop);

  // This is the type that the user actually needs.
  const Type *OpTy = LF.OperandValToReplace->getType();
  // This will be the type that we'll initially expand to.
  const Type *Ty = F.getType();
  if (!Ty)
    // No type known; just expand directly to the ultimate type.
    Ty = OpTy;
  else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
    // Expand directly to the ultimate type if it's the right size.
    Ty = OpTy;
  // This is the type to do integer arithmetic in.
  const Type *IntTy = SE.getEffectiveSCEVType(Ty);

  // Build up a list of operands to add together to form the full base.
  SmallVector<const SCEV *, 8> Ops;

  // Expand the BaseRegs portion.
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    assert(!Reg->isZero() && "Zero allocated in a base register!");

    // If we're expanding for a post-inc user of the addrec's loop, make the
    // post-inc adjustment.
    const SCEV *Start = Reg;
    while (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Start)) {
      if (AR->getLoop() == LF.PostIncLoop) {
        Reg = SE.getAddExpr(Reg, AR->getStepRecurrence(SE));
        // If the user is inside the loop, insert the code after the increment
        // so that it is dominated by its operand.
        if (L->contains(LF.UserInst))
          IP = IVIncInsertPos;
        break;
      }
      Start = AR->getStart();
    }

    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
  }

  // Expand the ScaledReg portion.
  Value *ICmpScaledV = 0;
  if (F.AM.Scale != 0) {
    const SCEV *ScaledS = F.ScaledReg;

    // If we're expanding for a post-inc user of the addrec's loop, make the
    // post-inc adjustment.
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(ScaledS))
      if (AR->getLoop() == LF.PostIncLoop)
        ScaledS = SE.getAddExpr(ScaledS, AR->getStepRecurrence(SE));

    if (LU.Kind == LSRUse::ICmpZero) {
      // An interesting way of "folding" with an icmp is to use a negated
      // scale, which we'll implement by inserting it into the other operand
      // of the icmp.
      assert(F.AM.Scale == -1 &&
             "The only scale supported by ICmpZero uses is -1!");
      ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP);
    } else {
      // Otherwise just expand the scaled register and an explicit scale,
      // which is expected to be matched as part of the address.
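      // E.g. (hypothetical): a ScaledReg of {0,+,1}<%L> with AM.Scale == 4
      // expands to a (4 * %idx) multiply, which address-mode matching can
      // later fold into a [base + idx*4] operand on targets that support it.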
      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
      ScaledS = SE.getMulExpr(ScaledS,
                              SE.getIntegerSCEV(F.AM.Scale,
                                                ScaledS->getType()));
      Ops.push_back(ScaledS);
    }
  }

  // Expand the immediate portions.
  if (F.AM.BaseGV)
    Ops.push_back(SE.getSCEV(F.AM.BaseGV));
  int64_t Offset = (uint64_t)F.AM.BaseOffs + LF.Offset;
  if (Offset != 0) {
    if (LU.Kind == LSRUse::ICmpZero) {
      // The other interesting way of "folding" with an ICmpZero is to use a
      // negated immediate.
      if (!ICmpScaledV)
        ICmpScaledV = ConstantInt::get(IntTy, -Offset);
      else {
        Ops.push_back(SE.getUnknown(ICmpScaledV));
        ICmpScaledV = ConstantInt::get(IntTy, Offset);
      }
    } else {
      // Just add the immediate values. These again are expected to be matched
      // as part of the address.
      Ops.push_back(SE.getIntegerSCEV(Offset, IntTy));
    }
  }

  // Emit instructions summing all the operands.
  const SCEV *FullS = Ops.empty() ?
                      SE.getIntegerSCEV(0, IntTy) :
                      SE.getAddExpr(Ops);
  Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP);

  // We're done expanding now, so reset the rewriter.
  Rewriter.setPostInc(0);

  // An ICmpZero Formula represents an ICmp which we're handling as a
  // comparison against zero. Now that we've expanded an expression for that
  // form, update the ICmp's other operand.
  if (LU.Kind == LSRUse::ICmpZero) {
    ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
    DeadInsts.push_back(CI->getOperand(1));
    assert(!F.AM.BaseGV && "ICmp does not support folding a global value and "
                           "a scale at the same time!");
    if (F.AM.Scale == -1) {
      if (ICmpScaledV->getType() != OpTy) {
        Instruction *Cast =
          CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
                                                   OpTy, false),
                           ICmpScaledV, OpTy, "tmp", CI);
        ICmpScaledV = Cast;
      }
      CI->setOperand(1, ICmpScaledV);
    } else {
      assert(F.AM.Scale == 0 &&
             "ICmp does not support folding a global value and "
             "a scale at the same time!");
      Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
                                           -(uint64_t)Offset);
      if (C->getType() != OpTy)
        C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                          OpTy, false),
                                  C, OpTy);

      CI->setOperand(1, C);
    }
  }

  return FullV;
}

/// RewriteForPHI - Helper for Rewrite. PHI nodes are special because the use
/// of their operands effectively happens in their predecessor blocks, so the
/// expression may need to be expanded in multiple places.
void LSRInstance::RewriteForPHI(PHINode *PN,
                                const LSRFixup &LF,
                                const Formula &F,
                                Loop *L, Instruction *IVIncInsertPos,
                                SCEVExpander &Rewriter,
                                SmallVectorImpl<WeakVH> &DeadInsts,
                                ScalarEvolution &SE, DominatorTree &DT,
                                Pass *P) const {
  DenseMap<BasicBlock *, Value *> Inserted;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
      BasicBlock *BB = PN->getIncomingBlock(i);

      // If this is a critical edge, split the edge so that we do not insert
      // the code on all predecessor/successor paths. We do this unless this
      // is the canonical backedge for this loop, which complicates post-inc
      // users.
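      // E.g. (an illustrative CFG): if BB branches to both PN's block and a
      // second successor, expanding at BB's terminator would compute the
      // value on the path to the other successor as well; splitting the edge
      // gives the PHI operand a dedicated block.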
      if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
          !isa<IndirectBrInst>(BB->getTerminator()) &&
          (PN->getParent() != L->getHeader() || !L->contains(BB))) {
        // Split the critical edge.
        BasicBlock *NewBB = SplitCriticalEdge(BB, PN->getParent(), P);

        // If PN is outside of the loop and BB is in the loop, we want to
        // move the block to be immediately before the PHI block, not
        // immediately after BB.
        if (L->contains(BB) && !L->contains(PN))
          NewBB->moveBefore(PN->getParent());

        // Splitting the edge can reduce the number of PHI entries we have.
        e = PN->getNumIncomingValues();
        BB = NewBB;
        i = PN->getBasicBlockIndex(BB);
      }

      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
        Inserted.insert(std::make_pair(BB, static_cast<Value *>(0)));
      if (!Pair.second)
        PN->setIncomingValue(i, Pair.first->second);
      else {
        Value *FullV = Expand(LF, F, BB->getTerminator(), L, IVIncInsertPos,
                              Rewriter, DeadInsts, SE, DT);

        // If this is reuse-by-noop-cast, insert the noop cast.
        const Type *OpTy = LF.OperandValToReplace->getType();
        if (FullV->getType() != OpTy)
          FullV =
            CastInst::Create(CastInst::getCastOpcode(FullV, false,
                                                     OpTy, false),
                             FullV, LF.OperandValToReplace->getType(),
                             "tmp", BB->getTerminator());

        PN->setIncomingValue(i, FullV);
        Pair.first->second = FullV;
      }
    }
}

/// Rewrite - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding"), and update the UserInst to reference
/// the newly expanded value.
void LSRInstance::Rewrite(const LSRFixup &LF,
                          const Formula &F,
                          Loop *L, Instruction *IVIncInsertPos,
                          SCEVExpander &Rewriter,
                          SmallVectorImpl<WeakVH> &DeadInsts,
                          ScalarEvolution &SE, DominatorTree &DT,
                          Pass *P) const {
  // First, find an insertion point that dominates UserInst. For PHI nodes,
  // find the nearest block which dominates all the relevant uses.
  if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
    RewriteForPHI(PN, LF, F, L, IVIncInsertPos, Rewriter, DeadInsts, SE, DT, P);
  } else {
    Value *FullV = Expand(LF, F, LF.UserInst, L, IVIncInsertPos,
                          Rewriter, DeadInsts, SE, DT);

    // If this is reuse-by-noop-cast, insert the noop cast.
    const Type *OpTy = LF.OperandValToReplace->getType();
    if (FullV->getType() != OpTy) {
      Instruction *Cast =
        CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
                         FullV, OpTy, "tmp", LF.UserInst);
      FullV = Cast;
    }

    // Update the user. ICmpZero is handled specially here (for now) because
    // Expand may have updated one of the operands of the icmp already, and
    // its new value may happen to be equal to LF.OperandValToReplace, in
    // which case doing replaceUsesOfWith leads to replacing both operands
    // with the same value. TODO: Reorganize this.
    if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero)
      LF.UserInst->setOperand(0, FullV);
    else
      LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
  }

  DeadInsts.push_back(LF.OperandValToReplace);
}

void
LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                               Pass *P) {
  // Keep track of instructions we may have made dead, so that
  // we can remove them after we are done working.
  SmallVector<WeakVH, 16> DeadInsts;

  SCEVExpander Rewriter(SE);
  Rewriter.disableCanonicalMode();
  Rewriter.setIVIncInsertPos(L, IVIncInsertPos);

  // Expand the new value definitions and update the users.
  for (size_t i = 0, e = Fixups.size(); i != e; ++i) {
    size_t LUIdx = Fixups[i].LUIdx;

    Rewrite(Fixups[i], *Solution[LUIdx], L, IVIncInsertPos, Rewriter,
            DeadInsts, SE, DT, P);

    Changed = true;
  }

  // Clean up after ourselves. This must be done before deleting any
  // instructions.
  Rewriter.clear();

  Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
}

LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
  : IU(P->getAnalysis<IVUsers>()),
    SE(P->getAnalysis<ScalarEvolution>()),
    DT(P->getAnalysis<DominatorTree>()),
    TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {

  // If LoopSimplify form is not available, stay out of trouble.
  if (!L->isLoopSimplifyForm()) return;

  // If there's no interesting work to be done, bail early.
  if (IU.empty()) return;

  DEBUG(dbgs() << "\nLSR on loop ";
        WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
        dbgs() << ":\n");

  // If the IV is used in an int-to-float cast inside the loop, try to
  // eliminate the cast operation.
  OptimizeShadowIV();

  // Change the loop's terminating condition to use the post-inc IV when
  // possible.
  Changed |= OptimizeLoopTermCond();

  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();

  DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
        print_uses(dbgs()));

  // Now use the reuse data to generate a bunch of interesting ways
  // to formulate the values needed for the uses.
  GenerateAllReuseFormulae();

  DEBUG(dbgs() << "\n"
                  "After generating reuse formulae:\n";
        print_uses(dbgs()));

  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();

  SmallVector<const Formula *, 8> Solution;
  Solve(Solution);
  assert(Solution.size() == Uses.size() && "Malformed solution!");

  // Release memory that is no longer needed.
  Factors.clear();
  Types.clear();
  RegUses.clear();

#ifndef NDEBUG
  // Formulae should be legal.
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J)
      assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI) &&
             "Illegal formula generated!");
  }
#endif

  // Now that we've decided what we want, make it so.
  ImplementSolution(Solution, P);
}

void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
  if (Factors.empty() && Types.empty()) return;

  OS << "LSR has identified the following interesting factors and types: ";
  bool First = true;

  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '*' << *I;
  }

  for (SmallSetVector<const Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '(' << **I << ')';
  }
  OS << '\n';
}

void LSRInstance::print_fixups(raw_ostream &OS) const {
  OS << "LSR is examining the following fixup sites:\n";
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    const LSRFixup &LF = *I;
    OS << "  ";
    LF.print(OS);
    OS << '\n';
  }
}

void LSRInstance::print_uses(raw_ostream &OS) const {
  OS << "LSR is examining the following uses:\n";
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    OS << "  ";
    LU.print(OS);
    OS << '\n';
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J) {
      OS << "    ";
      J->print(OS);
      OS << '\n';
    }
  }
}

void LSRInstance::print(raw_ostream &OS) const {
  print_factors_and_types(OS);
  print_fixups(OS);
  print_uses(OS);
}

void LSRInstance::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

class LoopStrengthReduce : public LoopPass {
  /// TLI - Keep a pointer to a TargetLowering to consult for determining
  /// transformation profitability.
  const TargetLowering *const TLI;

public:
  static char ID; // Pass ID, replacement for typeid
  explicit LoopStrengthReduce(const TargetLowering *tli = 0);

private:
  bool runOnLoop(Loop *L, LPPassManager &LPM);
  void getAnalysisUsage(AnalysisUsage &AU) const;
};

}

char LoopStrengthReduce::ID = 0;
static RegisterPass<LoopStrengthReduce>
X("loop-reduce", "Loop Strength Reduction");

Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}

LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
  : LoopPass(&ID), TLI(tli) {}

void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
  // We split critical edges, so we change the CFG. However, we do update
  // many analyses if they are around.
  AU.addPreservedID(LoopSimplifyID);
  AU.addPreserved<LoopInfo>();
  AU.addPreserved("domfrontier");

  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<DominatorTree>();
  AU.addPreserved<DominatorTree>();
  AU.addRequired<ScalarEvolution>();
  AU.addPreserved<ScalarEvolution>();
  AU.addRequired<IVUsers>();
  AU.addPreserved<IVUsers>();
}

bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  bool Changed = false;

  // Run the main LSR transformation.
  Changed |= LSRInstance(TLI, L, this).getChanged();

  // At this point, it is worth checking to see if any recurrence PHIs are also
  // dead, so that we can remove them as well.
  Changed |= DeleteDeadPHIs(L->getHeader());

  return Changed;
}