//===-- InductiveRangeCheckElimination.cpp - ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
// The InductiveRangeCheckElimination pass splits a loop's iteration space into
// three disjoint ranges.  It does that in a way such that the loop running in
// the middle range provably does not need range checks.  As an example, it
// will convert
//
//   len = < known positive >
//   for (i = 0; i < n; i++) {
//     if (0 <= i && i < len) {
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//
// to
//
//   len = < known positive >
//   limit = smin(n, len)
//   // no first segment
//   for (i = 0; i < limit; i++) {
//     if (0 <= i && i < len) { // this check is fully redundant
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//   for (i = limit; i < n; i++) {
//     if (0 <= i && i < len) {
//       do_something();
//     } else {
//       throw_out_of_bounds();
//     }
//   }
//===----------------------------------------------------------------------===//

#include "llvm/ADT/Optional.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Pass.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
#include "llvm/Transforms/Utils/UnrollLoop.h"

using namespace llvm;

static cl::opt<unsigned> LoopSizeCutoff("irce-loop-size-cutoff", cl::Hidden,
                                        cl::init(64));

static cl::opt<bool> PrintChangedLoops("irce-print-changed-loops", cl::Hidden,
                                       cl::init(false));

static cl::opt<bool> PrintRangeChecks("irce-print-range-checks", cl::Hidden,
                                      cl::init(false));

static cl::opt<int> MaxExitProbReciprocal("irce-max-exit-prob-reciprocal",
                                          cl::Hidden, cl::init(10));

#define DEBUG_TYPE "irce"

namespace {

/// An inductive range check is a conditional branch in a loop with
///
///  1. a very cold successor (i.e. the branch jumps to that successor very
///     rarely)
///
///  and
///
///  2. a condition that is provably true for some contiguous range of values
///     taken by the containing loop's induction variable.
///
class InductiveRangeCheck {
  // Classifies a range check
  enum RangeCheckKind : unsigned {
    // Range check of the form "0 <= I".
    RANGE_CHECK_LOWER = 1,

    // Range check of the form "I < L" where L is known positive.
    RANGE_CHECK_UPPER = 2,

    // The logical and of the RANGE_CHECK_LOWER and RANGE_CHECK_UPPER
    // conditions.
    RANGE_CHECK_BOTH = RANGE_CHECK_LOWER | RANGE_CHECK_UPPER,

    // Unrecognized range check condition.
    RANGE_CHECK_UNKNOWN = (unsigned)-1
  };
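
  // As an illustration (hypothetical source, not from any particular
  // frontend): a bounds check written as `0 <= i && i < len` usually reaches
  // us either as two signed compares -- classified as RANGE_CHECK_LOWER and
  // RANGE_CHECK_UPPER and merged into RANGE_CHECK_BOTH below -- or as a
  // single unsigned compare `i u< len`, which is classified directly as
  // RANGE_CHECK_BOTH.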

  static StringRef rangeCheckKindToStr(RangeCheckKind);

  const SCEV *Offset = nullptr;
  const SCEV *Scale = nullptr;
  Value *Length = nullptr;
  Use *CheckUse = nullptr;
  RangeCheckKind Kind = RANGE_CHECK_UNKNOWN;

  static RangeCheckKind parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
                                            ScalarEvolution &SE, Value *&Index,
                                            Value *&Length);

  static void
  extractRangeChecksFromCond(Loop *L, ScalarEvolution &SE, Use &ConditionUse,
                             SmallVectorImpl<InductiveRangeCheck> &Checks,
                             SmallPtrSetImpl<Value *> &Visited);

public:
  const SCEV *getOffset() const { return Offset; }
  const SCEV *getScale() const { return Scale; }
  Value *getLength() const { return Length; }

  void print(raw_ostream &OS) const {
    OS << "InductiveRangeCheck:\n";
    OS << "  Kind: " << rangeCheckKindToStr(Kind) << "\n";
    OS << "  Offset: ";
    Offset->print(OS);
    OS << "  Scale: ";
    Scale->print(OS);
    OS << "  Length: ";
    if (Length)
      Length->print(OS);
    else
      OS << "(null)";
    OS << "\n  CheckUse: ";
    getCheckUse()->getUser()->print(OS);
    OS << " Operand: " << getCheckUse()->getOperandNo() << "\n";
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump() {
    print(dbgs());
  }
#endif

  Use *getCheckUse() const { return CheckUse; }

  /// Represents a signed integer range [Range.getBegin(), Range.getEnd()).  If
  /// R.getEnd() sle R.getBegin(), then R denotes the empty range.

  class Range {
    const SCEV *Begin;
    const SCEV *End;

  public:
    Range(const SCEV *Begin, const SCEV *End) : Begin(Begin), End(End) {
      assert(Begin->getType() == End->getType() && "ill-typed range!");
    }

    Type *getType() const { return Begin->getType(); }
    const SCEV *getBegin() const { return Begin; }
    const SCEV *getEnd() const { return End; }
  };

  /// This is the value the condition of the branch needs to evaluate to for
  /// the branch to take the hot successor (see (1) above).
  bool getPassingDirection() { return true; }

  /// Computes a range for the induction variable (IndVar) in which the range
  /// check is redundant and can be constant-folded away.  The induction
  /// variable is not required to be the canonical {0,+,1} induction variable.
  Optional<Range> computeSafeIterationSpace(ScalarEvolution &SE,
                                            const SCEVAddRecExpr *IndVar) const;

  /// Parse out a set of inductive range checks from \p BI and append them to
  /// \p Checks.
  ///
  /// NB! There may be conditions feeding into \p BI that aren't inductive
  /// range checks, and hence don't end up in \p Checks.
  static void
  extractRangeChecksFromBranch(BranchInst *BI, Loop *L, ScalarEvolution &SE,
                               BranchProbabilityInfo &BPI,
                               SmallVectorImpl<InductiveRangeCheck> &Checks);
};
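
// A typical range check recognized below looks roughly like the following IR
// (an illustrative sketch only; the value names and metadata are made up):
//
//   %in.bounds = icmp ult i32 %iv, %len
//   br i1 %in.bounds, label %guarded, label %out.of.bounds, !prof !0
//
// where the %out.of.bounds successor is very cold (e.g. it throws) and %iv is
// an affine add recurrence in the loop being optimized.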

class InductiveRangeCheckElimination : public LoopPass {
public:
  static char ID;
  InductiveRangeCheckElimination() : LoopPass(ID) {
    initializeInductiveRangeCheckEliminationPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override;
};

char InductiveRangeCheckElimination::ID = 0;
}

INITIALIZE_PASS_BEGIN(InductiveRangeCheckElimination, "irce",
                      "Inductive range check elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(BranchProbabilityInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_END(InductiveRangeCheckElimination, "irce",
                    "Inductive range check elimination", false, false)

StringRef InductiveRangeCheck::rangeCheckKindToStr(
    InductiveRangeCheck::RangeCheckKind RCK) {
  switch (RCK) {
  case InductiveRangeCheck::RANGE_CHECK_UNKNOWN:
    return "RANGE_CHECK_UNKNOWN";

  case InductiveRangeCheck::RANGE_CHECK_UPPER:
    return "RANGE_CHECK_UPPER";

  case InductiveRangeCheck::RANGE_CHECK_LOWER:
    return "RANGE_CHECK_LOWER";

  case InductiveRangeCheck::RANGE_CHECK_BOTH:
    return "RANGE_CHECK_BOTH";
  }

  llvm_unreachable("unknown range check type!");
}

/// Parse a single ICmp instruction, `ICI`, into a range check.  If `ICI`
/// cannot be interpreted as a range check, return `RANGE_CHECK_UNKNOWN` and
/// set `Index` and `Length` to `nullptr`.  Otherwise set `Index` to the value
/// being range checked, and set `Length` to the upper limit `Index` is being
/// range checked with if (and only if) the range check type is stronger or
/// equal to RANGE_CHECK_UPPER.
///
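/// For example (illustrative only): `icmp sge i32 %i, 0` is parsed as a
/// RANGE_CHECK_LOWER on %i, while `icmp ult i32 %i, %len` -- with %len known
/// non-negative and loop-invariant -- is parsed as a RANGE_CHECK_BOTH with
/// `Length` set to %len.
///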
InductiveRangeCheck::RangeCheckKind
InductiveRangeCheck::parseRangeCheckICmp(Loop *L, ICmpInst *ICI,
                                         ScalarEvolution &SE, Value *&Index,
                                         Value *&Length) {

  auto IsNonNegativeAndNotLoopVarying = [&SE, L](Value *V) {
    const SCEV *S = SE.getSCEV(V);
    if (isa<SCEVCouldNotCompute>(S))
      return false;

    return SE.getLoopDisposition(S, L) == ScalarEvolution::LoopInvariant &&
           SE.isKnownNonNegative(S);
  };

  using namespace llvm::PatternMatch;

  ICmpInst::Predicate Pred = ICI->getPredicate();
  Value *LHS = ICI->getOperand(0);
  Value *RHS = ICI->getOperand(1);

  switch (Pred) {
  default:
    return RANGE_CHECK_UNKNOWN;

  case ICmpInst::ICMP_SLE:
    std::swap(LHS, RHS);
    // fallthrough
  case ICmpInst::ICMP_SGE:
    if (match(RHS, m_ConstantInt<0>())) {
      Index = LHS;
      return RANGE_CHECK_LOWER;
    }
    return RANGE_CHECK_UNKNOWN;

  case ICmpInst::ICMP_SLT:
    std::swap(LHS, RHS);
    // fallthrough
  case ICmpInst::ICMP_SGT:
    if (match(RHS, m_ConstantInt<-1>())) {
      Index = LHS;
      return RANGE_CHECK_LOWER;
    }

    if (IsNonNegativeAndNotLoopVarying(LHS)) {
      Index = RHS;
      Length = LHS;
      return RANGE_CHECK_UPPER;
    }
    return RANGE_CHECK_UNKNOWN;

  case ICmpInst::ICMP_ULT:
    std::swap(LHS, RHS);
    // fallthrough
  case ICmpInst::ICMP_UGT:
    if (IsNonNegativeAndNotLoopVarying(LHS)) {
      Index = RHS;
      Length = LHS;
      return RANGE_CHECK_BOTH;
    }
    return RANGE_CHECK_UNKNOWN;
  }

  llvm_unreachable("default clause returns!");
}

void InductiveRangeCheck::extractRangeChecksFromCond(
    Loop *L, ScalarEvolution &SE, Use &ConditionUse,
    SmallVectorImpl<InductiveRangeCheck> &Checks,
    SmallPtrSetImpl<Value *> &Visited) {
  using namespace llvm::PatternMatch;

  Value *Condition = ConditionUse.get();
  if (!Visited.insert(Condition).second)
    return;

  if (match(Condition, m_And(m_Value(), m_Value()))) {
    SmallVector<InductiveRangeCheck, 8> SubChecks;
    extractRangeChecksFromCond(L, SE, cast<User>(Condition)->getOperandUse(0),
                               SubChecks, Visited);
    extractRangeChecksFromCond(L, SE, cast<User>(Condition)->getOperandUse(1),
                               SubChecks, Visited);

    if (SubChecks.size() == 2) {
      // Handle a special case where we know how to merge two checks separately
      // checking the upper and lower bounds into a full range check.
      const auto &RChkA = SubChecks[0];
      const auto &RChkB = SubChecks[1];
      if ((RChkA.Length == RChkB.Length || !RChkA.Length || !RChkB.Length) &&
          RChkA.Offset == RChkB.Offset && RChkA.Scale == RChkB.Scale) {

        // If RChkA.Kind == RChkB.Kind then we just found two identical checks.
        // But if one of them is a RANGE_CHECK_LOWER and the other is a
        // RANGE_CHECK_UPPER (only possibility if they're different) then
        // together they form a RANGE_CHECK_BOTH.
        SubChecks[0].Kind =
            (InductiveRangeCheck::RangeCheckKind)(RChkA.Kind | RChkB.Kind);
        SubChecks[0].Length = RChkA.Length ? RChkA.Length : RChkB.Length;
        SubChecks[0].CheckUse = &ConditionUse;

        // We updated one of the checks in place, now erase the other.
        SubChecks.pop_back();
      }
    }

    Checks.insert(Checks.end(), SubChecks.begin(), SubChecks.end());
    return;
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(Condition);
  if (!ICI)
    return;

  Value *Length = nullptr, *Index;
  auto RCKind = parseRangeCheckICmp(L, ICI, SE, Index, Length);
  if (RCKind == InductiveRangeCheck::RANGE_CHECK_UNKNOWN)
    return;

  const auto *IndexAddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Index));
  bool IsAffineIndex =
      IndexAddRec && (IndexAddRec->getLoop() == L) && IndexAddRec->isAffine();

  if (!IsAffineIndex)
    return;

  InductiveRangeCheck IRC;
  IRC.Length = Length;
  IRC.Offset = IndexAddRec->getStart();
  IRC.Scale = IndexAddRec->getStepRecurrence(SE);
  IRC.CheckUse = &ConditionUse;
  IRC.Kind = RCKind;
  Checks.push_back(IRC);
}

void InductiveRangeCheck::extractRangeChecksFromBranch(
    BranchInst *BI, Loop *L, ScalarEvolution &SE, BranchProbabilityInfo &BPI,
    SmallVectorImpl<InductiveRangeCheck> &Checks) {

  if (BI->isUnconditional() || BI->getParent() == L->getLoopLatch())
    return;

  BranchProbability LikelyTaken(15, 16);

  if (BPI.getEdgeProbability(BI->getParent(), (unsigned)0) < LikelyTaken)
    return;

  SmallPtrSet<Value *, 8> Visited;
  InductiveRangeCheck::extractRangeChecksFromCond(L, SE, BI->getOperandUse(0),
                                                  Checks, Visited);
}

namespace {

// Keeps track of the structure of a loop.  This is similar to llvm::Loop,
// except that it is more lightweight and can track the state of a loop through
// changing and potentially invalid IR.  This structure also formalizes the
// kinds of loops we can deal with -- ones that have a single latch that is
// also an exiting block *and* have a canonical induction variable.
struct LoopStructure {
  const char *Tag;

  BasicBlock *Header;
  BasicBlock *Latch;

  // `Latch's terminator instruction is `LatchBr', and its `LatchBrExitIdx'th
  // successor is `LatchExit', the exit block of the loop.
  BranchInst *LatchBr;
  BasicBlock *LatchExit;
  unsigned LatchBrExitIdx;

  Value *IndVarNext;
  Value *IndVarStart;
  Value *LoopExitAt;
  bool IndVarIncreasing;

  LoopStructure()
      : Tag(""), Header(nullptr), Latch(nullptr), LatchBr(nullptr),
        LatchExit(nullptr), LatchBrExitIdx(-1), IndVarNext(nullptr),
        IndVarStart(nullptr), LoopExitAt(nullptr), IndVarIncreasing(false) {}

  template <typename M> LoopStructure map(M Map) const {
    LoopStructure Result;
    Result.Tag = Tag;
    Result.Header = cast<BasicBlock>(Map(Header));
    Result.Latch = cast<BasicBlock>(Map(Latch));
    Result.LatchBr = cast<BranchInst>(Map(LatchBr));
    Result.LatchExit = cast<BasicBlock>(Map(LatchExit));
    Result.LatchBrExitIdx = LatchBrExitIdx;
    Result.IndVarNext = Map(IndVarNext);
    Result.IndVarStart = Map(IndVarStart);
    Result.LoopExitAt = Map(LoopExitAt);
    Result.IndVarIncreasing = IndVarIncreasing;
    return Result;
  }

  static Optional<LoopStructure> parseLoopStructure(ScalarEvolution &,
                                                    BranchProbabilityInfo &BPI,
                                                    Loop &,
                                                    const char *&);
};

/// This class is used to constrain loops to run within a given iteration
/// space.  The algorithm this class implements is given a Loop and a range
/// [Begin, End).  The algorithm then tries to break a "main loop" out of the
/// loop it is given in a way that the "main loop" runs with the induction
/// variable in a subset of [Begin, End).  The algorithm emits appropriate pre
/// and post loops to run any remaining iterations.  The pre loop runs any
/// iterations in which the induction variable is < Begin, and the post loop
/// runs any iterations in which the induction variable is >= End.
///
class LoopConstrainer {
  // The representation of a clone of the original loop we started out with.
  struct ClonedLoop {
    // The cloned blocks
    std::vector<BasicBlock *> Blocks;

    // `Map` maps values in the clonee into values in the cloned version
    ValueToValueMapTy Map;

    // An instance of `LoopStructure` for the cloned loop
    LoopStructure Structure;
  };

  // Result of rewriting the range of a loop.  See changeIterationSpaceEnd for
  // more details on what these fields mean.
  struct RewrittenRangeInfo {
    BasicBlock *PseudoExit;
    BasicBlock *ExitSelector;
    std::vector<PHINode *> PHIValuesAtPseudoExit;
    PHINode *IndVarEnd;

    RewrittenRangeInfo()
        : PseudoExit(nullptr), ExitSelector(nullptr), IndVarEnd(nullptr) {}
  };

  // Calculated subranges we restrict the iteration space of the main loop to.
  // See the implementation of `calculateSubRanges' for more details on how
  // these fields are computed.  `LowLimit` is None if there is no restriction
  // on the low end of the restricted iteration space of the main loop.
  // `HighLimit` is None if there is no restriction on the high end of the
  // restricted iteration space of the main loop.

  struct SubRanges {
    Optional<const SCEV *> LowLimit;
    Optional<const SCEV *> HighLimit;
  };

  // A utility function that does a `replaceUsesOfWith' on the incoming block
  // set of a `PHINode' -- replaces instances of `Block' in the `PHINode's
  // incoming block list with `ReplaceBy'.
  static void replacePHIBlock(PHINode *PN, BasicBlock *Block,
                              BasicBlock *ReplaceBy);

  // Compute a safe set of limits for the main loop to run in -- effectively
  // the intersection of `Range' and the iteration space of the original loop.
  // Return None if unable to compute the set of subranges.
  //
  Optional<SubRanges> calculateSubRanges() const;

  // Clone `OriginalLoop' and return the result in CLResult.  The IR after
  // running `cloneLoop' is well formed except for the PHI nodes in CLResult --
  // the PHI nodes say that there is an incoming edge from `OriginalPreheader`
  // but there is no such edge.
  //
  void cloneLoop(ClonedLoop &CLResult, const char *Tag) const;

  // Rewrite the iteration space of the loop denoted by (LS, Preheader).  The
  // iteration space of the rewritten loop ends at ExitLoopAt.  The start of
  // the iteration space is not changed.  `ExitLoopAt' is assumed to be slt
  // `OriginalHeaderCount'.
  //
  // If there are iterations left to execute, control is made to jump to
  // `ContinuationBlock', otherwise they take the normal loop exit.  The
  // returned `RewrittenRangeInfo' object is populated as follows:
  //
  //  .PseudoExit is a basic block that unconditionally branches to
  //  `ContinuationBlock'.
  //
  //  .ExitSelector is a basic block that decides, on exit from the loop,
  //  whether to branch to the "true" exit or to `PseudoExit'.
  //
  //  .PHIValuesAtPseudoExit are PHINodes in `PseudoExit' that compute the
  //  value for each PHINode in the loop header on taking the pseudo exit.
  //
  // After changeIterationSpaceEnd, `Preheader' is no longer a legitimate
  // preheader because it is made to branch to the loop header only
  // conditionally.
  //
  RewrittenRangeInfo
  changeIterationSpaceEnd(const LoopStructure &LS, BasicBlock *Preheader,
                          Value *ExitLoopAt,
                          BasicBlock *ContinuationBlock) const;

  // The loop denoted by `LS' has `OldPreheader' as its preheader.  This
  // function creates a new preheader for `LS' and returns it.
  //
  BasicBlock *createPreheader(const LoopStructure &LS, BasicBlock *OldPreheader,
                              const char *Tag) const;

  // `ContinuationBlockAndPreheader' was the continuation block for some call
  // to `changeIterationSpaceEnd' and is the preheader to the loop denoted by
  // `LS'.  This function rewrites the PHI nodes in `LS.Header' to start with
  // the correct value.
  void rewriteIncomingValuesForPHIs(
      LoopStructure &LS, BasicBlock *ContinuationBlockAndPreheader,
      const LoopConstrainer::RewrittenRangeInfo &RRI) const;

  // Even though we do not preserve any passes at this time, we at least need
  // to keep the parent loop structure consistent.  The `LPPassManager' seems
  // to verify this after running a loop pass.  This function adds the list of
  // blocks denoted by BBs to this loop's parent loop if required.
  void addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs);

  // Some global state.
  Function &F;
  LLVMContext &Ctx;
  ScalarEvolution &SE;

  // Information about the original loop we started out with.
  Loop &OriginalLoop;
  LoopInfo &OriginalLoopInfo;
  const SCEV *LatchTakenCount;
  BasicBlock *OriginalPreheader;

  // The preheader of the main loop.  This may or may not be different from
  // `OriginalPreheader'.
  BasicBlock *MainLoopPreheader;

  // The range we need to run the main loop in.
  InductiveRangeCheck::Range Range;

  // The structure of the main loop (see comment at the beginning of this class
  // for a definition)
  LoopStructure MainLoopStructure;

public:
  LoopConstrainer(Loop &L, LoopInfo &LI, const LoopStructure &LS,
                  ScalarEvolution &SE, InductiveRangeCheck::Range R)
      : F(*L.getHeader()->getParent()), Ctx(L.getHeader()->getContext()),
        SE(SE), OriginalLoop(L), OriginalLoopInfo(LI), LatchTakenCount(nullptr),
        OriginalPreheader(nullptr), MainLoopPreheader(nullptr), Range(R),
        MainLoopStructure(LS) {}

  // Entry point for the algorithm.  Returns true on success.
  bool run();
};

}

void LoopConstrainer::replacePHIBlock(PHINode *PN, BasicBlock *Block,
                                      BasicBlock *ReplaceBy) {
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingBlock(i) == Block)
      PN->setIncomingBlock(i, ReplaceBy);
}

static bool CanBeSMax(ScalarEvolution &SE, const SCEV *S) {
  APInt SMax =
      APInt::getSignedMaxValue(cast<IntegerType>(S->getType())->getBitWidth());
  return SE.getSignedRange(S).contains(SMax) &&
         SE.getUnsignedRange(S).contains(SMax);
}

static bool CanBeSMin(ScalarEvolution &SE, const SCEV *S) {
  APInt SMin =
      APInt::getSignedMinValue(cast<IntegerType>(S->getType())->getBitWidth());
  return SE.getSignedRange(S).contains(SMin) &&
         SE.getUnsignedRange(S).contains(SMin);
}

Optional<LoopStructure>
LoopStructure::parseLoopStructure(ScalarEvolution &SE,
                                  BranchProbabilityInfo &BPI, Loop &L,
                                  const char *&FailureReason) {
  assert(L.isLoopSimplifyForm() && "should follow from addRequired<>");

  BasicBlock *Latch = L.getLoopLatch();
  if (!L.isLoopExiting(Latch)) {
    FailureReason = "no loop latch";
    return None;
  }

  BasicBlock *Header = L.getHeader();
  BasicBlock *Preheader = L.getLoopPreheader();
  if (!Preheader) {
    FailureReason = "no preheader";
    return None;
  }

  BranchInst *LatchBr = dyn_cast<BranchInst>(Latch->getTerminator());
  if (!LatchBr || LatchBr->isUnconditional()) {
    FailureReason = "latch terminator not conditional branch";
    return None;
  }

  unsigned LatchBrExitIdx = LatchBr->getSuccessor(0) == Header ? 1 : 0;

  BranchProbability ExitProbability =
      BPI.getEdgeProbability(LatchBr->getParent(), LatchBrExitIdx);

  if (ExitProbability > BranchProbability(1, MaxExitProbReciprocal)) {
    FailureReason = "short running loop, not profitable";
    return None;
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(LatchBr->getCondition());
  if (!ICI || !isa<IntegerType>(ICI->getOperand(0)->getType())) {
    FailureReason = "latch terminator branch not conditional on integral icmp";
    return None;
  }

  const SCEV *LatchCount = SE.getExitCount(&L, Latch);
  if (isa<SCEVCouldNotCompute>(LatchCount)) {
    FailureReason = "could not compute latch count";
    return None;
  }

  ICmpInst::Predicate Pred = ICI->getPredicate();
  Value *LeftValue = ICI->getOperand(0);
  const SCEV *LeftSCEV = SE.getSCEV(LeftValue);
  IntegerType *IndVarTy = cast<IntegerType>(LeftValue->getType());

  Value *RightValue = ICI->getOperand(1);
  const SCEV *RightSCEV = SE.getSCEV(RightValue);

  // We canonicalize `ICI` such that `LeftSCEV` is an add recurrence.
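  // For example, `icmp sgt i32 %n, %iv.next` is handled as if it had been
  // written `icmp slt i32 %iv.next, %n`, so the add recurrence ends up on the
  // left-hand side (the value names here are illustrative only).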
  if (!isa<SCEVAddRecExpr>(LeftSCEV)) {
    if (isa<SCEVAddRecExpr>(RightSCEV)) {
      std::swap(LeftSCEV, RightSCEV);
      std::swap(LeftValue, RightValue);
      Pred = ICmpInst::getSwappedPredicate(Pred);
    } else {
      FailureReason = "no add recurrences in the icmp";
      return None;
    }
  }

  auto HasNoSignedWrap = [&](const SCEVAddRecExpr *AR) {
    if (AR->getNoWrapFlags(SCEV::FlagNSW))
      return true;

    IntegerType *Ty = cast<IntegerType>(AR->getType());
    IntegerType *WideTy =
        IntegerType::get(Ty->getContext(), Ty->getBitWidth() * 2);

    const SCEVAddRecExpr *ExtendAfterOp =
        dyn_cast<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
    if (ExtendAfterOp) {
      const SCEV *ExtendedStart = SE.getSignExtendExpr(AR->getStart(), WideTy);
      const SCEV *ExtendedStep =
          SE.getSignExtendExpr(AR->getStepRecurrence(SE), WideTy);

      bool NoSignedWrap = ExtendAfterOp->getStart() == ExtendedStart &&
                          ExtendAfterOp->getStepRecurrence(SE) == ExtendedStep;

      if (NoSignedWrap)
        return true;
    }

    // We may have proved this when computing the sign extension above.
    return AR->getNoWrapFlags(SCEV::FlagNSW) != SCEV::FlagAnyWrap;
  };

  auto IsInductionVar = [&](const SCEVAddRecExpr *AR, bool &IsIncreasing) {
    if (!AR->isAffine())
      return false;

    // Currently we only work with induction variables that have been proved
    // to not wrap.  This restriction can potentially be lifted in the future.

    if (!HasNoSignedWrap(AR))
      return false;

    if (const SCEVConstant *StepExpr =
            dyn_cast<SCEVConstant>(AR->getStepRecurrence(SE))) {
      ConstantInt *StepCI = StepExpr->getValue();
      if (StepCI->isOne() || StepCI->isMinusOne()) {
        IsIncreasing = StepCI->isOne();
        return true;
      }
    }

    return false;
  };

  // `ICI` is interpreted as taking the backedge if the *next* value of the
  // induction variable satisfies some constraint.

  const SCEVAddRecExpr *IndVarNext = cast<SCEVAddRecExpr>(LeftSCEV);
  bool IsIncreasing = false;
  if (!IsInductionVar(IndVarNext, IsIncreasing)) {
    FailureReason = "LHS in icmp not induction variable";
    return None;
  }

  ConstantInt *One = ConstantInt::get(IndVarTy, 1);
  // TODO: generalize the predicates here to also match their unsigned
  // variants.
  if (IsIncreasing) {
    bool FoundExpectedPred =
        (Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 1) ||
        (Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 0);

    if (!FoundExpectedPred) {
      FailureReason = "expected icmp slt semantically, found something else";
      return None;
    }

    if (LatchBrExitIdx == 0) {
      if (CanBeSMax(SE, RightSCEV)) {
        // TODO: this restriction is easily removable -- we just have to
        // remember that the icmp was an slt and not an sle.
        FailureReason = "limit may overflow when coercing sle to slt";
        return None;
      }

      IRBuilder<> B(Preheader->getTerminator());
      RightValue = B.CreateAdd(RightValue, One);
    }

  } else {
    bool FoundExpectedPred =
        (Pred == ICmpInst::ICMP_SGT && LatchBrExitIdx == 1) ||
        (Pred == ICmpInst::ICMP_SLT && LatchBrExitIdx == 0);

    if (!FoundExpectedPred) {
      FailureReason = "expected icmp sgt semantically, found something else";
      return None;
    }

    if (LatchBrExitIdx == 0) {
      if (CanBeSMin(SE, RightSCEV)) {
        // TODO: this restriction is easily removable -- we just have to
        // remember that the icmp was an sgt and not an sge.
        FailureReason = "limit may overflow when coercing sge to sgt";
        return None;
      }

      IRBuilder<> B(Preheader->getTerminator());
      RightValue = B.CreateSub(RightValue, One);
    }
  }

  const SCEV *StartNext = IndVarNext->getStart();
  const SCEV *Addend = SE.getNegativeSCEV(IndVarNext->getStepRecurrence(SE));
  const SCEV *IndVarStart = SE.getAddExpr(StartNext, Addend);

  BasicBlock *LatchExit = LatchBr->getSuccessor(LatchBrExitIdx);

  assert(SE.getLoopDisposition(LatchCount, &L) ==
             ScalarEvolution::LoopInvariant &&
         "loop variant exit count doesn't make sense!");

  assert(!L.contains(LatchExit) && "expected an exit block!");
  const DataLayout &DL = Preheader->getModule()->getDataLayout();
  Value *IndVarStartV =
      SCEVExpander(SE, DL, "irce")
          .expandCodeFor(IndVarStart, IndVarTy, Preheader->getTerminator());
  IndVarStartV->setName("indvar.start");

  LoopStructure Result;

  Result.Tag = "main";
  Result.Header = Header;
  Result.Latch = Latch;
  Result.LatchBr = LatchBr;
  Result.LatchExit = LatchExit;
  Result.LatchBrExitIdx = LatchBrExitIdx;
  Result.IndVarStart = IndVarStartV;
  Result.IndVarNext = LeftValue;
  Result.IndVarIncreasing = IsIncreasing;
  Result.LoopExitAt = RightValue;

  FailureReason = nullptr;

  return Result;
}

Optional<LoopConstrainer::SubRanges>
LoopConstrainer::calculateSubRanges() const {
  IntegerType *Ty = cast<IntegerType>(LatchTakenCount->getType());

  if (Range.getType() != Ty)
    return None;

  LoopConstrainer::SubRanges Result;

  // I think we can be more aggressive here and make this nuw / nsw if the
  // addition that feeds into the icmp for the latch's terminating branch is
  // nuw / nsw.  In any case, a wrapping 2's complement addition is safe.
  ConstantInt *One = ConstantInt::get(Ty, 1);
  const SCEV *Start = SE.getSCEV(MainLoopStructure.IndVarStart);
  const SCEV *End = SE.getSCEV(MainLoopStructure.LoopExitAt);

  bool Increasing = MainLoopStructure.IndVarIncreasing;

  // We compute `Smallest` and `Greatest` such that [Smallest, Greatest) is the
  // range of values the induction variable takes.

  const SCEV *Smallest = nullptr, *Greatest = nullptr;

  if (Increasing) {
    Smallest = Start;
    Greatest = End;
  } else {
    // These two computations may sign-overflow.  Here is why that is okay:
    //
    // We know that the induction variable does not sign-overflow on any
    // iteration except the last one, and it starts at `Start` and ends at
    // `End`, decrementing by one every time.
    //
    //  * if `Smallest` sign-overflows we know `End` is `INT_SMAX`.  Since the
    //    induction variable is decreasing we know that the smallest value
    //    the loop body is actually executed with is `INT_SMIN` == `Smallest`.
    //
    //  * if `Greatest` sign-overflows, we know it can only be `INT_SMIN`.  In
    //    that case, `Clamp` will always return `Smallest` and
    //    [`Result.LowLimit`, `Result.HighLimit`) = [`Smallest`, `Smallest`)
    //    will be an empty range.  Returning an empty range is always safe.
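    //
    // As a concrete (purely illustrative) i8 example: if Start = 5 and
    // End = -3, the loop body runs with the values 5, 4, ..., -2, so
    // [Smallest, Greatest) = [End + 1, Start + 1) = [-2, 6).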
    //

    Smallest = SE.getAddExpr(End, SE.getSCEV(One));
    Greatest = SE.getAddExpr(Start, SE.getSCEV(One));
  }

  auto Clamp = [this, Smallest, Greatest](const SCEV *S) {
    return SE.getSMaxExpr(Smallest, SE.getSMinExpr(Greatest, S));
  };

  // In some cases we can prove that we don't need a pre or post loop

  bool ProvablyNoPreloop =
      SE.isKnownPredicate(ICmpInst::ICMP_SLE, Range.getBegin(), Smallest);
  if (!ProvablyNoPreloop)
    Result.LowLimit = Clamp(Range.getBegin());

  bool ProvablyNoPostLoop =
      SE.isKnownPredicate(ICmpInst::ICMP_SLE, Greatest, Range.getEnd());
  if (!ProvablyNoPostLoop)
    Result.HighLimit = Clamp(Range.getEnd());

  return Result;
}

void LoopConstrainer::cloneLoop(LoopConstrainer::ClonedLoop &Result,
                                const char *Tag) const {
  for (BasicBlock *BB : OriginalLoop.getBlocks()) {
    BasicBlock *Clone = CloneBasicBlock(BB, Result.Map, Twine(".") + Tag, &F);
    Result.Blocks.push_back(Clone);
    Result.Map[BB] = Clone;
  }

  auto GetClonedValue = [&Result](Value *V) {
    assert(V && "null values not in domain!");
    auto It = Result.Map.find(V);
    if (It == Result.Map.end())
      return V;
    return static_cast<Value *>(It->second);
  };

  Result.Structure = MainLoopStructure.map(GetClonedValue);
  Result.Structure.Tag = Tag;

  for (unsigned i = 0, e = Result.Blocks.size(); i != e; ++i) {
    BasicBlock *ClonedBB = Result.Blocks[i];
    BasicBlock *OriginalBB = OriginalLoop.getBlocks()[i];

    assert(Result.Map[OriginalBB] == ClonedBB && "invariant!");

    for (Instruction &I : *ClonedBB)
      RemapInstruction(&I, Result.Map,
                       RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);

    // Exit blocks will now have one more predecessor and their PHI nodes need
    // to be edited to reflect that.  No phi nodes need to be introduced
    // because the loop is in LCSSA.

    for (auto SBBI = succ_begin(OriginalBB), SBBE = succ_end(OriginalBB);
         SBBI != SBBE; ++SBBI) {

      if (OriginalLoop.contains(*SBBI))
        continue; // not an exit block

      for (Instruction &I : **SBBI) {
        if (!isa<PHINode>(&I))
          break;

        PHINode *PN = cast<PHINode>(&I);
        Value *OldIncoming = PN->getIncomingValueForBlock(OriginalBB);
        PN->addIncoming(GetClonedValue(OldIncoming), ClonedBB);
      }
    }
  }
}

LoopConstrainer::RewrittenRangeInfo LoopConstrainer::changeIterationSpaceEnd(
    const LoopStructure &LS, BasicBlock *Preheader, Value *ExitSubloopAt,
    BasicBlock *ContinuationBlock) const {

  // We start with a loop with a single latch:
  //
  //    +--------------------+
  //    |                    |
  //    |   preheader        |
  //    |                    |
  //    +--------+-----------+
  //             |      ----------------\
  //             |     /                |
  //    +--------v----v------+          |
  //    |                    |          |
  //    |      header        |          |
  //    |                    |          |
  //    +--------------------+          |
  //                                    |
  //            .....................   |
  //                                    |
  //    +--------------------+          |
  //    |                    |          |
  //    |      latch         >----------/
  //    |                    |
  //    +-------v------------+
  //            |
  //            |
  //            |   +--------------------+
  //            |   |                    |
  //            +--->   original exit    |
  //                |                    |
  //                +--------------------+
  //
  // We change the control flow to look like
  //
  //
  //    +--------------------+
  //    |                    |
  //    |   preheader        >-------------------------+
  //    |                    |                         |
  //    +--------v-----------+                         |
  //             |    /-------------+                  |
  //             |   /              |                  |
  //    +--------v--v--------+      |                  |
  //    |                    |      |                  |
  //    |      header        |      |   +--------+     |
  //    |                    |      |   |        |     |
  //    +--------------------+      |   |  +-----v-----v-----------+
  //                                |   |  |                       |
  //                                |   |  |     .pseudo.exit      |
  //                                |   |  |                       |
  //                                |   |  +-----------v-----------+
  //                                |   |              |
  //            .....................   |              |
  //                                    |   +--------v-------------+
  //    +--------------------+          |   |                      |
  //    |                    |          |   |   ContinuationBlock  |
  //    |      latch         >----------+   |                      |
  //    |                    |              +----------------------+
  //    +---------v----------+
  //              |
  //              |
  //              |     +---------------^-----+
  //              |     |                     |
  //              +----->    .exit.selector   |
  //                    |                     |
  //                    +----------v----------+
  //                               |
  //     +--------------------+    |
  //     |                    |    |
  //     |   original exit    <----+
  //     |                    |
  //     +--------------------+
  //

  RewrittenRangeInfo RRI;

  auto BBInsertLocation = std::next(Function::iterator(LS.Latch));
  RRI.ExitSelector = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".exit.selector",
                                        &F, &*BBInsertLocation);
  RRI.PseudoExit = BasicBlock::Create(Ctx, Twine(LS.Tag) + ".pseudo.exit", &F,
                                      &*BBInsertLocation);

  BranchInst *PreheaderJump = cast<BranchInst>(Preheader->getTerminator());
  bool Increasing = LS.IndVarIncreasing;

  IRBuilder<> B(PreheaderJump);

  // EnterLoopCond - is it okay to start executing this `LS'?
  Value *EnterLoopCond = Increasing
                             ? B.CreateICmpSLT(LS.IndVarStart, ExitSubloopAt)
                             : B.CreateICmpSGT(LS.IndVarStart, ExitSubloopAt);

  B.CreateCondBr(EnterLoopCond, LS.Header, RRI.PseudoExit);
  PreheaderJump->eraseFromParent();

  LS.LatchBr->setSuccessor(LS.LatchBrExitIdx, RRI.ExitSelector);
  B.SetInsertPoint(LS.LatchBr);
  Value *TakeBackedgeLoopCond =
      Increasing ? B.CreateICmpSLT(LS.IndVarNext, ExitSubloopAt)
                 : B.CreateICmpSGT(LS.IndVarNext, ExitSubloopAt);
  Value *CondForBranch = LS.LatchBrExitIdx == 1
                             ? TakeBackedgeLoopCond
                             : B.CreateNot(TakeBackedgeLoopCond);

  LS.LatchBr->setCondition(CondForBranch);

  B.SetInsertPoint(RRI.ExitSelector);

  // IterationsLeft - are there any more iterations left, given the original
  // upper bound on the induction variable?  If not, we branch to the "real"
  // exit.
  Value *IterationsLeft = Increasing
                              ? B.CreateICmpSLT(LS.IndVarNext, LS.LoopExitAt)
                              : B.CreateICmpSGT(LS.IndVarNext, LS.LoopExitAt);
  B.CreateCondBr(IterationsLeft, RRI.PseudoExit, LS.LatchExit);

  BranchInst *BranchToContinuation =
      BranchInst::Create(ContinuationBlock, RRI.PseudoExit);

  // We emit PHI nodes into `RRI.PseudoExit' that compute the "latest" value of
  // each of the PHI nodes in the loop header.  This feeds into the initial
  // value of the same PHI nodes if/when we continue execution.
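  //
  // Schematically (with illustrative value names), a header PHI such as
  //
  //   %x = phi [ %x.start, %preheader ], [ %x.next, %latch ]
  //
  // gets a counterpart in `RRI.PseudoExit' of the form
  //
  //   %x.copy = phi [ %x.start, %preheader ], [ %x.next, %exit.selector ]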
  for (Instruction &I : *LS.Header) {
    if (!isa<PHINode>(&I))
      break;

    PHINode *PN = cast<PHINode>(&I);

    PHINode *NewPHI = PHINode::Create(PN->getType(), 2, PN->getName() + ".copy",
                                      BranchToContinuation);

    NewPHI->addIncoming(PN->getIncomingValueForBlock(Preheader), Preheader);
    NewPHI->addIncoming(PN->getIncomingValueForBlock(LS.Latch),
                        RRI.ExitSelector);
    RRI.PHIValuesAtPseudoExit.push_back(NewPHI);
  }

  RRI.IndVarEnd = PHINode::Create(LS.IndVarNext->getType(), 2, "indvar.end",
                                  BranchToContinuation);
  RRI.IndVarEnd->addIncoming(LS.IndVarStart, Preheader);
  RRI.IndVarEnd->addIncoming(LS.IndVarNext, RRI.ExitSelector);

  // The latch exit now has a branch from `RRI.ExitSelector' instead of
  // `LS.Latch'.  The PHI nodes need to be updated to reflect that.
  for (Instruction &I : *LS.LatchExit) {
    if (PHINode *PN = dyn_cast<PHINode>(&I))
      replacePHIBlock(PN, LS.Latch, RRI.ExitSelector);
    else
      break;
  }

  return RRI;
}

void LoopConstrainer::rewriteIncomingValuesForPHIs(
    LoopStructure &LS, BasicBlock *ContinuationBlock,
    const LoopConstrainer::RewrittenRangeInfo &RRI) const {

  unsigned PHIIndex = 0;
  for (Instruction &I : *LS.Header) {
    if (!isa<PHINode>(&I))
      break;

    PHINode *PN = cast<PHINode>(&I);

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
      if (PN->getIncomingBlock(i) == ContinuationBlock)
        PN->setIncomingValue(i, RRI.PHIValuesAtPseudoExit[PHIIndex++]);
  }

  LS.IndVarStart = RRI.IndVarEnd;
}

BasicBlock *LoopConstrainer::createPreheader(const LoopStructure &LS,
                                             BasicBlock *OldPreheader,
                                             const char *Tag) const {

  BasicBlock *Preheader = BasicBlock::Create(Ctx, Tag, &F, LS.Header);
  BranchInst::Create(LS.Header, Preheader);

  for (Instruction &I : *LS.Header) {
    if (!isa<PHINode>(&I))
      break;

    PHINode *PN = cast<PHINode>(&I);
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i < e; ++i)
      replacePHIBlock(PN, OldPreheader, Preheader);
  }

  return Preheader;
}

void LoopConstrainer::addToParentLoopIfNeeded(ArrayRef<BasicBlock *> BBs) {
  Loop *ParentLoop = OriginalLoop.getParentLoop();
  if (!ParentLoop)
    return;

  for (BasicBlock *BB : BBs)
    ParentLoop->addBasicBlockToLoop(BB, OriginalLoopInfo);
}

bool LoopConstrainer::run() {
  BasicBlock *Preheader = nullptr;
  LatchTakenCount = SE.getExitCount(&OriginalLoop, MainLoopStructure.Latch);
  Preheader = OriginalLoop.getLoopPreheader();
  assert(!isa<SCEVCouldNotCompute>(LatchTakenCount) && Preheader != nullptr &&
         "preconditions!");

  OriginalPreheader = Preheader;
  MainLoopPreheader = Preheader;

  Optional<SubRanges> MaybeSR = calculateSubRanges();
  if (!MaybeSR.hasValue()) {
    DEBUG(dbgs() << "irce: could not compute subranges\n");
    return false;
  }

  SubRanges SR = MaybeSR.getValue();
  bool Increasing = MainLoopStructure.IndVarIncreasing;
  IntegerType *IVTy =
      cast<IntegerType>(MainLoopStructure.IndVarNext->getType());

  SCEVExpander Expander(SE, F.getParent()->getDataLayout(), "irce");
  Instruction *InsertPt = OriginalPreheader->getTerminator();

  // It would have been better to make `PreLoop' and `PostLoop'
  // `Optional<ClonedLoop>'s, but `ValueToValueMapTy' does not have a copy
  // constructor.
  ClonedLoop PreLoop, PostLoop;
  bool NeedsPreLoop =
      Increasing ? SR.LowLimit.hasValue() : SR.HighLimit.hasValue();
  bool NeedsPostLoop =
      Increasing ? SR.HighLimit.hasValue() : SR.LowLimit.hasValue();

  Value *ExitPreLoopAt = nullptr;
  Value *ExitMainLoopAt = nullptr;
  const SCEVConstant *MinusOneS =
      cast<SCEVConstant>(SE.getConstant(IVTy, -1, true /* isSigned */));

  if (NeedsPreLoop) {
    const SCEV *ExitPreLoopAtSCEV = nullptr;

    if (Increasing)
      ExitPreLoopAtSCEV = *SR.LowLimit;
    else {
      if (CanBeSMin(SE, *SR.HighLimit)) {
        DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
                     << "preloop exit limit.  HighLimit = " << *(*SR.HighLimit)
                     << "\n");
        return false;
      }
      ExitPreLoopAtSCEV = SE.getAddExpr(*SR.HighLimit, MinusOneS);
    }

    ExitPreLoopAt = Expander.expandCodeFor(ExitPreLoopAtSCEV, IVTy, InsertPt);
    ExitPreLoopAt->setName("exit.preloop.at");
  }

  if (NeedsPostLoop) {
    const SCEV *ExitMainLoopAtSCEV = nullptr;

    if (Increasing)
      ExitMainLoopAtSCEV = *SR.HighLimit;
    else {
      if (CanBeSMin(SE, *SR.LowLimit)) {
        DEBUG(dbgs() << "irce: could not prove no-overflow when computing "
                     << "mainloop exit limit.  LowLimit = " << *(*SR.LowLimit)
                     << "\n");
        return false;
      }
      ExitMainLoopAtSCEV = SE.getAddExpr(*SR.LowLimit, MinusOneS);
    }

    ExitMainLoopAt = Expander.expandCodeFor(ExitMainLoopAtSCEV, IVTy, InsertPt);
    ExitMainLoopAt->setName("exit.mainloop.at");
  }

  // We clone these ahead of time so that we don't have to deal with changing
  // and temporarily invalid IR as we transform the loops.
  if (NeedsPreLoop)
    cloneLoop(PreLoop, "preloop");
  if (NeedsPostLoop)
    cloneLoop(PostLoop, "postloop");

  RewrittenRangeInfo PreLoopRRI;

  if (NeedsPreLoop) {
    Preheader->getTerminator()->replaceUsesOfWith(MainLoopStructure.Header,
                                                  PreLoop.Structure.Header);

    MainLoopPreheader =
        createPreheader(MainLoopStructure, Preheader, "mainloop");
    PreLoopRRI = changeIterationSpaceEnd(PreLoop.Structure, Preheader,
                                         ExitPreLoopAt, MainLoopPreheader);
    rewriteIncomingValuesForPHIs(MainLoopStructure, MainLoopPreheader,
                                 PreLoopRRI);
  }

  BasicBlock *PostLoopPreheader = nullptr;
  RewrittenRangeInfo PostLoopRRI;

  if (NeedsPostLoop) {
    PostLoopPreheader =
        createPreheader(PostLoop.Structure, Preheader, "postloop");
    PostLoopRRI = changeIterationSpaceEnd(MainLoopStructure, MainLoopPreheader,
                                          ExitMainLoopAt, PostLoopPreheader);
    rewriteIncomingValuesForPHIs(PostLoop.Structure, PostLoopPreheader,
                                 PostLoopRRI);
  }

  BasicBlock *NewMainLoopPreheader =
      MainLoopPreheader != Preheader ? MainLoopPreheader : nullptr;
  BasicBlock *NewBlocks[] = {PostLoopPreheader, PreLoopRRI.PseudoExit,
                             PreLoopRRI.ExitSelector, PostLoopRRI.PseudoExit,
                             PostLoopRRI.ExitSelector, NewMainLoopPreheader};

  // Some of the above may be nullptr, filter them out before passing to
  // addToParentLoopIfNeeded.
  auto NewBlocksEnd =
      std::remove(std::begin(NewBlocks), std::end(NewBlocks), nullptr);

  addToParentLoopIfNeeded(makeArrayRef(std::begin(NewBlocks), NewBlocksEnd));
  addToParentLoopIfNeeded(PreLoop.Blocks);
  addToParentLoopIfNeeded(PostLoop.Blocks);

  return true;
}

/// Computes and returns a range of values for the induction variable (IndVar)
/// in which the range check can be safely elided.  If it cannot compute such a
/// range, returns None.
Optional<InductiveRangeCheck::Range>
InductiveRangeCheck::computeSafeIterationSpace(
    ScalarEvolution &SE, const SCEVAddRecExpr *IndVar) const {
  // IndVar is of the form "A + B * I" (where "I" is the canonical induction
  // variable, that may or may not exist as a real llvm::Value in the loop) and
  // this inductive range check is a range check on the "C + D * I" ("C" is
  // getOffset() and "D" is getScale()).  We rewrite the value being range
  // checked to "M + N * IndVar" where "N" = "D * B^(-1)" and "M" = "C - NA".
  // Currently we support this only for "B" = "D" = { 1 or -1 }, but the code
  // can be generalized as needed.
  //
  // The actual inequalities we solve are of the form
  //
  //   0 <= M + 1 * IndVar < L given L >= 0  (i.e. N == 1)
  //
  // The inequality is satisfied by -M <= IndVar < (L - M) [^1].  All additions
  // and subtractions are twos-complement wrapping and comparisons are signed.
  //
  // Proof:
  //
  //   If there exists IndVar such that -M <= IndVar < (L - M) then it follows
  //   that -M <= (-M + L) [== Eq. 1].  Since L >= 0, if (-M + L) sign-overflows
  //   then (-M + L) < (-M).  Hence by [Eq. 1], (-M + L) could not have
  //   overflown.
  //
  //   This means IndVar = t + (-M) for t in [0, L).  Hence (IndVar + M) = t.
  //   Hence 0 <= (IndVar + M) < L

  // [^1]: Note that the solution does _not_ apply if L < 0; consider values
  // M = 127, IndVar = 126 and L = -2 in an i8 world.

  if (!IndVar->isAffine())
    return None;

  const SCEV *A = IndVar->getStart();
  const SCEVConstant *B = dyn_cast<SCEVConstant>(IndVar->getStepRecurrence(SE));
  if (!B)
    return None;

  const SCEV *C = getOffset();
  const SCEVConstant *D = dyn_cast<SCEVConstant>(getScale());
  if (D != B)
    return None;

  ConstantInt *ConstD = D->getValue();
  if (!(ConstD->isMinusOne() || ConstD->isOne()))
    return None;

  const SCEV *M = SE.getMinusSCEV(C, A);

  const SCEV *Begin = SE.getNegativeSCEV(M);
  const SCEV *UpperLimit = nullptr;

  // We strengthen "0 <= I" to "0 <= I < INT_SMAX" and "I < L" to "0 <= I < L".
  // We can potentially do much better here.
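  // For instance (an illustrative special case): for a check on the canonical
  // induction variable itself (A = 0, B = 1, C = 0, D = 1) we get M = 0, so
  // the safe range computed below is simply [0, L).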
  if (Value *V = getLength()) {
    UpperLimit = SE.getSCEV(V);
  } else {
    assert(Kind == InductiveRangeCheck::RANGE_CHECK_LOWER && "invariant!");
    unsigned BitWidth = cast<IntegerType>(IndVar->getType())->getBitWidth();
    UpperLimit = SE.getConstant(APInt::getSignedMaxValue(BitWidth));
  }

  const SCEV *End = SE.getMinusSCEV(UpperLimit, M);
  return InductiveRangeCheck::Range(Begin, End);
}

static Optional<InductiveRangeCheck::Range>
IntersectRange(ScalarEvolution &SE,
               const Optional<InductiveRangeCheck::Range> &R1,
               const InductiveRangeCheck::Range &R2) {
  if (!R1.hasValue())
    return R2;
  auto &R1Value = R1.getValue();

  // TODO: we could widen the smaller range and have this work; but for now we
  // bail out to keep things simple.
  if (R1Value.getType() != R2.getType())
    return None;

  const SCEV *NewBegin = SE.getSMaxExpr(R1Value.getBegin(), R2.getBegin());
  const SCEV *NewEnd = SE.getSMinExpr(R1Value.getEnd(), R2.getEnd());

  return InductiveRangeCheck::Range(NewBegin, NewEnd);
}

bool InductiveRangeCheckElimination::runOnLoop(Loop *L, LPPassManager &LPM) {
  if (skipLoop(L))
    return false;

  if (L->getBlocks().size() >= LoopSizeCutoff) {
    DEBUG(dbgs() << "irce: giving up constraining loop, too large\n";);
    return false;
  }

  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    DEBUG(dbgs() << "irce: loop has no preheader, leaving\n");
    return false;
  }

  LLVMContext &Context = Preheader->getContext();
  SmallVector<InductiveRangeCheck, 16> RangeChecks;
  ScalarEvolution &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  BranchProbabilityInfo &BPI =
      getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();

  for (auto BBI : L->getBlocks())
    if (BranchInst *TBI = dyn_cast<BranchInst>(BBI->getTerminator()))
      InductiveRangeCheck::extractRangeChecksFromBranch(TBI, L, SE, BPI,
                                                        RangeChecks);

  if (RangeChecks.empty())
    return false;

  auto PrintRecognizedRangeChecks = [&](raw_ostream &OS) {
    OS << "irce: looking at loop "; L->print(OS);
    OS << "irce: loop has " << RangeChecks.size()
       << " inductive range checks: \n";
    for (InductiveRangeCheck &IRC : RangeChecks)
      IRC.print(OS);
  };

  DEBUG(PrintRecognizedRangeChecks(dbgs()));

  if (PrintRangeChecks)
    PrintRecognizedRangeChecks(errs());

  const char *FailureReason = nullptr;
  Optional<LoopStructure> MaybeLoopStructure =
      LoopStructure::parseLoopStructure(SE, BPI, *L, FailureReason);
  if (!MaybeLoopStructure.hasValue()) {
    DEBUG(dbgs() << "irce: could not parse loop structure: " << FailureReason
                 << "\n";);
    return false;
  }
  LoopStructure LS = MaybeLoopStructure.getValue();
  bool Increasing = LS.IndVarIncreasing;
  const SCEV *MinusOne =
      SE.getConstant(LS.IndVarNext->getType(), Increasing ? -1 : 1, true);
  const SCEVAddRecExpr *IndVar =
      cast<SCEVAddRecExpr>(SE.getAddExpr(SE.getSCEV(LS.IndVarNext), MinusOne));

  Optional<InductiveRangeCheck::Range> SafeIterRange;
  Instruction *ExprInsertPt = Preheader->getTerminator();

  SmallVector<InductiveRangeCheck, 4> RangeChecksToEliminate;

  IRBuilder<> B(ExprInsertPt);
  for (InductiveRangeCheck &IRC : RangeChecks) {
    auto Result = IRC.computeSafeIterationSpace(SE, IndVar);
    if (Result.hasValue()) {
      auto MaybeSafeIterRange =
          IntersectRange(SE, SafeIterRange, Result.getValue());
      if (MaybeSafeIterRange.hasValue()) {
        RangeChecksToEliminate.push_back(IRC);
        SafeIterRange = MaybeSafeIterRange.getValue();
      }
    }
  }

  if (!SafeIterRange.hasValue())
    return false;

  LoopConstrainer LC(*L, getAnalysis<LoopInfoWrapperPass>().getLoopInfo(), LS,
                     SE, SafeIterRange.getValue());
  bool Changed = LC.run();

  if (Changed) {
    auto PrintConstrainedLoopInfo = [L]() {
      dbgs() << "irce: in function ";
      dbgs() << L->getHeader()->getParent()->getName() << ": ";
      dbgs() << "constrained ";
      L->print(dbgs());
    };

    DEBUG(PrintConstrainedLoopInfo());

    if (PrintChangedLoops)
      PrintConstrainedLoopInfo();

    // Optimize away the now-redundant range checks.

    for (InductiveRangeCheck &IRC : RangeChecksToEliminate) {
      ConstantInt *FoldedRangeCheck = IRC.getPassingDirection()
                                          ? ConstantInt::getTrue(Context)
                                          : ConstantInt::getFalse(Context);
      IRC.getCheckUse()->set(FoldedRangeCheck);
    }
  }

  return Changed;
}

Pass *llvm::createInductiveRangeCheckEliminationPass() {
  return new InductiveRangeCheckElimination;
}
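
// To experiment with this pass on a standalone .ll file with the legacy pass
// manager, an invocation along these lines should work (the flags other than
// -irce are the cl::opt options defined at the top of this file):
//
//   opt -irce -irce-print-changed-loops -irce-print-range-checks -S input.ll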