//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/STLExtras.h"

using namespace llvm;

/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // Check to see if there is already a cast!
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, fix it.
          if (BasicBlock::iterator(CI) != IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Instruction *NewCI = CastInst::Create(Op, V, Ty, "", IP);
            NewCI->takeName(CI);
            CI->replaceAllUsesWith(NewCI);
            CI->setOperand(0, UndefValue::get(V->getType()));
            rememberInstruction(NewCI);
            return NewCI;
          }
          rememberInstruction(CI);
          return CI;
        }
  }

  // Create a new cast.
  Instruction *I = CastInst::Create(Op, V, Ty, V->getName(), IP);
  rememberInstruction(I);
  return I;
}

/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast && V->getType() == Ty)
    return V;

  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
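  // For example, a round-trip pair such as
  //   %i = ptrtoint i8* %p to i64
  //   %q = inttoptr i64 %i to i8*
  // collapses back to %p when the sizes match (illustrative IR; the
  // matching logic follows).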
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Otherwise, V is an instruction; insert the cast immediately after it.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<DbgInfoIntrinsic>(IP) ||
         isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}

/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
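  // The insertion point may have been hoisted into an enclosing preheader
  // above, so the new binop is created there; the original point is
  // restored below.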
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(SaveInsertPt->getDebugLoc());
  rememberInstruction(BO);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return BO;
}

/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const TargetData *TD) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                                                  FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                                                 FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (TD) {
      // With TargetData, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                                                  FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without TargetData, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
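  // For example, factoring 4 out of {8,+,4} yields {2,+,1}; for {6,+,4}
  // the start contributes a remainder of 2 and the result is {1,+,1}
  // (illustrative values).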
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
      return false;
    // FIXME: can use A->getNoWrapFlags(FlagNW)
    S = SE.getAddRecExpr(Start, Step, A->getLoop(), SCEV::FlagAnyWrap);
    return true;
  }

  return false;
}

/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}

/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         // FIXME: A->getNoWrapFlags(FlagNW)
                                         SCEV::FlagAnyWrap));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}

/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
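///
/// For example, an add of the form %p + 4*%i with an i32* base would
/// ideally expand to "getelementptr i32* %p, i64 %i" rather than to
/// ptrtoint, integer arithmetic, and inttoptr (an illustrative sketch of
/// the intent, assuming a 4-byte i32).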
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
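    // For a struct element type, a constant offset is translated into a
    // field index plus a residual offset into the selected field, one
    // level of the type at a time.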
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.TD) {
        // With TargetData, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without TargetData, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }

    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    // Restore the original insert point.
    if (SaveInsertBB)
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);

    return GEP;
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}

/// isNonConstantNegative - Return true if the specified scev is negated, but
/// not a constant.
static bool isNonConstantNegative(const SCEV *F) {
  const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(F);
  if (!Mul) return false;

  // If there is a constant factor, it will be first.
  const SCEVConstant *SC = dyn_cast<SCEVConstant>(Mul->getOperand(0));
  if (!SC) return false;

  // Return true if the value is negative, this matches things like (-42 * V).
  return SC->getValue()->getValue().isNegative();
}

/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}

/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
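/// For example, an expression mixing an outer-loop IV %i with an
/// inner-loop IV %j is most relevant to the inner loop, since that is
/// where it must be recomputed (an illustrative reading).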
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return 0;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return 0;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = 0;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
  return 0;
}

namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (isNonConstantNegative(LHS.second)) {
      if (!isNonConstantNegative(RHS.second))
        return false;
    } else if (isNonConstantNegative(RHS.second))
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}

Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
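  // For example, the operands of (%p + %n + 42) are visited as %p, %n, 42:
  // the pointer first to seed a GEP base, the constant last (illustrative
  // ordering).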
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown wrapping a non-instruction value,
        // peek through it to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (isNonConstantNegative(Op)) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}

Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}

Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}

/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          // FIXME: A->getNoWrapFlags(FlagNW)
                                          SCEV::FlagAnyWrap));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}

/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
         OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  // If we haven't reached the PHI yet, keep walking the chain of operands
  // back toward it; reaching it means the chain is well-formed.
  if (IncV != PN)
    return isNormalAddRecExprPHI(PN, IncV, L);

  return true;
}

/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
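///
/// For example, both of these increment forms are considered low-cost:
///   %iv.next = add i64 %iv, %step             ; %step loop-invariant
///   %p.next = getelementptr i8* %p, i64 %step
/// (illustrative IR; the exact shapes accepted are checked below).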
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  switch (IncV->getOpcode()) {
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub:
    return IncV->getOperand(0) == PN
      && L->isLoopInvariant(IncV->getOperand(1));
  case Instruction::BitCast:
    IncV = dyn_cast<GetElementPtrInst>(IncV->getOperand(0));
    if (!IncV)
      return false;
    // fall-thru to GEP handling
  case Instruction::GetElementPtr: {
    // This must be a pointer addition of constants (pretty) or some number of
    // address-size elements (ugly).
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      // ugly geps have 2 operands.
      // i1* is used by the expander to represent an address-size element.
      if (IncV->getNumOperands() != 2)
        return false;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return false;
      // Ensure the operands dominate the insertion point. I don't know of a
      // case when this would not be true, so this is somewhat untested.
      if (L == IVIncInsertLoop) {
        for (User::op_iterator OI = IncV->op_begin()+1,
             OE = IncV->op_end(); OI != OE; ++OI)
          if (Instruction *OInst = dyn_cast<Instruction>(OI))
            if (!SE.DT->dominates(OInst, IVIncInsertPos))
              return false;
      }
      break;
    }
    IncV = dyn_cast<Instruction>(IncV->getOperand(0));
    if (IncV && IncV->getOpcode() == Instruction::BitCast)
      IncV = dyn_cast<Instruction>(IncV->getOperand(0));
    return IncV == PN;
  }
  default:
    return false;
  }
}

/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()) ||
          (SE.getEffectiveSCEVType(PN->getType()) !=
           SE.getEffectiveSCEVType(Normalized->getType())) ||
          SE.getSCEV(PN) != Normalized)
        continue;

      Instruction *IncV =
        cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, IncV, L))
          continue;
      } else {
        if (!isNormalAddRecExprPHI(PN, IncV, L))
          continue;
      }
      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(PN);
      // Remember the increment.
      rememberInstruction(IncV);
      if (L == IVIncInsertLoop)
        do {
          if (SE.DT->dominates(IncV, IVIncInsertPos))
            break;
          // Make sure the increment is where we want it. But don't move it
          // down past a potential existing post-inc user.
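          // Walk back through the chain of increments, placing each
          // instruction immediately above its user so that definitions
          // still precede uses after the moves.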
          IncV->moveBefore(IVIncInsertPos);
          IVIncInsertPos = IncV;
          IncV = cast<Instruction>(IncV->getOperand(0));
        } while (IncV != PN);
      return PN;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Insert instructions right before the
  // terminator corresponding to the back-edge. Do this before creating the PHI
  // so that PHI reuse code doesn't see an incomplete PHI. If the stride is
  // negative, insert a sub instead of an add for the increment (unless it's a
  // constant, because subtracts of constants are canonicalized to adds).
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  bool isPointer = ExpandTy->isPointerTy();
  bool isNegative = !isPointer && isNonConstantNegative(Step);
  if (isNegative)
    Step = SE.getNegativeSCEV(Step);
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI. If IVIncInsertLoop is
    // non-null and equal to the addrec's loop, insert the instructions
    // at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV;
    // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
    if (isPointer) {
      PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
      // If the step isn't constant, don't use an implicitly scaled GEP,
      // because that would require a multiply inside the loop.
      if (!isa<ConstantInt>(StepV))
        GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                    GEPPtrTy->getAddressSpace());
      const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
      IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
      if (IncV->getType() != PN->getType()) {
        IncV = Builder.CreateBitCast(IncV, PN->getType());
        rememberInstruction(IncV);
      }
    } else {
      IncV = isNegative ?
        Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
        Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
      rememberInstruction(IncV);
    }
    PN->addIncoming(IncV, Pred);
  }

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  // Remember this PHI, even in post-inc mode.
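  // Use InsertedValues directly rather than rememberInstruction, so the
  // phi is treated as a normal expansion even when expanding a post-inc
  // user (matching the reuse path above).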
  InsertedValues.insert(PN);

  return PN;
}

Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
                                                  Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = 0;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       // FIXME: Normalized->getNoWrapFlags(FlagNW)
                       SCEV::FlagAnyWrap));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = 0;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
                                            Normalized->getLoop(),
                                            // FIXME: Normalized
                                            // ->getNoWrapFlags(FlagNW)
                                            SCEV::FlagAnyWrap));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    assert((!isa<Instruction>(Result) ||
            SE.DT->dominates(cast<Instruction>(Result),
                             Builder.GetInsertPoint())) &&
           "postinc expansion does not dominate use");
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
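  // For example, if the original addrec was {x,+,s} with a start x that
  // does not dominate the loop header, the phi computed {0,+,s} and x is
  // added back here (the offset split off earlier).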
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}

Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is more narrow.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       // FIXME: S->getNoWrapFlags(FlagNW)
                                       SCEV::FlagAnyWrap));
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
    BasicBlock::iterator NewInsertPt =
      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
           isa<LandingPadInst>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    // FIXME: can use S->getNoWrapFlags()
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L, SCEV::FlagAnyWrap);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
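  // The canonical IV has roughly this shape (a sketch of the code below):
  //   %indvar = phi iN [ 0, %preheader ], [ %indvar.next, %backedge ]
  //   %indvar.next = add iN %indvar, 1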
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
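  // getTruncateOrNoop folds to V itself when the types already match, so
  // no cast is emitted in the common case.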
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}

Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *I) {
  BasicBlock::iterator IP = I;
  while (isInsertedInstruction(IP) || isa<DbgInfoIntrinsic>(IP))
    ++IP;
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
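  // A null Ty means the caller accepts whatever type the expansion
  // naturally produces, so no cast is inserted.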
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}

Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (isInsertedInstruction(InsertPt) || isa<DbgInfoIntrinsic>(InsertPt))
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>,
           AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;

  restoreInsertPoint(SaveInsertBB, SaveInsertPt);
  return V;
}

void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);

  // If we just claimed an existing instruction and that instruction had
  // been the insert point, adjust the insert point forward so that
  // subsequently inserted code will be dominated.
  if (Builder.GetInsertPoint() == I) {
    BasicBlock::iterator It = cast<Instruction>(I);
    do { ++It; } while (isInsertedInstruction(It) ||
                        isa<DbgInfoIntrinsic>(It));
    Builder.SetInsertPoint(Builder.GetInsertBlock(), It);
  }
}

void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
  // If we acquired more instructions since the old insert point was saved,
  // advance past them.
  while (isInsertedInstruction(I) || isa<DbgInfoIntrinsic>(I)) ++I;

  Builder.SetInsertPoint(BB, I);
}

/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none).
/// A canonical induction variable starts at zero and steps by one on each
/// iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return V;
}

/// hoistStep - Attempt to hoist an IV increment above a potential use.
///
/// To successfully hoist, two criteria must be met:
/// - IncV operands dominate InsertPos and
/// - InsertPos dominates IncV
///
/// Meeting the second condition means that we don't need to check all of
/// IncV's existing uses (it's moving up in the domtree).
///
/// This does not yet recursively hoist the operands, although that would
/// not be difficult.
///
/// This does not require a SCEVExpander instance and could be replaced by a
/// general code-insertion helper.
bool SCEVExpander::hoistStep(Instruction *IncV, Instruction *InsertPos,
                             const DominatorTree *DT) {
  if (DT->dominates(IncV, InsertPos))
    return true;

  if (!DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  // Attempt to hoist IncV
  for (User::op_iterator OI = IncV->op_begin(), OE = IncV->op_end();
       OI != OE; ++OI) {
    Instruction *OInst = dyn_cast<Instruction>(OI);
    if (OInst && !DT->dominates(OInst, InsertPos))
      return false;
  }
  IncV->moveBefore(InsertPos);
  return true;
}

/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts) {
  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I) {
    PHINode *Phi = cast<PHINode>(I);
    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      continue;
    }

    // If one phi derives from the other via GEPs, types may differ.
    // We could consider adding a bitcast here to handle it.
    if (OrigPhiRef->getType() != Phi->getType())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi is more canonical, swap it with the original.
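      // "More canonical" means the increment has the low-cost shape
      // recognized by isExpandedAddRecExprPHI, e.g. a simple add of a
      // loop-invariant step.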
      if (!isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L)
          && isExpandedAddRecExprPHI(Phi, IsomorphicInc, L)) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
      // that a phi is congruent, it's often the head of an IV user cycle that
      // is isomorphic with the original phi. So it's worth eagerly cleaning up
      // the common case of a single IV increment.
      if (OrigInc != IsomorphicInc &&
          OrigInc->getType() == IsomorphicInc->getType() &&
          SE.getSCEV(OrigInc) == SE.getSCEV(IsomorphicInc) &&
          hoistStep(OrigInc, IsomorphicInc, DT)) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        IsomorphicInc->replaceAllUsesWith(OrigInc);
        DeadInsts.push_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Phi->replaceAllUsesWith(OrigPhiRef);
    DeadInsts.push_back(Phi);
  }
  return NumElim;
}