LoopStrengthReduce.cpp revision a4479ad25f7f184fc4600beb1d39fd1e71849c4d
//===- LoopStrengthReduce.cpp - Strength Reduce GEPs in Loops -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by Nate Begeman and is distributed under the
// University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs a strength reduction on array references inside loops that
// have as one or more of their components the loop induction variable. This is
// accomplished by creating a new Value to hold the initial value of the array
// access for the first iteration, and then creating a new GEP instruction in
// the loop to increment the value by the appropriate amount.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/Type.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include <algorithm>
#include <set>
using namespace llvm;

namespace {
  // Number of GEP uses rewritten by this pass; reported via -stats.
  Statistic<> NumReduced ("loop-reduce", "Number of GEPs strength reduced");

  /// GEPCache - A trie keyed on successive operand Values, remembering the
  /// PHI node created for a given chain of GEP operands.
  /// NOTE(review): only strengthReduceGEP (declared below with no definition
  /// visible in this region) takes a GEPCache — presumably legacy machinery;
  /// confirm before removing.
  class GEPCache {
  public:
    GEPCache() : CachedPHINode(0), Map() {}

    /// get - Return the child cache node for operand value 'v', creating a
    /// default-constructed one on first lookup.
    GEPCache *get(Value *v) {
      std::map<Value *, GEPCache>::iterator I = Map.find(v);
      if (I == Map.end())
        I = Map.insert(std::pair<Value *, GEPCache>(v, GEPCache())).first;
      return &I->second;
    }

    PHINode *CachedPHINode;           // PHI emitted for this operand chain, if any.
    std::map<Value *, GEPCache> Map;  // Children, keyed by the next operand value.
  };

  /// IVStrideUse - Keep track of one use of a strided induction variable, where
  /// the stride is stored externally.  The Offset member keeps track of the
  /// offset from the IV, User is the actual user of the operand, and 'Operand'
  /// is the operand # of the User that is the use.
  struct IVStrideUse {
    SCEVHandle Offset;           // Offset (start value) of this use from the IV.
    Instruction *User;           // Instruction that uses the IV expression.
    Value *OperandValToReplace;  // The operand of User that gets rewritten.

    IVStrideUse(const SCEVHandle &Offs, Instruction *U, Value *O)
      : Offset(Offs), User(U), OperandValToReplace(O) {}
  };

  /// IVUsersOfOneStride - This structure keeps track of all instructions that
  /// have an operand that is based on the trip count multiplied by some stride.
  /// The stride for all of these users is common and kept external to this
  /// structure.
  struct IVUsersOfOneStride {
    /// Users - Keep track of all of the users of this stride as well as the
    /// initial value and the operand that uses the IV.
    std::vector<IVStrideUse> Users;

    void addUser(const SCEVHandle &Offset, Instruction *User, Value *Operand) {
      Users.push_back(IVStrideUse(Offset, User, Operand));
    }
  };


  /// LoopStrengthReduce - The pass itself.  Walks every loop in the function,
  /// collects IV uses grouped by stride (IVUsesByStride), and rewrites them to
  /// use a freshly inserted PHI incremented by the stride.
  class LoopStrengthReduce : public FunctionPass {
    LoopInfo *LI;           // Loop nest information for the current function.
    DominatorSet *DS;       // Dominator information (required analysis).
    ScalarEvolution *SE;    // SCEV analysis used to model IV expressions.
    const TargetData *TD;   // Target type sizes/layout for GEP arithmetic.
    const Type *UIntPtrTy;  // The target's pointer-sized unsigned integer type.
    bool Changed;           // Whether this run modified the function.

    /// MaxTargetAMSize - This is the maximum power-of-two scale value that the
    /// target can handle for free with its addressing modes.
    unsigned MaxTargetAMSize;

    /// IVUsesByStride - Keep track of all uses of induction variables that we
    /// are interested in.  The key of the map is the stride of the access.
    std::map<Value*, IVUsersOfOneStride> IVUsesByStride;

    /// CastedBasePointers - As we need to lower getelementptr instructions, we
    /// cast the pointer input to uintptr_t.  This keeps track of the casted
    /// values for the pointers we have processed so far.
    std::map<Value*, Value*> CastedBasePointers;

    /// DeadInsts - Keep track of instructions we may have made dead, so that
    /// we can remove them after we are done working.
    std::set<Instruction*> DeadInsts;
  public:
    LoopStrengthReduce(unsigned MTAMS = 1)
      : MaxTargetAMSize(MTAMS) {
    }

    /// runOnFunction - Cache the required analyses, then reduce each
    /// top-level loop; runOnLoop recurses into nested loops itself.
    virtual bool runOnFunction(Function &) {
      LI = &getAnalysis<LoopInfo>();
      DS = &getAnalysis<DominatorSet>();
      SE = &getAnalysis<ScalarEvolution>();
      TD = &getAnalysis<TargetData>();
      UIntPtrTy = TD->getIntPtrType();
      Changed = false;

      for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
        runOnLoop(*I);
      return Changed;
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      // The CFG itself is not modified; loops must be in canonical
      // (simplified) form so that each one has a preheader and a single latch.
      AU.setPreservesCFG();
      AU.addRequiredID(LoopSimplifyID);
      AU.addRequired<LoopInfo>();
      AU.addRequired<DominatorSet>();
      AU.addRequired<TargetData>();
      AU.addRequired<ScalarEvolution>();
    }
  private:
    void runOnLoop(Loop *L);
    bool AddUsersIfInteresting(Instruction *I, Loop *L);
    void AnalyzeGetElementPtrUsers(GetElementPtrInst *GEP, Instruction *I,
                                   Loop *L);

    void StrengthReduceStridedIVUsers(Value *Stride, IVUsersOfOneStride &Uses,
                                      Loop *L, bool isOnlyStride);

    // NOTE(review): declared but no definition appears in this file region —
    // verify it is still referenced before relying on it.
    void strengthReduceGEP(GetElementPtrInst *GEPI, Loop *L,
                           GEPCache* GEPCache,
                           Instruction *InsertBefore,
                           std::set<Instruction*> &DeadInsts);
    void DeleteTriviallyDeadInstructions(std::set<Instruction*> &Insts);
  };
  RegisterOpt<LoopStrengthReduce> X("loop-reduce",
                                    "Strength Reduce GEP Uses of Ind. Vars");
}

/// createLoopStrengthReducePass - Public factory for this pass.
/// MaxTargetAMSize is the largest power-of-two scale the target's addressing
/// modes can handle for free.
FunctionPass *llvm::createLoopStrengthReducePass(unsigned MaxTargetAMSize) {
  return new LoopStrengthReduce(MaxTargetAMSize);
}

/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
159void LoopStrengthReduce:: 160DeleteTriviallyDeadInstructions(std::set<Instruction*> &Insts) { 161 while (!Insts.empty()) { 162 Instruction *I = *Insts.begin(); 163 Insts.erase(Insts.begin()); 164 if (isInstructionTriviallyDead(I)) { 165 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) 166 if (Instruction *U = dyn_cast<Instruction>(I->getOperand(i))) 167 Insts.insert(U); 168 SE->deleteInstructionFromRecords(I); 169 I->eraseFromParent(); 170 Changed = true; 171 } 172 } 173} 174 175 176/// CanReduceSCEV - Return true if we can strength reduce this scalar evolution 177/// in the specified loop. 178static bool CanReduceSCEV(const SCEVHandle &SH, Loop *L) { 179 SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SH); 180 if (!AddRec || AddRec->getLoop() != L) return false; 181 182 // FIXME: Generalize to non-affine IV's. 183 if (!AddRec->isAffine()) return false; 184 185 // FIXME: generalize to IV's with more complex strides (must emit stride 186 // expression outside of loop!) 187 if (isa<SCEVConstant>(AddRec->getOperand(1))) 188 return true; 189 190 // We handle steps by unsigned values, because we know we won't have to insert 191 // a cast for them. 192 if (SCEVUnknown *SU = dyn_cast<SCEVUnknown>(AddRec->getOperand(1))) 193 if (SU->getValue()->getType()->isUnsigned()) 194 return true; 195 196 // Otherwise, no, we can't handle it yet. 197 return false; 198} 199 200 201/// GetAdjustedIndex - Adjust the specified GEP sequential type index to match 202/// the size of the pointer type, and scale it by the type size. 
// GetAdjustedIndex: widen/narrow Idx to the pointer-sized unsigned type, then
// scale it by the size of the element type being indexed.
static SCEVHandle GetAdjustedIndex(const SCEVHandle &Idx, uint64_t TySize,
                                   const Type *UIntPtrTy) {
  SCEVHandle Result = Idx;
  // Compare against the unsigned flavor of the index's type, so a
  // pointer-width signed index also counts as "already the right size".
  if (Result->getType()->getUnsignedVersion() != UIntPtrTy) {
    if (UIntPtrTy->getPrimitiveSize() < Result->getType()->getPrimitiveSize())
      Result = SCEVTruncateExpr::get(Result, UIntPtrTy);
    else
      Result = SCEVZeroExtendExpr::get(Result, UIntPtrTy);
  }

  // This index is scaled by the type size being indexed.
  if (TySize != 1)
    Result = SCEVMulExpr::get(Result,
                              SCEVConstant::get(ConstantUInt::get(UIntPtrTy,
                                                                  TySize)));
  return Result;
}

/// AnalyzeGetElementPtrUsers - Analyze all of the users of the specified
/// getelementptr instruction, adding them to the IVUsesByStride table.  Note
/// that we only want to analyze a getelementptr instruction once, and it can
/// have multiple operands that are uses of the indvar (e.g. A[i][i]).  Because
/// of this, we only process a GEP instruction if its first recurrent operand is
/// "op", otherwise we will either have already processed it or we will sometime
/// later.
void LoopStrengthReduce::AnalyzeGetElementPtrUsers(GetElementPtrInst *GEP,
                                                   Instruction *Op, Loop *L) {
  // Analyze all of the subscripts of this getelementptr instruction, looking
  // for uses that are determined by the trip count of L.  First, skip all
  // operands that are not dependent on the IV.

  // Build up the base expression.  Insert an LLVM cast of the pointer to
  // uintptr_t first.
  Value *BasePtr;
  if (Constant *CB = dyn_cast<Constant>(GEP->getOperand(0)))
    // Constant base: fold the cast at compile time.
    BasePtr = ConstantExpr::getCast(CB, UIntPtrTy);
  else {
    // Non-constant base: reuse a previously emitted cast if there is one.
    Value *&BP = CastedBasePointers[GEP->getOperand(0)];
    if (BP == 0) {
      BasicBlock::iterator InsertPt;
      if (isa<Argument>(GEP->getOperand(0))) {
        // Arguments are cast at the top of the function's entry block.
        InsertPt = GEP->getParent()->getParent()->begin()->begin();
      } else {
        // Instruction results are cast immediately after they are produced;
        // for an invoke, that means at the start of its normal destination.
        InsertPt = cast<Instruction>(GEP->getOperand(0));
        if (InvokeInst *II = dyn_cast<InvokeInst>(GEP->getOperand(0)))
          InsertPt = II->getNormalDest()->begin();
        else
          ++InsertPt;
      }

      // Do not insert casts into the middle of PHI node blocks.
      while (isa<PHINode>(InsertPt)) ++InsertPt;

      BP = new CastInst(GEP->getOperand(0), UIntPtrTy,
                        GEP->getOperand(0)->getName(), InsertPt);
    }
    BasePtr = BP;
  }

  SCEVHandle Base = SCEVUnknown::get(BasePtr);

  // Scan the operands before Op, accumulating their contribution into Base.
  gep_type_iterator GTI = gep_type_begin(GEP);
  unsigned i = 1;
  for (; GEP->getOperand(i) != Op; ++i, ++GTI) {
    // If this is a use of a recurrence that we can analyze, and it comes before
    // Op does in the GEP operand list, we will handle this when we process this
    // operand.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      // Struct field: fold the constant member offset into the base.
      const StructLayout *SL = TD->getStructLayout(STy);
      unsigned Idx = cast<ConstantUInt>(GEP->getOperand(i))->getValue();
      uint64_t Offset = SL->MemberOffsets[Idx];
      Base = SCEVAddExpr::get(Base, SCEVUnknown::getIntegerSCEV(Offset,
                                                                UIntPtrTy));
    } else {
      SCEVHandle Idx = SE->getSCEV(GEP->getOperand(i));

      // If this operand is reducible, and it's not the one we are looking at
      // currently, do not process the GEP at this time.
      if (CanReduceSCEV(Idx, L))
        return;
      Base = SCEVAddExpr::get(Base, GetAdjustedIndex(Idx,
                              TD->getTypeSize(GTI.getIndexedType()), UIntPtrTy));
    }
  }

  // Get the index, convert it to intptr_t.
  SCEVHandle GEPIndexExpr =
    GetAdjustedIndex(SE->getSCEV(Op), TD->getTypeSize(GTI.getIndexedType()),
                     UIntPtrTy);

  // Process all remaining subscripts in the GEP instruction.  Reducible
  // subscripts are summed into GEPIndexExpr; everything else goes into Base.
  for (++i, ++GTI; i != GEP->getNumOperands(); ++i, ++GTI)
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      const StructLayout *SL = TD->getStructLayout(STy);
      unsigned Idx = cast<ConstantUInt>(GEP->getOperand(i))->getValue();
      uint64_t Offset = SL->MemberOffsets[Idx];
      Base = SCEVAddExpr::get(Base, SCEVUnknown::getIntegerSCEV(Offset,
                                                                UIntPtrTy));
    } else {
      SCEVHandle Idx = SE->getSCEV(GEP->getOperand(i));
      if (CanReduceSCEV(Idx, L)) {  // Another IV subscript
        GEPIndexExpr = SCEVAddExpr::get(GEPIndexExpr,
            GetAdjustedIndex(Idx, TD->getTypeSize(GTI.getIndexedType()),
                             UIntPtrTy));
        assert(CanReduceSCEV(GEPIndexExpr, L) &&
               "Cannot reduce the sum of two reducible SCEV's??");
      } else {
        Base = SCEVAddExpr::get(Base, GetAdjustedIndex(Idx,
                                TD->getTypeSize(GTI.getIndexedType()), UIntPtrTy));
      }
    }

  assert(CanReduceSCEV(GEPIndexExpr, L) && "Non reducible idx??");

  // FIXME: If the base is not loop invariant, we currently cannot emit this.
  if (!Base->isLoopInvariant(L)) {
    DEBUG(std::cerr << "IGNORING GEP due to non-invaiant base: "
                    << *Base << "\n");
    return;
  }

  // Fold the {start,+,stride} recurrence's start value into the base; what
  // remains of the recurrence is the per-iteration stride.
  Base = SCEVAddExpr::get(Base, cast<SCEVAddRecExpr>(GEPIndexExpr)->getStart());
  SCEVHandle Stride = cast<SCEVAddRecExpr>(GEPIndexExpr)->getOperand(1);

  DEBUG(std::cerr << "GEP BASE : " << *Base << "\n");
  DEBUG(std::cerr << "GEP STRIDE: " << *Stride << "\n");

  Value *Step = 0;  // Step of ISE.
  if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride))
    /// Always get the step value as an unsigned value.
    Step = ConstantExpr::getCast(SC->getValue(),
                                 SC->getValue()->getType()->getUnsignedVersion());
  else
    Step = cast<SCEVUnknown>(Stride)->getValue();
  assert(Step->getType()->isUnsigned() && "Bad step value!");


  // Now that we know the base and stride contributed by the GEP instruction,
  // process all users.
  for (Value::use_iterator UI = GEP->use_begin(), E = GEP->use_end();
       UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    // Do not infinitely recurse on PHI nodes.
    if (isa<PHINode>(User) && User->getParent() == L->getHeader())
      continue;

    // If this is an instruction defined in a nested loop, or outside this loop,
    // don't mess with it.
    if (LI->getLoopFor(User->getParent()) != L)
      continue;

    DEBUG(std::cerr << "FOUND USER: " << *User
          << " OF STRIDE: " << *Step << " BASE = " << *Base << "\n");

    // Okay, we found a user that we cannot reduce.  Analyze the instruction
    // and decide what to do with it.
    IVUsesByStride[Step].addUser(Base, User, GEP);
  }
}

/// AddUsersIfInteresting - Inspect the specified instruction.  If it is a
/// reducible SCEV, recursively add its users to the IVUsesByStride set and
/// return true.  Otherwise, return false.
bool LoopStrengthReduce::AddUsersIfInteresting(Instruction *I, Loop *L) {
  // Void-typed instructions produce no value that could be reduced.
  if (I->getType() == Type::VoidTy) return false;
  SCEVHandle ISE = SE->getSCEV(I);
  if (!CanReduceSCEV(ISE, L)) return false;

  SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(ISE);
  SCEVHandle Start = AR->getStart();

  // Get the step value, canonicalizing to an unsigned integer type so that
  // lookups in the map will match.
  Value *Step = 0;  // Step of ISE.
  if (SCEVConstant *SC = dyn_cast<SCEVConstant>(AR->getOperand(1)))
    /// Always get the step value as an unsigned value.
    Step = ConstantExpr::getCast(SC->getValue(),
                                 SC->getValue()->getType()->getUnsignedVersion());
  else
    Step = cast<SCEVUnknown>(AR->getOperand(1))->getValue();
  assert(Step->getType()->isUnsigned() && "Bad step value!");

  // A GEP may appear several times in I's use list (e.g. A[i][i]); make sure
  // each one is analyzed only once.
  std::set<GetElementPtrInst*> AnalyzedGEPs;

  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    // Do not infinitely recurse on PHI nodes.
    if (isa<PHINode>(User) && User->getParent() == L->getHeader())
      continue;

    // If this is an instruction defined in a nested loop, or outside this loop,
    // don't recurse into it.
    if (LI->getLoopFor(User->getParent()) != L) {
      DEBUG(std::cerr << "FOUND USER in nested loop: " << *User
            << " OF SCEV: " << *ISE << "\n");

      // Okay, we found a user that we cannot reduce.  Analyze the instruction
      // and decide what to do with it.
      IVUsesByStride[Step].addUser(Start, User, I);
      continue;
    }

    // Next, see if this user is analyzable itself!
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // If this is a getelementptr instruction, figure out what linear
      // expression of induction variable is actually being used.
      if (AnalyzedGEPs.insert(GEP).second)  // Not already analyzed?
        AnalyzeGetElementPtrUsers(GEP, I, L);
    } else if (!AddUsersIfInteresting(User, L)) {
      DEBUG(std::cerr << "FOUND USER: " << *User
            << " OF SCEV: " << *ISE << "\n");

      // Okay, we found a user that we cannot reduce.  Analyze the instruction
      // and decide what to do with it.
      IVUsesByStride[Step].addUser(Start, User, I);
    }
  }
  return true;
}

namespace {
  /// BasedUser - For a particular base value, keep information about how we've
  /// partitioned the expression so far.
  struct BasedUser {
    /// Inst - The instruction using the induction variable.
    Instruction *Inst;

    /// OperandValToReplace - The operand value of Inst to replace with the
    /// EmittedBase.
    Value *OperandValToReplace;

    /// Imm - The immediate value that should be added to the base immediately
    /// before Inst, because it will be folded into the imm field of the
    /// instruction.
    SCEVHandle Imm;

    /// EmittedBase - The actual value* to use for the base value of this
    /// operation.  This is null if we should just use zero so far.
    Value *EmittedBase;

    BasedUser(Instruction *I, Value *Op, const SCEVHandle &IMM)
      : Inst(I), OperandValToReplace(Op), Imm(IMM), EmittedBase(0) {}


    // No need to compare these: sorting of <base, BasedUser> pairs only has
    // to group equal bases together, so all BasedUsers compare "equal".
    bool operator<(const BasedUser &BU) const { return 0; }

    void dump() const;
  };
}

/// dump - Print this BasedUser to stderr, for debugging.
void BasedUser::dump() const {
  std::cerr << " Imm=" << *Imm;
  if (EmittedBase)
    std::cerr << " EB=" << *EmittedBase;

  std::cerr << " Inst: " << *Inst;
}

/// isTargetConstant - Return true if the following can be referenced by the
/// immediate field of a target instruction.
static bool isTargetConstant(const SCEVHandle &V) {

  // FIXME: Look at the target to decide if &GV is a legal constant immediate.
  if (isa<SCEVConstant>(V)) return true;

  return false;  // ENABLE this for x86

  // NOTE(review): everything below is unreachable because of the return
  // above; it recognizes a cast-of-global-value as a target immediate and is
  // presumably kept for the x86 enablement mentioned in the comment.
  if (SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(SU->getValue()))
      if (CE->getOpcode() == Instruction::Cast)
        if (isa<GlobalValue>(CE->getOperand(0)))
          // FIXME: should check to see that the dest is uintptr_t!
          return true;
  return false;
}

/// GetImmediateValues - Look at Val, and pull out any additions of constants
/// that can fit into the immediate field of instructions in the target.
// GetImmediateValues: extract the portion of Val that can live in an
// instruction's immediate field.  Returns a zero SCEV of Val's type when
// nothing is foldable (or when the use is not an address at all).
static SCEVHandle GetImmediateValues(SCEVHandle Val, bool isAddress) {
  // Non-address uses have no addressing-mode immediate field to fold into.
  if (!isAddress)
    return SCEVUnknown::getIntegerSCEV(0, Val->getType());
  if (isTargetConstant(Val))
    return Val;

  if (SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
    // Find the first foldable operand of the sum, then accumulate any later
    // foldable operands into it.
    unsigned i = 0;
    for (; i != SAE->getNumOperands(); ++i)
      if (isTargetConstant(SAE->getOperand(i))) {
        SCEVHandle ImmVal = SAE->getOperand(i);

        // If there are any other immediates that we can handle here, pull them
        // out too.
        for (++i; i != SAE->getNumOperands(); ++i)
          if (isTargetConstant(SAE->getOperand(i)))
            ImmVal = SCEVAddExpr::get(ImmVal, SAE->getOperand(i));
        return ImmVal;
      }
  } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
    // Try to pull immediates out of the start value of nested addrec's.
    return GetImmediateValues(SARE->getStart(), isAddress);
  }

  // Nothing foldable was found; the immediate contribution is zero.
  return SCEVUnknown::getIntegerSCEV(0, Val->getType());
}

/// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single
/// stride of IV.  All of the users may have different starting values, and this
/// may not be the only stride (we know it is if isOnlyStride is true).
void LoopStrengthReduce::StrengthReduceStridedIVUsers(Value *Stride,
                                                      IVUsersOfOneStride &Uses,
                                                      Loop *L,
                                                      bool isOnlyStride) {
  // Transform our list of users and offsets to a bit more complex table.  In
  // this new vector, the first entry for each element is the base of the
  // strided access, and the second is the BasedUser object for the use.  We
  // progressively move information from the first to the second entry, until we
  // eventually emit the object.
  std::vector<std::pair<SCEVHandle, BasedUser> > UsersToProcess;
  UsersToProcess.reserve(Uses.Users.size());

  SCEVHandle ZeroBase =
    SCEVUnknown::getIntegerSCEV(0, Uses.Users[0].Offset->getType());

  for (unsigned i = 0, e = Uses.Users.size(); i != e; ++i)
    UsersToProcess.push_back(std::make_pair(Uses.Users[i].Offset,
                                            BasedUser(Uses.Users[i].User,
                                                Uses.Users[i].OperandValToReplace,
                                                ZeroBase)));

  // First pass, figure out what we can represent in the immediate fields of
  // instructions.  If we can represent anything there, move it to the imm
  // fields of the BasedUsers.
  for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
    // Only loads and stores have an addressing mode to fold immediates into.
    bool isAddress = isa<LoadInst>(UsersToProcess[i].second.Inst) ||
                     isa<StoreInst>(UsersToProcess[i].second.Inst);
    UsersToProcess[i].second.Imm = GetImmediateValues(UsersToProcess[i].first,
                                                      isAddress);
    // Whatever moved to the immediate field is subtracted from the base.
    UsersToProcess[i].first = SCEV::getMinusSCEV(UsersToProcess[i].first,
                                                 UsersToProcess[i].second.Imm);

    DEBUG(std::cerr << "BASE: " << *UsersToProcess[i].first);
    DEBUG(UsersToProcess[i].second.dump());
  }

  SCEVExpander Rewriter(*SE, *LI);
  BasicBlock *Preheader = L->getLoopPreheader();
  Instruction *PreInsertPt = Preheader->getTerminator();
  Instruction *PhiInsertBefore = L->getHeader()->begin();

  assert(isa<PHINode>(PhiInsertBefore) &&
         "How could this loop have IV's without any phis?");
  PHINode *SomeLoopPHI = cast<PHINode>(PhiInsertBefore);
  assert(SomeLoopPHI->getNumIncomingValues() == 2 &&
         "This loop isn't canonicalized right");
  // The latch is whichever of the header's two predecessors is not the
  // preheader.
  BasicBlock *LatchBlock =
    SomeLoopPHI->getIncomingBlock(SomeLoopPHI->getIncomingBlock(0) == Preheader);

  DEBUG(std::cerr << "INSERTING IVs of STRIDE " << *Stride << ":\n");

  // FIXME: This loop needs increasing levels of intelligence.
  // STAGE 0: just emit everything as its own base.
  // STAGE 1: factor out common vars from bases, and try and push resulting
  //          constants into Imm field.  <-- We are here
  // STAGE 2: factor out large constants to try and make more constants
  //          acceptable for target loads and stores.

  // Sort by the base value, so that all IVs with identical bases are next to
  // each other.
  std::sort(UsersToProcess.begin(), UsersToProcess.end());
  while (!UsersToProcess.empty()) {
    SCEVHandle Base = UsersToProcess.front().first;

    DEBUG(std::cerr << " INSERTING PHI with BASE = " << *Base << ":\n");

    // Create a new Phi for this base, and stick it in the loop header.
    const Type *ReplacedTy = Base->getType();
    PHINode *NewPHI = new PHINode(ReplacedTy, "iv.", PhiInsertBefore);

    // Emit the initial base value into the loop preheader, and add it to the
    // Phi node.
    Value *BaseV = Rewriter.expandCodeFor(Base, PreInsertPt, ReplacedTy);
    NewPHI->addIncoming(BaseV, Preheader);

    // Emit the increment of the base value before the terminator of the loop
    // latch block, and add it to the Phi node.
    SCEVHandle Inc = SCEVAddExpr::get(SCEVUnknown::get(NewPHI),
                                      SCEVUnknown::get(Stride));

    Value *IncV = Rewriter.expandCodeFor(Inc, LatchBlock->getTerminator(),
                                         ReplacedTy);
    IncV->setName(NewPHI->getName()+".inc");
    NewPHI->addIncoming(IncV, LatchBlock);

    // Emit the code to add the immediate offset to the Phi value, just before
    // the instructions that we identified as using this stride and base.
    // Users were sorted by base above, so all users of this base are at the
    // front of the vector.
    while (!UsersToProcess.empty() && UsersToProcess.front().first == Base) {
      BasedUser &User = UsersToProcess.front().second;

      // Clear the SCEVExpander's expression map so that we are guaranteed
      // to have the code emitted where we expect it.
      Rewriter.clear();
      SCEVHandle NewValSCEV = SCEVAddExpr::get(SCEVUnknown::get(NewPHI),
                                               User.Imm);
      Value *Replaced = User.OperandValToReplace;
      Value *newVal = Rewriter.expandCodeFor(NewValSCEV, User.Inst,
                                             Replaced->getType());

      // Replace the use of the operand Value with the new Phi we just created.
      User.Inst->replaceUsesOfWith(Replaced, newVal);
      DEBUG(std::cerr << " CHANGED: IMM =" << *User.Imm << " Inst = "
            << *User.Inst);

      // Mark old value we replaced as possibly dead, so that it is eliminated
      // if we just replaced the last use of that value.
      DeadInsts.insert(cast<Instruction>(Replaced));

      UsersToProcess.erase(UsersToProcess.begin());
      ++NumReduced;
    }
    // TODO: Next, find out which base index is the most common, pull it out.
  }

  // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
  // different starting values, into different PHIs.

  // BEFORE writing this, it's probably useful to handle GEP's.

  // NOTE: pull all constants together, for REG+IMM addressing, include &GV in
  // 'IMM' if the target supports it.
}


/// runOnLoop - Strength reduce IV uses in L (recursing into nested loops
/// first), then clean up any instructions the rewrite made dead.
void LoopStrengthReduce::runOnLoop(Loop *L) {
  // First step, transform all loops nesting inside of this loop.
  for (LoopInfo::iterator I = L->begin(), E = L->end(); I != E; ++I)
    runOnLoop(*I);

  // Next, find all uses of induction variables in this loop, and categorize
  // them by stride.  Start by finding all of the PHI nodes in the header for
  // this loop.  If they are induction variables, inspect their uses.
  for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
    AddUsersIfInteresting(I, L);

  // If we have nothing to do, return.
  //if (IVUsesByStride.empty()) return;

  // FIXME: We can widen subreg IV's here for RISC targets.  e.g. instead of
  // doing computation in byte values, promote to 32-bit values if safe.

  // FIXME: Attempt to reuse values across multiple IV's.  In particular, we
  // could have something like "for(i) { foo(i*8); bar(i*16) }", which should be
  // codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC.  Need
  // to be careful that IV's are all the same type.  Only works for intptr_t
  // indvars.

  // If we only have one stride, we can more aggressively eliminate some things.
  bool HasOneStride = IVUsesByStride.size() == 1;

  for (std::map<Value*, IVUsersOfOneStride>::iterator SI
        = IVUsesByStride.begin(), E = IVUsesByStride.end(); SI != E; ++SI)
    StrengthReduceStridedIVUsers(SI->first, SI->second, L, HasOneStride);

  // Clean up after ourselves
  if (!DeadInsts.empty()) {
    DeleteTriviallyDeadInstructions(DeadInsts);

    BasicBlock::iterator I = L->getHeader()->begin();
    PHINode *PN;
    while ((PN = dyn_cast<PHINode>(I))) {
      ++I;  // Preincrement iterator to avoid invalidating it when deleting PN.

      // At this point, we know that we have killed one or more GEP
      // instructions.  It is worth checking to see if the cann indvar is also
      // dead, so that we can remove it as well.  The requirements for the cann
      // indvar to be considered dead are:
      // 1. the cann indvar has one use
      // 2. the use is an add instruction
      //    (NOTE(review): the code below accepts any BinaryOperator, not just
      //    an add — confirm whether that is intended.)
      // 3. the add has one use
      // 4. the add is used by the cann indvar
      // If all four cases above are true, then we can remove both the add and
      // the cann indvar.
      // FIXME: this needs to eliminate an induction variable even if it's being
      // compared against some value to decide loop termination.
      if (PN->hasOneUse()) {
        BinaryOperator *BO = dyn_cast<BinaryOperator>(*(PN->use_begin()));
        if (BO && BO->hasOneUse()) {
          if (PN == *(BO->use_begin())) {
            DeadInsts.insert(BO);
            // Break the cycle, then delete the PHI.
            PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
            SE->deleteInstructionFromRecords(PN);
            PN->eraseFromParent();
          }
        }
      }
    }
    DeleteTriviallyDeadInstructions(DeadInsts);
  }

  // Reset per-loop state so the next loop starts fresh.
  IVUsesByStride.clear();
  CastedBasePointers.clear();
  return;
}