// MachineLICM.cpp, revision acde91e2735cf2841a306a7c7af7af8c31f34a4a
//===-- MachineLICM.cpp - Machine Loop Invariant Code Motion Pass ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion on machine instructions. We
// attempt to remove as much code from the body of a loop as possible.
//
// This pass does not attempt to throttle itself to limit register pressure.
// The register allocation phases are expected to perform rematerialization
// to recover when register pressure is high.
//
// This pass is not intended to be a replacement or a complete alternative
// for the LLVM-IR-level LICM pass. It is only designed to hoist simple
// constructs that are not exposed before lowering and instruction selection.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "machine-licm"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// When set (the default), loads that are not guaranteed to execute on every
// iteration are not considered safe to hoist (see IsGuaranteedToExecute).
static cl::opt<bool>
AvoidSpeculation("avoid-speculation",
                 cl::desc("MachineLICM should avoid speculation"),
                 cl::init(true), cl::Hidden);

STATISTIC(NumHoisted,
          "Number of machine instructions hoisted out of loops");
STATISTIC(NumLowRP,
          "Number of instructions hoisted in low reg pressure situation");
STATISTIC(NumHighLatency,
          "Number of high latency instructions hoisted");
STATISTIC(NumCSEed,
          "Number of hoisted machine instructions CSEed");
STATISTIC(NumPostRAHoisted,
          "Number of machine instructions hoisted out of loops post regalloc");

namespace {
  /// MachineLICM - Hoists loop-invariant machine instructions into a loop's
  /// preheader. Runs in two flavors selected by PreRegAlloc: the pre-regalloc
  /// pass hoists general instructions guided by an estimated register
  /// pressure model, while the post-regalloc pass only hoists reloads and
  /// register-operand-free remats (see HoistRegionPostRA).
  class MachineLICM : public MachineFunctionPass {
    bool PreRegAlloc;                // True for the pre-regalloc incarnation.

    const TargetMachine   *TM;
    const TargetInstrInfo *TII;
    const TargetLowering *TLI;
    const TargetRegisterInfo *TRI;
    const MachineFrameInfo *MFI;
    MachineRegisterInfo *MRI;
    const InstrItineraryData *InstrItins;

    // Various analyses that we use...
    AliasAnalysis        *AA;      // Alias analysis info.
    MachineLoopInfo      *MLI;     // Current MachineLoopInfo
    MachineDominatorTree *DT;      // Machine dominator tree for the cur loop

    // State that is updated as we process loops
    bool         Changed;          // True if a loop is changed.
    bool         FirstInLoop;      // True if it's the first LICM in the loop.
    MachineLoop *CurLoop;          // The current loop we are working on.
    MachineBasicBlock *CurPreheader; // The preheader for CurLoop.

    BitVector AllocatableSet;      // Allocatable physregs for this function.

    // Track 'estimated' register pressure.
    SmallSet<unsigned, 32> RegSeen;    // Virtual regs encountered so far.
    SmallVector<unsigned, 8> RegPressure; // Current pressure, per reg class ID.

    // Register pressure "limit" per register class. If the pressure
    // is higher than the limit, then it's considered high.
    SmallVector<unsigned, 8> RegLimit;

    // Register pressure on path leading from loop preheader to current BB.
    // One RegPressure snapshot is pushed per open dominator-tree scope.
    SmallVector<SmallVector<unsigned, 8>, 16> BackTrace;

    // For each opcode, keep a list of potential CSE instructions.
    DenseMap<unsigned, std::vector<const MachineInstr*> > CSEMap;

    enum {
      SpeculateFalse   = 0,
      SpeculateTrue    = 1,
      SpeculateUnknown = 2
    };

    // If a MBB does not dominate loop exiting blocks then it may not be safe
    // to hoist loads from this block.
    // Tri-state: 0 - false, 1 - true, 2 - unknown. Reset to SpeculateUnknown
    // at the start of each block; caches IsGuaranteedToExecute's answer.
    unsigned SpeculationState;

  public:
    static char ID; // Pass identification, replacement for typeid
    MachineLICM() :
      MachineFunctionPass(ID), PreRegAlloc(true) {
        initializeMachineLICMPass(*PassRegistry::getPassRegistry());
      }

    explicit MachineLICM(bool PreRA) :
      MachineFunctionPass(ID), PreRegAlloc(PreRA) {
        initializeMachineLICMPass(*PassRegistry::getPassRegistry());
      }

    virtual bool runOnMachineFunction(MachineFunction &MF);

    const char *getPassName() const { return "Machine Instruction LICM"; }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<MachineLoopInfo>();
      AU.addRequired<MachineDominatorTree>();
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<MachineLoopInfo>();
      AU.addPreserved<MachineDominatorTree>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    virtual void releaseMemory() {
      RegSeen.clear();
      RegPressure.clear();
      RegLimit.clear();
      BackTrace.clear();
      for (DenseMap<unsigned,std::vector<const MachineInstr*> >::iterator
             CI = CSEMap.begin(), CE = CSEMap.end(); CI != CE; ++CI)
        CI->second.clear();
      CSEMap.clear();
    }

  private:
    /// CandidateInfo - Keep track of information about hoisting candidates.
    struct CandidateInfo {
      MachineInstr *MI;
      unsigned      Def;  // The single (non-implicit) physreg defined by MI.
      int           FI;   // Source spill slot, or INT_MIN if not a reload.
      CandidateInfo(MachineInstr *mi, unsigned def, int fi)
        : MI(mi), Def(def), FI(fi) {}
    };

    /// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
    /// invariants out to the preheader.
    void HoistRegionPostRA();

    /// HoistPostRA - When an instruction is found to only use loop invariant
    /// operands that is safe to hoist, this instruction is called to do the
    /// dirty work.
    void HoistPostRA(MachineInstr *MI, unsigned Def);

    /// ProcessMI - Examine the instruction for potential LICM candidate. Also
    /// gather register def and frame object update information.
    void ProcessMI(MachineInstr *MI, unsigned *PhysRegDefs,
                   SmallSet<int, 32> &StoredFIs,
                   SmallVector<CandidateInfo, 32> &Candidates);

    /// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the
    /// current loop.
    void AddToLiveIns(unsigned Reg);

    /// IsLICMCandidate - Returns true if the instruction may be a suitable
    /// candidate for LICM. e.g. If the instruction is a call, then it's
    /// obviously not safe to hoist it.
    bool IsLICMCandidate(MachineInstr &I);

    /// IsLoopInvariantInst - Returns true if the instruction is loop
    /// invariant. I.e., all virtual register operands are defined outside of
    /// the loop, physical registers aren't accessed (explicitly or implicitly),
    /// and the instruction is hoistable.
    ///
    bool IsLoopInvariantInst(MachineInstr &I);

    /// HasAnyPHIUse - Return true if the specified register is used by any
    /// phi node.
    bool HasAnyPHIUse(unsigned Reg) const;

    /// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
    /// and a use in the current loop, return true if the target considered
    /// it 'high'.
    bool HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
                               unsigned Reg) const;

    /// IsCheapInstruction - Whether MI counts as cheap for the hoisting
    /// heuristics; definition appears later in this file.
    bool IsCheapInstruction(MachineInstr &MI) const;

    /// CanCauseHighRegPressure - Visit BBs from header to current BB,
    /// check if hoisting an instruction of the given cost map can cause high
    /// register pressure.
    bool CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost);

    /// UpdateBackTraceRegPressure - Traverse the back trace from header to
    /// the current block and update their register pressures to reflect the
    /// effect of hoisting MI from the current block to the preheader.
    void UpdateBackTraceRegPressure(const MachineInstr *MI);

    /// IsProfitableToHoist - Return true if it is potentially profitable to
    /// hoist the given loop invariant.
    bool IsProfitableToHoist(MachineInstr &MI);

    /// IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
    /// If not then a load from this mbb may not be safe to hoist.
    bool IsGuaranteedToExecute(MachineBasicBlock *BB);

    /// EnterScope - Open a dominator-tree scope for MBB, snapshotting the
    /// current register pressure onto BackTrace.
    void EnterScope(MachineBasicBlock *MBB);

    /// ExitScope - Close the scope for MBB, popping its pressure snapshot.
    void ExitScope(MachineBasicBlock *MBB);

    /// ExitScopeIfDone - Destroy scope for the MBB that corresponds to given
    /// dominator tree node if it's a leaf or all of its children are done. Walk
    /// up the dominator tree to destroy ancestors which are now done.
    void ExitScopeIfDone(MachineDomTreeNode *Node,
                DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
                DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap);

    /// HoistOutOfLoop - Walk the specified loop in the CFG (defined by all
    /// blocks dominated by the specified header block, and that are in the
    /// current loop) in depth first order w.r.t the DominatorTree. This allows
    /// us to visit definitions before uses, allowing us to hoist a loop body in
    /// one pass without iteration.
    ///
    void HoistOutOfLoop(MachineDomTreeNode *LoopHeaderNode);
    void HoistRegion(MachineDomTreeNode *N, bool IsHeader);

    /// getRegisterClassIDAndCost - For a given MI, register, and the operand
    /// index, return the ID and cost of its representative register class by
    /// reference.
    void getRegisterClassIDAndCost(const MachineInstr *MI,
                                   unsigned Reg, unsigned OpIdx,
                                   unsigned &RCId, unsigned &RCCost) const;

    /// InitRegPressure - Find all virtual register references that are liveout
    /// of the preheader to initialize the starting "register pressure". Note
    /// this does not count live through (livein but not used) registers.
    void InitRegPressure(MachineBasicBlock *BB);

    /// UpdateRegPressure - Update estimate of register pressure after the
    /// specified instruction.
    void UpdateRegPressure(const MachineInstr *MI);

    /// ExtractHoistableLoad - Unfold a load from the given machineinstr if
    /// the load itself could be hoisted. Return the unfolded and hoistable
    /// load, or null if the load couldn't be unfolded or if it wouldn't
    /// be hoistable.
    MachineInstr *ExtractHoistableLoad(MachineInstr *MI);

    /// LookForDuplicate - Find an instruction among PrevMIs that is a
    /// duplicate of MI. Return this instruction if it's found.
    const MachineInstr *LookForDuplicate(const MachineInstr *MI,
                                     std::vector<const MachineInstr*> &PrevMIs);

    /// EliminateCSE - Given a LICM'ed instruction, look for an instruction in
    /// the preheader that computes the same value. If found, replace all uses
    /// of MI's definition with the existing instruction's definition rather
    /// than hoisting MI to the preheader.
    bool EliminateCSE(MachineInstr *MI,
           DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI);

    /// MayCSE - Return true if the given instruction will be CSE'd if it's
    /// hoisted out of the loop.
    bool MayCSE(MachineInstr *MI);

    /// Hoist - When an instruction is found to only use loop invariant operands
    /// that is safe to hoist, this instruction is called to do the dirty work.
    /// It returns true if the instruction is hoisted.
    bool Hoist(MachineInstr *MI, MachineBasicBlock *Preheader);

    /// InitCSEMap - Initialize the CSE map with instructions that are in the
    /// current loop preheader that may become duplicates of instructions that
    /// are hoisted out of the loop.
    void InitCSEMap(MachineBasicBlock *BB);

    /// getCurPreheader - Get the preheader for the current loop, splitting
    /// a critical edge if needed.
    MachineBasicBlock *getCurPreheader();
  };
} // end anonymous namespace

char MachineLICM::ID = 0;
INITIALIZE_PASS_BEGIN(MachineLICM, "machinelicm",
                "Machine Loop Invariant Code Motion", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MachineLICM, "machinelicm",
                "Machine Loop Invariant Code Motion", false, false)

FunctionPass *llvm::createMachineLICMPass(bool PreRegAlloc) {
  return new MachineLICM(PreRegAlloc);
}

/// LoopIsOuterMostWithPredecessor - Test if the given loop is the outer-most
/// loop that has a unique predecessor.
static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
  // Check whether this loop even has a unique predecessor.
  if (!CurLoop->getLoopPredecessor())
    return false;
  // Ok, now check to see if any of its outer loops do.
  for (MachineLoop *L = CurLoop->getParentLoop(); L; L = L->getParentLoop())
    if (L->getLoopPredecessor())
      return false;
  // None of them did, so this is the outermost with a unique predecessor.
  return true;
}

bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
  if (PreRegAlloc)
    DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
  else
    DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
  DEBUG(dbgs() << MF.getFunction()->getName() << " ********\n");

  Changed = FirstInLoop = false;
  TM = &MF.getTarget();
  TII = TM->getInstrInfo();
  TLI = TM->getTargetLowering();
  TRI = TM->getRegisterInfo();
  MFI = MF.getFrameInfo();
  MRI = &MF.getRegInfo();
  InstrItins = TM->getInstrItineraryData();
  AllocatableSet = TRI->getAllocatableSet(MF);

  if (PreRegAlloc) {
    // Estimate register pressure during pre-regalloc pass.
    unsigned NumRC = TRI->getNumRegClasses();
    RegPressure.resize(NumRC);
    std::fill(RegPressure.begin(), RegPressure.end(), 0);
    RegLimit.resize(NumRC);
    for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
           E = TRI->regclass_end(); I != E; ++I)
      RegLimit[(*I)->getID()] = TRI->getRegPressureLimit(*I, MF);
  }

  // Get our Loop information...
  MLI = &getAnalysis<MachineLoopInfo>();
  DT  = &getAnalysis<MachineDominatorTree>();
  AA  = &getAnalysis<AliasAnalysis>();

  SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
  while (!Worklist.empty()) {
    CurLoop = Worklist.pop_back_val();
    CurPreheader = 0;

    // If this is done before regalloc, only visit outer-most preheader-sporting
    // loops. Inner loops of a loop without a predecessor are pushed back and
    // processed on their own.
    if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop)) {
      Worklist.append(CurLoop->begin(), CurLoop->end());
      continue;
    }

    if (!PreRegAlloc)
      HoistRegionPostRA();
    else {
      // CSEMap is initialized for loop header when the first instruction is
      // being hoisted.
      MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
      FirstInLoop = true;
      HoistOutOfLoop(N);
      CSEMap.clear();
    }
  }

  return Changed;
}

/// InstructionStoresToFI - Return true if instruction stores to the
/// specified frame.
static bool InstructionStoresToFI(const MachineInstr *MI, int FI) {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end(); o != oe; ++o) {
    if (!(*o)->isStore() || !(*o)->getValue())
      continue;
    if (const FixedStackPseudoSourceValue *Value =
        dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
      if (Value->getFrameIndex() == FI)
        return true;
    }
  }
  return false;
}

/// ProcessMI - Examine the instruction for potential LICM candidate. Also
/// gather register def and frame object update information.
void MachineLICM::ProcessMI(MachineInstr *MI,
                            unsigned *PhysRegDefs,
                            SmallSet<int, 32> &StoredFIs,
                            SmallVector<CandidateInfo, 32> &Candidates) {
  bool RuledOut = false;
  bool HasNonInvariantUse = false;
  unsigned Def = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isFI()) {
      // Remember if the instruction stores to the frame index.
      int FI = MO.getIndex();
      if (!StoredFIs.count(FI) &&
          MFI->isSpillSlotObjectIndex(FI) &&
          InstructionStoresToFI(MI, FI))
        StoredFIs.insert(FI);
      HasNonInvariantUse = true;
      continue;
    }

    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (!Reg)
      continue;
    assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
           "Not expecting virtual register!");

    if (!MO.isDef()) {
      if (Reg && PhysRegDefs[Reg])
        // If it's using a non-loop-invariant register, then it's obviously not
        // safe to hoist.
        HasNonInvariantUse = true;
      continue;
    }

    if (MO.isImplicit()) {
      ++PhysRegDefs[Reg];
      for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
        ++PhysRegDefs[*AS];
      if (!MO.isDead())
        // Non-dead implicit def? This cannot be hoisted.
        RuledOut = true;
      // No need to check if a dead implicit def is also defined by
      // another instruction.
      continue;
    }

    // FIXME: For now, avoid instructions with multiple defs, unless
    // it's a dead implicit def.
    if (Def)
      RuledOut = true;
    else
      Def = Reg;

    // If we have already seen another instruction that defines the same
    // register, then this is not safe. The alias set is counted too so that
    // defs of overlapping registers also rule the candidate out.
    if (++PhysRegDefs[Reg] > 1)
      // MI defined register is seen defined by another instruction in
      // the loop, it cannot be a LICM candidate.
      RuledOut = true;
    for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
      if (++PhysRegDefs[*AS] > 1)
        RuledOut = true;
  }

  // Only consider reloads for now and remats which do not have register
  // operands. FIXME: Consider unfold load folding instructions.
  if (Def && !RuledOut) {
    int FI = INT_MIN;
    if ((!HasNonInvariantUse && IsLICMCandidate(*MI)) ||
        (TII->isLoadFromStackSlot(MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
      Candidates.push_back(CandidateInfo(MI, Def, FI));
  }
}

/// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
/// invariants out to the preheader.
void MachineLICM::HoistRegionPostRA() {
  unsigned NumRegs = TRI->getNumRegs();
  unsigned *PhysRegDefs = new unsigned[NumRegs];
  std::fill(PhysRegDefs, PhysRegDefs + NumRegs, 0);

  SmallVector<CandidateInfo, 32> Candidates;
  SmallSet<int, 32> StoredFIs;

  // Walk the entire region, count number of defs for each register, and
  // collect potential LICM candidates.
  const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
    MachineBasicBlock *BB = Blocks[i];

    // If the header of the loop containing this basic block is a landing pad,
    // then don't try to hoist instructions out of this loop.
    const MachineLoop *ML = MLI->getLoopFor(BB);
    if (ML && ML->getHeader()->isLandingPad()) continue;

    // Conservatively treat live-in's as an external def.
    // FIXME: That means a reload that is reused in successor block(s) will not
    // be LICM'ed.
    for (MachineBasicBlock::livein_iterator I = BB->livein_begin(),
           E = BB->livein_end(); I != E; ++I) {
      unsigned Reg = *I;
      ++PhysRegDefs[Reg];
      for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS)
        ++PhysRegDefs[*AS];
    }

    SpeculationState = SpeculateUnknown;
    for (MachineBasicBlock::iterator
           MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
      MachineInstr *MI = &*MII;
      ProcessMI(MI, PhysRegDefs, StoredFIs, Candidates);
    }
  }

  // Now evaluate whether the potential candidates qualify.
  // 1. Check if the candidate defined register is defined by another
  //    instruction in the loop.
  // 2. If the candidate is a load from stack slot (always true for now),
  //    check if the slot is stored anywhere in the loop.
  for (unsigned i = 0, e = Candidates.size(); i != e; ++i) {
    if (Candidates[i].FI != INT_MIN &&
        StoredFIs.count(Candidates[i].FI))
      continue;

    if (PhysRegDefs[Candidates[i].Def] == 1) {
      bool Safe = true;
      MachineInstr *MI = Candidates[i].MI;
      for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
        const MachineOperand &MO = MI->getOperand(j);
        if (!MO.isReg() || MO.isDef() || !MO.getReg())
          continue;
        if (PhysRegDefs[MO.getReg()]) {
          // If it's using a non-loop-invariant register, then it's obviously
          // not safe to hoist.
          Safe = false;
          break;
        }
      }
      if (Safe)
        HoistPostRA(MI, Candidates[i].Def);
    }
  }

  delete[] PhysRegDefs;
}

/// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the current
/// loop, and make sure it is not killed by any instructions in the loop.
void MachineLICM::AddToLiveIns(unsigned Reg) {
  const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
  for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
    MachineBasicBlock *BB = Blocks[i];
    if (!BB->isLiveIn(Reg))
      BB->addLiveIn(Reg);
    for (MachineBasicBlock::iterator
           MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
      MachineInstr *MI = &*MII;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg() || MO.isDef()) continue;
        // Clear kill flags on Reg (and its super-registers) since it now
        // stays live through the entire loop.
        if (MO.getReg() == Reg || TRI->isSuperRegister(Reg, MO.getReg()))
          MO.setIsKill(false);
      }
    }
  }
}

/// HoistPostRA - When an instruction is found to only use loop invariant
/// operands that is safe to hoist, this instruction is called to do the
/// dirty work.
void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
  MachineBasicBlock *Preheader = getCurPreheader();
  if (!Preheader) return;

  // Now move the instructions to the predecessor, inserting it before any
  // terminator instructions.
  DEBUG({
      dbgs() << "Hoisting " << *MI;
      if (Preheader->getBasicBlock())
        dbgs() << " to MachineBasicBlock "
               << Preheader->getName();
      if (MI->getParent()->getBasicBlock())
        dbgs() << " from MachineBasicBlock "
               << MI->getParent()->getName();
      dbgs() << "\n";
    });

  // Splice the instruction to the preheader.
  MachineBasicBlock *MBB = MI->getParent();
  Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);

  // Add register to livein list to all the BBs in the current loop since a
  // loop invariant must be kept live throughout the whole loop. This is
  // important to ensure later passes do not scavenge the def register.
  AddToLiveIns(Def);

  ++NumPostRAHoisted;
  Changed = true;
}

// IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
// If not then a load from this mbb may not be safe to hoist.
// The answer is cached in SpeculationState for the duration of the block.
bool MachineLICM::IsGuaranteedToExecute(MachineBasicBlock *BB) {
  if (SpeculationState != SpeculateUnknown)
    return SpeculationState == SpeculateFalse;

  if (BB != CurLoop->getHeader()) {
    // Check loop exiting blocks: BB is guaranteed to execute only if it
    // dominates every exit from the loop.
    SmallVector<MachineBasicBlock*, 8> CurrentLoopExitingBlocks;
    CurLoop->getExitingBlocks(CurrentLoopExitingBlocks);
    for (unsigned i = 0, e = CurrentLoopExitingBlocks.size(); i != e; ++i)
      if (!DT->dominates(BB, CurrentLoopExitingBlocks[i])) {
        SpeculationState = SpeculateTrue;
        return false;
      }
  }

  SpeculationState = SpeculateFalse;
  return true;
}

void MachineLICM::EnterScope(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n');

  // Remember livein register pressure.
  BackTrace.push_back(RegPressure);
}

void MachineLICM::ExitScope(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
  BackTrace.pop_back();
}

/// ExitScopeIfDone - Destroy scope for the MBB that corresponds to the given
/// dominator tree node if it's a leaf or all of its children are done. Walk
/// up the dominator tree to destroy ancestors which are now done.
void MachineLICM::ExitScopeIfDone(MachineDomTreeNode *Node,
                DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
                DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap) {
  if (OpenChildren[Node])
    return;

  // Pop scope.
  ExitScope(Node->getBlock());

  // Now traverse upwards to pop ancestors whose offspring are all done.
  while (MachineDomTreeNode *Parent = ParentMap[Node]) {
    unsigned Left = --OpenChildren[Parent];
    if (Left != 0)
      break;
    ExitScope(Parent->getBlock());
    Node = Parent;
  }
}

/// HoistOutOfLoop - Walk the specified loop in the CFG (defined by all
/// blocks dominated by the specified header block, and that are in the
/// current loop) in depth first order w.r.t the DominatorTree. This allows
/// us to visit definitions before uses, allowing us to hoist a loop body in
/// one pass without iteration.
///
void MachineLICM::HoistOutOfLoop(MachineDomTreeNode *HeaderN) {
  SmallVector<MachineDomTreeNode*, 32> Scopes;
  SmallVector<MachineDomTreeNode*, 8> WorkList;
  DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
  DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;

  // Perform a DFS walk to determine the order of visit.
  WorkList.push_back(HeaderN);
  do {
    MachineDomTreeNode *Node = WorkList.pop_back_val();
    assert(Node != 0 && "Null dominator tree node?");
    MachineBasicBlock *BB = Node->getBlock();

    // If the header of the loop containing this basic block is a landing pad,
    // then don't try to hoist instructions out of this loop.
    const MachineLoop *ML = MLI->getLoopFor(BB);
    if (ML && ML->getHeader()->isLandingPad())
      continue;

    // If this subregion is not in the top level loop at all, exit.
    if (!CurLoop->contains(BB))
      continue;

    Scopes.push_back(Node);
    const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
    unsigned NumChildren = Children.size();

    // Don't hoist things out of a large switch statement. This often causes
    // code to be hoisted that wasn't going to be executed, and increases
    // register pressure in a situation where it's likely to matter.
    if (BB->succ_size() >= 25)
      NumChildren = 0;

    OpenChildren[Node] = NumChildren;
    // Add children in reverse order as then the next popped worklist node is
    // the first child of this node. This means we ultimately traverse the
    // DOM tree in exactly the same order as if we'd recursed.
    for (int i = (int)NumChildren-1; i >= 0; --i) {
      MachineDomTreeNode *Child = Children[i];
      ParentMap[Child] = Node;
      WorkList.push_back(Child);
    }
  } while (!WorkList.empty());

  if (Scopes.size() != 0) {
    MachineBasicBlock *Preheader = getCurPreheader();
    if (!Preheader)
      return;

    // Compute registers which are livein into the loop headers.
    RegSeen.clear();
    BackTrace.clear();
    InitRegPressure(Preheader);
  }

  // Now perform LICM.
  for (unsigned i = 0, e = Scopes.size(); i != e; ++i) {
    MachineDomTreeNode *Node = Scopes[i];
    MachineBasicBlock *MBB = Node->getBlock();

    MachineBasicBlock *Preheader = getCurPreheader();
    if (!Preheader)
      continue;

    EnterScope(MBB);

    // Process the block. NextMII is captured first because Hoist may splice
    // MI out of this block.
    SpeculationState = SpeculateUnknown;
    for (MachineBasicBlock::iterator
           MII = MBB->begin(), E = MBB->end(); MII != E; ) {
      MachineBasicBlock::iterator NextMII = MII; ++NextMII;
      MachineInstr *MI = &*MII;
      if (!Hoist(MI, Preheader))
        UpdateRegPressure(MI);
      MII = NextMII;
    }

    // If it's a leaf node, it's done. Traverse upwards to pop ancestors.
    ExitScopeIfDone(Node, OpenChildren, ParentMap);
  }
}

/// isOperandKill - Return true if this use of a virtual register ends its
/// live range: either it carries a kill flag or it is the sole non-debug use.
static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
}

/// getRegisterClassIDAndCost - For a given MI, register, and the operand
/// index, return the ID and cost of its representative register class.
void
MachineLICM::getRegisterClassIDAndCost(const MachineInstr *MI,
                                       unsigned Reg, unsigned OpIdx,
                                       unsigned &RCId, unsigned &RCCost) const {
  const TargetRegisterClass *RC = MRI->getRegClass(Reg);
  EVT VT = *RC->vt_begin();
  if (VT == MVT::Untyped) {
    // Untyped register classes have no representative class; fall back to
    // the class itself with unit cost.
    RCId = RC->getID();
    RCCost = 1;
  } else {
    RCId = TLI->getRepRegClassFor(VT)->getID();
    RCCost = TLI->getRepRegClassCostFor(VT);
  }
}

/// InitRegPressure - Find all virtual register references that are liveout of
/// the preheader to initialize the starting "register pressure". Note this
/// does not count live through (livein but not used) registers.
void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
  std::fill(RegPressure.begin(), RegPressure.end(), 0);

  // If the preheader has only a single predecessor and it ends with a
  // fallthrough or an unconditional branch, then scan its predecessor for live
  // defs as well. This happens whenever the preheader is created by splitting
  // the critical edge from the loop predecessor to the loop header.
  if (BB->pred_size() == 1) {
    MachineBasicBlock *TBB = 0, *FBB = 0;
    SmallVector<MachineOperand, 4> Cond;
    if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty())
      InitRegPressure(*BB->pred_begin());
  }

  for (MachineBasicBlock::iterator MII = BB->begin(), E = BB->end();
       MII != E; ++MII) {
    MachineInstr *MI = &*MII;
    for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || MO.isImplicit())
        continue;
      unsigned Reg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(Reg))
        continue;

      bool isNew = RegSeen.insert(Reg);
      unsigned RCId, RCCost;
      getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
      if (MO.isDef())
        RegPressure[RCId] += RCCost;
      else {
        bool isKill = isOperandKill(MO, MRI);
        if (isNew && !isKill)
          // Haven't seen this, it must be a livein.
          RegPressure[RCId] += RCCost;
        else if (!isNew && isKill)
          RegPressure[RCId] -= RCCost;
      }
    }
  }
}

/// UpdateRegPressure - Update estimate of register pressure after the
/// specified instruction.
void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
  // IMPLICIT_DEF produces no real value, so it contributes nothing to
  // register pressure.
  if (MI->isImplicitDef())
    return;

  SmallVector<unsigned, 4> Defs;
  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;

    // insert() returns true when Reg has not been seen before in this walk.
    bool isNew = RegSeen.insert(Reg);
    if (MO.isDef())
      Defs.push_back(Reg);
    else if (!isNew && isOperandKill(MO, MRI)) {
      // A kill of an already-seen register ends its live range here, so
      // pressure for its register class drops (clamped at zero).
      unsigned RCId, RCCost;
      getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
      if (RCCost > RegPressure[RCId])
        RegPressure[RCId] = 0;
      else
        RegPressure[RCId] -= RCCost;
    }
  }

  // Each def starts a new live range, raising pressure for its class.
  // NOTE(review): Idx counts processed defs while Defs is popped from the
  // back, so Idx is not necessarily the operand index of Reg within MI —
  // verify getRegisterClassIDAndCost tolerates this.
  unsigned Idx = 0;
  while (!Defs.empty()) {
    unsigned Reg = Defs.pop_back_val();
    unsigned RCId, RCCost;
    getRegisterClassIDAndCost(MI, Reg, Idx, RCId, RCCost);
    RegPressure[RCId] += RCCost;
    ++Idx;
  }
}

/// isLoadFromGOTOrConstantPool - Return true if this machine instruction
/// loads from global offset table or constant pool.
static bool isLoadFromGOTOrConstantPool(MachineInstr &MI) {
  assert (MI.mayLoad() && "Expected MI that loads!");
  // Inspect every memory operand; a GOT or constant-pool pseudo source value
  // identifies the load as reading read-only, compiler-managed memory.
  for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
         E = MI.memoperands_end(); I != E; ++I) {
    if (const Value *V = (*I)->getValue()) {
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V))
        if (PSV == PSV->getGOT() || PSV == PSV->getConstantPool())
          return true;
    }
  }
  return false;
}

/// IsLICMCandidate - Returns true if the instruction may be a suitable
/// candidate for LICM. e.g. If the instruction is a call, then it's obviously
/// not safe to hoist it.
bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
  // Check if it's safe to move the instruction.
  bool DontMoveAcrossStore = true;
  if (!I.isSafeToMove(TII, AA, DontMoveAcrossStore))
    return false;

  // If it is a load then check if it is guaranteed to execute by making sure
  // that it dominates all exiting blocks. If it doesn't, then there is a path
  // out of the loop which does not execute this load, so we can't hoist it.
  // Loads from constant memory are not safe to speculate all the time, for
  // example an indexed load from a jump table.
  // Stores and side effects are already checked by isSafeToMove.
  if (I.mayLoad() && !isLoadFromGOTOrConstantPool(I) &&
      !IsGuaranteedToExecute(I.getParent()))
    return false;

  return true;
}

/// IsLoopInvariantInst - Returns true if the instruction is loop
/// invariant. I.e., all virtual register operands are defined outside of the
/// loop, physical registers aren't accessed explicitly, and there are no side
/// effects that aren't captured by the operands or other flags.
///
bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
  if (!IsLICMCandidate(I))
    return false;

  // The instruction is loop invariant if all of its operands are.
  for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = I.getOperand(i);

    if (!MO.isReg())
      continue;

    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;

    // Don't hoist an instruction that uses or defines a physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI->def_empty(Reg))
          return false;
        if (AllocatableSet.test(Reg))
          return false;
        // Check for a def among the register's aliases too.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          if (!MRI->def_empty(AliasReg))
            return false;
          if (AllocatableSet.test(AliasReg))
            return false;
        }
        // Otherwise it's safe to move.
        continue;
      } else if (!MO.isDead()) {
        // A def that isn't dead. We can't move it.
        return false;
      } else if (CurLoop->getHeader()->isLiveIn(Reg)) {
        // If the reg is live into the loop, we can't hoist an instruction
        // which would clobber it.
        return false;
      }
    }

    if (!MO.isUse())
      continue;

    assert(MRI->getVRegDef(Reg) &&
           "Machine instr not mapped for this vreg?!");

    // If the loop contains the definition of an operand, then the instruction
    // isn't loop invariant.
    if (CurLoop->contains(MRI->getVRegDef(Reg)))
      return false;
  }

  // If we got this far, the instruction is loop invariant!
  return true;
}


/// HasAnyPHIUse - Return true if the specified register is used by any
/// phi node.
bool MachineLICM::HasAnyPHIUse(unsigned Reg) const {
  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
         UE = MRI->use_end(); UI != UE; ++UI) {
    MachineInstr *UseMI = &*UI;
    if (UseMI->isPHI())
      return true;
    // Look past copies as well: a copy's destination may itself feed a PHI.
    if (UseMI->isCopy()) {
      unsigned Def = UseMI->getOperand(0).getReg();
      if (TargetRegisterInfo::isVirtualRegister(Def) &&
          HasAnyPHIUse(Def))
        return true;
    }
  }
  return false;
}

/// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
/// and an use in the current loop, return true if the target considered
/// it 'high'.
971bool MachineLICM::HasHighOperandLatency(MachineInstr &MI, 972 unsigned DefIdx, unsigned Reg) const { 973 if (!InstrItins || InstrItins->isEmpty() || MRI->use_nodbg_empty(Reg)) 974 return false; 975 976 for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg), 977 E = MRI->use_nodbg_end(); I != E; ++I) { 978 MachineInstr *UseMI = &*I; 979 if (UseMI->isCopyLike()) 980 continue; 981 if (!CurLoop->contains(UseMI->getParent())) 982 continue; 983 for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) { 984 const MachineOperand &MO = UseMI->getOperand(i); 985 if (!MO.isReg() || !MO.isUse()) 986 continue; 987 unsigned MOReg = MO.getReg(); 988 if (MOReg != Reg) 989 continue; 990 991 if (TII->hasHighOperandLatency(InstrItins, MRI, &MI, DefIdx, UseMI, i)) 992 return true; 993 } 994 995 // Only look at the first in loop use. 996 break; 997 } 998 999 return false; 1000} 1001 1002/// IsCheapInstruction - Return true if the instruction is marked "cheap" or 1003/// the operand latency between its def and a use is one or less. 1004bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const { 1005 if (MI.isAsCheapAsAMove() || MI.isCopyLike()) 1006 return true; 1007 if (!InstrItins || InstrItins->isEmpty()) 1008 return false; 1009 1010 bool isCheap = false; 1011 unsigned NumDefs = MI.getDesc().getNumDefs(); 1012 for (unsigned i = 0, e = MI.getNumOperands(); NumDefs && i != e; ++i) { 1013 MachineOperand &DefMO = MI.getOperand(i); 1014 if (!DefMO.isReg() || !DefMO.isDef()) 1015 continue; 1016 --NumDefs; 1017 unsigned Reg = DefMO.getReg(); 1018 if (TargetRegisterInfo::isPhysicalRegister(Reg)) 1019 continue; 1020 1021 if (!TII->hasLowDefLatency(InstrItins, &MI, i)) 1022 return false; 1023 isCheap = true; 1024 } 1025 1026 return isCheap; 1027} 1028 1029/// CanCauseHighRegPressure - Visit BBs from header to current BB, check 1030/// if hoisting an instruction of the given cost matrix can cause high 1031/// register pressure. 
bool MachineLICM::CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost) {
  for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
       CI != CE; ++CI) {
    // Only register classes whose pressure would increase matter here.
    if (CI->second <= 0)
      continue;

    unsigned RCId = CI->first;
    // Walk the recorded back trace (header..current block); if adding this
    // cost would meet or exceed the class's register limit anywhere on the
    // path, report high pressure.
    for (unsigned i = BackTrace.size(); i != 0; --i) {
      SmallVector<unsigned, 8> &RP = BackTrace[i-1];
      if (RP[RCId] + CI->second >= RegLimit[RCId])
        return true;
    }
  }

  return false;
}

/// UpdateBackTraceRegPressure - Traverse the back trace from header to the
/// current block and update their register pressures to reflect the effect
/// of hoisting MI from the current block to the preheader.
void MachineLICM::UpdateBackTraceRegPressure(const MachineInstr *MI) {
  // IMPLICIT_DEF contributes nothing to register pressure.
  if (MI->isImplicitDef())
    return;

  // First compute the 'cost' of the instruction, i.e. its contribution
  // to register pressure: positive for each virtual def, negative for each
  // killed virtual use.
  DenseMap<unsigned, int> Cost;
  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    unsigned Reg = MO.getReg();
    if (!TargetRegisterInfo::isVirtualRegister(Reg))
      continue;

    unsigned RCId, RCCost;
    getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
    if (MO.isDef()) {
      DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
      if (CI != Cost.end())
        CI->second += RCCost;
      else
        Cost.insert(std::make_pair(RCId, RCCost));
    } else if (isOperandKill(MO, MRI)) {
      DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
      if (CI != Cost.end())
        CI->second -= RCCost;
      else
        Cost.insert(std::make_pair(RCId, -RCCost));
    }
  }

  // Update register pressure of blocks from loop header to current block.
  for (unsigned i = 0, e = BackTrace.size(); i != e; ++i) {
    SmallVector<unsigned, 8> &RP = BackTrace[i];
    for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
         CI != CE; ++CI) {
      unsigned RCId = CI->first;
      RP[RCId] += CI->second;
    }
  }
}

/// IsProfitableToHoist - Return true if it is potentially profitable to hoist
/// the given loop invariant.
bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
  if (MI.isImplicitDef())
    return true;

  // If the instruction is cheap, only hoist if it is re-materializable. LICM
  // will increase register pressure. It's probably not worth it if the
  // instruction is cheap.
  // Also hoist loads from constant memory, e.g. load from stubs, GOT. Hoisting
  // these tend to help performance in low register pressure situation. The
  // trade off is it may cause spill in high pressure situation. It will end up
  // adding a store in the loop preheader. But the reload is no more expensive.
  // The side benefit is these loads are frequently CSE'ed.
  if (IsCheapInstruction(MI)) {
    if (!TII->isTriviallyReMaterializable(&MI, AA))
      return false;
  } else {
    // Estimate register pressure to determine whether to LICM the instruction.
    // In low register pressure situation, we can be more aggressive about
    // hoisting. Also, favors hoisting long latency instructions even in
    // moderately high pressure situation.
    // FIXME: If there are long latency loop-invariant instructions inside the
    // loop at this point, why didn't the optimizer's LICM hoist them?
    DenseMap<unsigned, int> Cost;
    for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() || MO.isImplicit())
        continue;
      unsigned Reg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(Reg))
        continue;

      unsigned RCId, RCCost;
      getRegisterClassIDAndCost(&MI, Reg, i, RCId, RCCost);
      if (MO.isDef()) {
        // A def with high operand latency is always worth hoisting.
        if (HasHighOperandLatency(MI, i, Reg)) {
          ++NumHighLatency;
          return true;
        }

        DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
        if (CI != Cost.end())
          CI->second += RCCost;
        else
          Cost.insert(std::make_pair(RCId, RCCost));
      } else if (isOperandKill(MO, MRI)) {
        // If a virtual register use is a kill, hoisting it out of the loop
        // may actually reduce register pressure or be register pressure
        // neutral.
        DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
        if (CI != Cost.end())
          CI->second -= RCCost;
        else
          Cost.insert(std::make_pair(RCId, -RCCost));
      }
    }

    // Visit BBs from header to current BB, if hoisting this doesn't cause
    // high register pressure, then it's safe to proceed.
    if (!CanCauseHighRegPressure(Cost)) {
      ++NumLowRP;
      return true;
    }

    // Do not "speculate" in high register pressure situation. If an
    // instruction is not guaranteed to be executed in the loop, it's best to be
    // conservative.
    if (AvoidSpeculation &&
        (!IsGuaranteedToExecute(MI.getParent()) && !MayCSE(&MI)))
      return false;

    // High register pressure situation, only hoist if the instruction is going to
    // be remat'ed.
    if (!TII->isTriviallyReMaterializable(&MI, AA) &&
        !MI.isInvariantLoad(AA))
      return false;
  }

  // If result(s) of this instruction are used by PHIs, don't hoist it,
  // because doing so would introduce an extra copy.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    if (HasAnyPHIUse(MO.getReg()))
      return false;
  }

  return true;
}

/// ExtractHoistableLoad - Try to unfold the load part of MI into a separate
/// instruction; return the new load if it is loop invariant and profitable
/// to hoist, otherwise return null and leave MI untouched.
MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
  // Don't unfold simple loads.
  if (MI->canFoldAsLoad())
    return 0;

  // If not, we may be able to unfold a load and hoist that.
  // First test whether the instruction is loading from an amenable
  // memory location.
  if (!MI->isInvariantLoad(AA))
    return 0;

  // Next determine the register class for a temporary register.
  unsigned LoadRegIndex;
  unsigned NewOpc =
    TII->getOpcodeAfterMemoryUnfold(MI->getOpcode(),
                                    /*UnfoldLoad=*/true,
                                    /*UnfoldStore=*/false,
                                    &LoadRegIndex);
  if (NewOpc == 0) return 0;
  const MCInstrDesc &MID = TII->get(NewOpc);
  // Only handle single-def results.
  if (MID.getNumDefs() != 1) return 0;
  const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI);
  // Ok, we're unfolding. Create a temporary register and do the unfold.
  unsigned Reg = MRI->createVirtualRegister(RC);

  MachineFunction &MF = *MI->getParent()->getParent();
  SmallVector<MachineInstr *, 2> NewMIs;
  bool Success =
    TII->unfoldMemoryOperand(MF, MI, Reg,
                             /*UnfoldLoad=*/true, /*UnfoldStore=*/false,
                             NewMIs);
  (void)Success;
  assert(Success &&
         "unfoldMemoryOperand failed when getOpcodeAfterMemoryUnfold "
         "succeeded!");
  assert(NewMIs.size() == 2 &&
         "Unfolded a load into multiple instructions!");
  // Insert the load (NewMIs[0]) and the remainder (NewMIs[1]) before MI.
  MachineBasicBlock *MBB = MI->getParent();
  MachineBasicBlock::iterator Pos = MI;
  MBB->insert(Pos, NewMIs[0]);
  MBB->insert(Pos, NewMIs[1]);
  // If unfolding produced a load that wasn't loop-invariant or profitable to
  // hoist, discard the new instructions and bail.
  if (!IsLoopInvariantInst(*NewMIs[0]) || !IsProfitableToHoist(*NewMIs[0])) {
    NewMIs[0]->eraseFromParent();
    NewMIs[1]->eraseFromParent();
    return 0;
  }

  // Update register pressure for the unfolded instruction.
  UpdateRegPressure(NewMIs[1]);

  // Otherwise we successfully unfolded a load that we can hoist.
  MI->eraseFromParent();
  return NewMIs[0];
}

/// InitCSEMap - Map each opcode in BB to the list of its instructions, so
/// hoisted instructions can later be CSE'd against them.
void MachineLICM::InitCSEMap(MachineBasicBlock *BB) {
  for (MachineBasicBlock::iterator I = BB->begin(),E = BB->end(); I != E; ++I) {
    const MachineInstr *MI = &*I;
    unsigned Opcode = MI->getOpcode();
    DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
      CI = CSEMap.find(Opcode);
    if (CI != CSEMap.end())
      CI->second.push_back(MI);
    else {
      std::vector<const MachineInstr*> CSEMIs;
      CSEMIs.push_back(MI);
      CSEMap.insert(std::make_pair(Opcode, CSEMIs));
    }
  }
}

/// LookForDuplicate - Return the first instruction in PrevMIs that produces
/// the same value as MI, or null if there is none.
const MachineInstr*
MachineLICM::LookForDuplicate(const MachineInstr *MI,
                              std::vector<const MachineInstr*> &PrevMIs) {
  for (unsigned i = 0, e = PrevMIs.size(); i != e; ++i) {
    const MachineInstr *PrevMI = PrevMIs[i];
    // MRI is only consulted pre-regalloc; post-regalloc compare as-is.
    if (TII->produceSameValue(MI, PrevMI, (PreRegAlloc ? MRI : 0)))
      return PrevMI;
  }
  return 0;
}

/// EliminateCSE - Given a known duplicate list for MI's opcode, replace MI's
/// defs with those of an equivalent prior instruction and erase MI. Returns
/// true if MI was eliminated.
bool MachineLICM::EliminateCSE(MachineInstr *MI,
          DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI) {
  // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
  // the undef property onto uses.
  if (CI == CSEMap.end() || MI->isImplicitDef())
    return false;

  if (const MachineInstr *Dup = LookForDuplicate(MI, CI->second)) {
    DEBUG(dbgs() << "CSEing " << *MI << " with " << *Dup);

    // Replace virtual registers defined by MI by their counterparts defined
    // by Dup.
    SmallVector<unsigned, 2> Defs;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);

      // Physical registers may not differ here.
      assert((!MO.isReg() || MO.getReg() == 0 ||
              !TargetRegisterInfo::isPhysicalRegister(MO.getReg()) ||
              MO.getReg() == Dup->getOperand(i).getReg()) &&
             "Instructions with different phys regs are not identical!");

      if (MO.isReg() && MO.isDef() &&
          !TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
        Defs.push_back(i);
    }

    // Constrain each of Dup's def classes to MI's; if any constraint fails,
    // undo the classes already changed and give up on the CSE.
    SmallVector<const TargetRegisterClass*, 2> OrigRCs;
    for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
      unsigned Idx = Defs[i];
      unsigned Reg = MI->getOperand(Idx).getReg();
      unsigned DupReg = Dup->getOperand(Idx).getReg();
      OrigRCs.push_back(MRI->getRegClass(DupReg));

      if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) {
        // Restore old RCs if more than one defs.
        for (unsigned j = 0; j != i; ++j)
          MRI->setRegClass(Dup->getOperand(Defs[j]).getReg(), OrigRCs[j]);
        return false;
      }
    }

    for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
      unsigned Idx = Defs[i];
      unsigned Reg = MI->getOperand(Idx).getReg();
      unsigned DupReg = Dup->getOperand(Idx).getReg();
      MRI->replaceRegWith(Reg, DupReg);
      // DupReg now has extra uses; stale kill flags must be cleared.
      MRI->clearKillFlags(DupReg);
    }

    MI->eraseFromParent();
    ++NumCSEed;
    return true;
  }
  return false;
}

/// MayCSE - Return true if the given instruction will be CSE'd if it's
/// hoisted out of the loop.
bool MachineLICM::MayCSE(MachineInstr *MI) {
  unsigned Opcode = MI->getOpcode();
  DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
    CI = CSEMap.find(Opcode);
  // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
  // the undef property onto uses.
  if (CI == CSEMap.end() || MI->isImplicitDef())
    return false;

  return LookForDuplicate(MI, CI->second) != 0;
}

/// Hoist - When an instruction is found to use only loop invariant operands
/// that are safe to hoist, this instruction is called to do the dirty work.
///
bool MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
  // First check whether we should hoist this instruction.
  if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
    // If not, try unfolding a hoistable load.
    MI = ExtractHoistableLoad(MI);
    if (!MI) return false;
  }

  // Now move the instructions to the predecessor, inserting it before any
  // terminator instructions.
  DEBUG({
      dbgs() << "Hoisting " << *MI;
      if (Preheader->getBasicBlock())
        dbgs() << " to MachineBasicBlock "
               << Preheader->getName();
      if (MI->getParent()->getBasicBlock())
        dbgs() << " from MachineBasicBlock "
               << MI->getParent()->getName();
      dbgs() << "\n";
    });

  // If this is the first instruction being hoisted to the preheader,
  // initialize the CSE map with potential common expressions.
  if (FirstInLoop) {
    InitCSEMap(Preheader);
    FirstInLoop = false;
  }

  // Look for opportunity to CSE the hoisted instruction.
  unsigned Opcode = MI->getOpcode();
  DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
    CI = CSEMap.find(Opcode);
  if (!EliminateCSE(MI, CI)) {
    // Otherwise, splice the instruction to the preheader.
    Preheader->splice(Preheader->getFirstTerminator(),MI->getParent(),MI);

    // Update register pressure for BBs from header to this block.
    UpdateBackTraceRegPressure(MI);

    // Clear the kill flags of any register this instruction defines,
    // since they may need to be live throughout the entire loop
    // rather than just live for part of it.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isDef() && !MO.isDead())
        MRI->clearKillFlags(MO.getReg());
    }

    // Add to the CSE map.
    if (CI != CSEMap.end())
      CI->second.push_back(MI);
    else {
      std::vector<const MachineInstr*> CSEMIs;
      CSEMIs.push_back(MI);
      CSEMap.insert(std::make_pair(Opcode, CSEMIs));
    }
  }

  ++NumHoisted;
  Changed = true;

  return true;
}

/// getCurPreheader - Find or create the hoisting destination for the current
/// loop, caching the result (with -1 as a "known to have failed" sentinel).
MachineBasicBlock *MachineLICM::getCurPreheader() {
  // Determine the block to which to hoist instructions. If we can't find a
  // suitable loop predecessor, we can't do any hoisting.

  // If we've tried to get a preheader and failed, don't try again.
  if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1))
    return 0;

  if (!CurPreheader) {
    CurPreheader = CurLoop->getLoopPreheader();
    if (!CurPreheader) {
      // No preheader; fall back to the single loop predecessor, splitting
      // its edge into the header to get an insertion point.
      MachineBasicBlock *Pred = CurLoop->getLoopPredecessor();
      if (!Pred) {
        CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
        return 0;
      }

      CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), this);
      if (!CurPreheader) {
        CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
        return 0;
      }
    }
  }
  return CurPreheader;
}