PostRASchedulerList.cpp revision 1274ced8a3f0fd1e9a6f7c7e17d69368c4f78b90
//===----- PostRASchedulerList.cpp - list scheduler -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include <map>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");

static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies"),
                      cl::init(true), cl::Hidden);

static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                            cl::desc("Enable simple hazard-avoidance"),
                            cl::init(true), cl::Hidden);

namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(&ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands
    /// have been issued, but their results are not ready yet (due to the
    /// latency of the operation). Once the operands become available, the
    /// instruction is added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;
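
    // For example, with a two-cycle load, the load issues at cycle 0 but
    // its result isn't ready until cycle 2; a consumer released by the
    // load therefore waits in PendingQueue until CurCycle reaches its
    // depth, at which point ListScheduleTopDown moves it over to the
    // AvailableQueue.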

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// Classes - For live regs that are only used in one register class in a
    /// live range, the register class. If the register is not live, the
    /// corresponding value is null. If the register is live but used in
    /// multiple register classes, the corresponding value is -1 casted to a
    /// pointer.
    const TargetRegisterClass *
      Classes[TargetRegisterInfo::FirstVirtualRegister];

    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, MachineOperand *> RegRefs;

    /// The index of the most recent kill (proceeding bottom-up), or ~0u if
    /// the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// The index of the most recent complete def (proceeding bottom up), or
    /// ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        HazardRec(HR) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

  private:
    void PrescanInstruction(MachineInstr *MI);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
  };

  /// SimpleHazardRecognizer - A *very* simple hazard recognizer. It uses
  /// a coarse classification and attempts to prevent instructions of
  /// a given class from being grouped too densely together.
  class SimpleHazardRecognizer : public ScheduleHazardRecognizer {
    /// Class - A simple classification for SUnits.
    enum Class {
      Other, Load, Store
    };

    /// Window - The Class values of the most recently issued
    /// instructions.
    Class Window[8];

    /// getClass - Classify the given SUnit.
    Class getClass(const SUnit *SU) {
      const MachineInstr *MI = SU->getInstr();
      const TargetInstrDesc &TID = MI->getDesc();
      if (TID.mayLoad())
        return Load;
      if (TID.mayStore())
        return Store;
      return Other;
    }
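
    // For example, eight consecutive loads fill Window entirely with
    // Load entries; a ninth load then scores 1+2+...+8 = 36 in
    // getHazardType below, exceeding the threshold of 16, so it is
    // reported as a hazard and other work gets interleaved first.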

    /// Step - Rotate the existing entries in Window and insert the
    /// given class value in position as the most recent.
    void Step(Class C) {
      std::copy(Window+1, array_endof(Window), Window);
      Window[array_lengthof(Window)-1] = C;
    }

  public:
    SimpleHazardRecognizer() : Window() {}

    virtual HazardType getHazardType(SUnit *SU) {
      Class C = getClass(SU);
      if (C == Other)
        return NoHazard;
      unsigned Score = 0;
      for (unsigned i = 0; i != array_lengthof(Window); ++i)
        if (Window[i] == C)
          Score += i + 1;
      if (Score > array_lengthof(Window) * 2)
        return Hazard;
      return NoHazard;
    }

    virtual void EmitInstruction(SUnit *SU) {
      Step(getClass(SU));
    }

    virtual void AdvanceCycle() {
      Step(Other);
    }
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  DOUT << "PostRAScheduler\n";

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
                                 new SimpleHazardRecognizer :
                                 new ScheduleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.FinishBlock();
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);
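
  // Liveness convention used below: for each physical register, exactly
  // one of KillIndices[Reg] and DefIndices[Reg] is ~0u. A dead register
  // has KillIndices[Reg] == ~0u and DefIndices[Reg] set to the index of
  // its most recent def; initializing DefIndices to BB->size() treats
  // every register as if defined just below the bottom of the block.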

  // Clear out the register class data.
  std::fill(Classes, array_endof(Classes),
            static_cast<const TargetRegisterClass *>(0));

  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn())
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  else
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }

  // Consider callee-saved registers as live-out, since we're running after
  // prologue/epilogue insertion so there's no way to add additional
  // saved registers.
  //
  // TODO: If the callee saves and restores these, then we can potentially
  // use them between the save and the restore. To do that, we could scan
  // the exit blocks to see which of these registers are defined.
  // Alternatively, callee-saved registers that aren't saved and restored
  // could be marked live-in in every block.
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  // Build the scheduling graph.
  BuildSchedGraph();
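
  // SUnits now holds one node per MachineInstr in the current region,
  // with data, anti, and output dependence edges, and the special
  // EntrySU/ExitSU nodes bracketing the region.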

  if (EnableAntiDepBreaking) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph();
    }
  }

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  // Any register which was defined within the previous scheduling region
  // may have been rescheduled and its lifetime may overlap with registers
  // in ways not reflected in our current liveness state. For each such
  // register, adjust the liveness state to be conservatively correct.
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg)
    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
      // Mark this register to be non-renamable.
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      // Move the def index to the end of the previous region, to reflect
      // that the def could theoretically have been scheduled at the end.
      DefIndices[Reg] = InsertPosIndex;
    }

  PrescanInstruction(MI);
  ScanInstruction(MI, Count);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// getInstrOperandRegClass - Return the register class of an operand of an
/// instruction with the specified TargetInstrDesc.
static const TargetRegisterClass*
getInstrOperandRegClass(const TargetRegisterInfo *TRI,
                        const TargetInstrDesc &II, unsigned Op) {
  if (Op >= II.getNumOperands())
    return NULL;
  if (II.OpInfo[Op].isLookupPtrRegClass())
    return TRI->getPointerRegClass();
  return TRI->getRegClass(II.OpInfo[Op].RegClass);
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}
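
// For example, if SU has two predecessor edges, one from a node at depth 3
// with latency 2 (total 5) and one from a node at depth 5 with latency 1
// (total 6), CriticalPathStep returns the second edge; on a tie, an
// anti-dependence edge wins, since that's the kind of edge
// BreakAntiDependencies may be able to remove.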

void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
  // Scan the register operands for this instruction and update
  // Classes and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    const TargetRegisterClass *NewRC =
      getInstrOperandRegClass(TRI, MI->getDesc(), i);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    // Now check for aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      // If an alias of the reg is used during the live range, give up.
      // Note that this allows us to skip checking if AntiDepReg
      // overlaps with any of the aliases, among other things.
      unsigned AliasReg = *Alias;
      if (Classes[AliasReg]) {
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }

    // If we're still willing to consider this register, note the reference.
    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
      RegRefs.insert(std::make_pair(Reg, &MO));
  }
}
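
// Note the scan direction: BreakAntiDependencies walks the region bottom-up,
// so in ScanInstruction below a def ends a live range (the register is dead
// above it), and a use seen while the register is dead must be the last use,
// i.e. the kill.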

void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  // Update liveness.
  // Proceeding upwards, registers that are defed but not used in this
  // instruction are now dead.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isDef()) continue;
    // Ignore two-addr defs.
    if (MI->isRegReDefinedByTwoAddr(i)) continue;

    DefIndices[Reg] = Count;
    KillIndices[Reg] = ~0u;
    assert(((KillIndices[Reg] == ~0u) !=
            (DefIndices[Reg] == ~0u)) &&
           "Kill and Def maps aren't consistent for Reg!");
    Classes[Reg] = 0;
    RegRefs.erase(Reg);
    // Repeat, for all subregs.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
      KillIndices[SubregReg] = ~0u;
      Classes[SubregReg] = 0;
      RegRefs.erase(SubregReg);
    }
    // Conservatively mark super-registers as unusable.
    for (const unsigned *Super = TRI->getSuperRegisters(Reg);
         *Super; ++Super) {
      unsigned SuperReg = *Super;
      Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
    }
  }
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isUse()) continue;

    const TargetRegisterClass *NewRC =
      getInstrOperandRegClass(TRI, MI->getDesc(), i);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    RegRefs.insert(std::make_pair(Reg, &MO));

    // It wasn't previously live but now it is, so this is a kill.
    if (KillIndices[Reg] == ~0u) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
    }
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      if (KillIndices[AliasReg] == ~0u) {
        KillIndices[AliasReg] = Count;
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;

  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

  DOUT << "Critical path has total latency "
       << (Max->getDepth() + Max->Latency) << "\n";

  // Track progress along the critical path through the SUnit graph as we walk
  // the instructions.
  SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free. This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break. To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;

    // After regalloc, IMPLICIT_DEF instructions aren't safe to treat as
    // dependence-breaking. In the case of an INSERT_SUBREG, the IMPLICIT_DEF
    // is left behind appearing to clobber the super-register, while the
    // subregister needs to remain live. So we just ignore them.
    if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
      continue;

    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          // Don't break anti-dependencies on non-allocatable registers.
          if (!AllocatableSet.test(AntiDepReg))
            AntiDepReg = 0;
          else {
            // If the SUnit has other dependencies on the SUnit that it
            // anti-depends on, don't bother breaking the anti-dependency
            // since those edges would prevent such units from being
            // scheduled past each other regardless.
            //
            // Also, if there are dependencies on other SUnits with the
            // same register as the anti-dependency, don't attempt to
            // break it.
            for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
              if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                AntiDepReg = 0;
                break;
              }
          }
        }
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    PrescanInstruction(MI);

    // If this instruction has a use of AntiDepReg, breaking it
    // is invalid.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (MO.isUse() && AntiDepReg == Reg) {
        AntiDepReg = 0;
        break;
      }
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
           RE = RC->allocation_order_end(MF); R != RE; ++R) {
        unsigned NewReg = *R;
        // Don't replace a register with itself.
        if (NewReg == AntiDepReg) continue;
        // Don't replace a register with one that was recently used to repair
        // an anti-dependence with this AntiDepReg, because that would
        // re-introduce that anti-dependence.
        if (NewReg == LastNewReg[AntiDepReg]) continue;
        // If NewReg is dead and NewReg's most recent def is not before
        // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");
        if (KillIndices[NewReg] == ~0u &&
            Classes[NewReg] != reinterpret_cast<TargetRegisterClass *>(-1) &&
            KillIndices[AntiDepReg] <= DefIndices[NewReg]) {
          DOUT << "Breaking anti-dependence edge on "
               << TRI->getName(AntiDepReg)
               << " with " << RegRefs.count(AntiDepReg) << " references"
               << " using " << TRI->getName(NewReg) << "!\n";

          // Update the references to the old register to refer to the new
          // register.
          std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                    std::multimap<unsigned, MachineOperand *>::iterator>
            Range = RegRefs.equal_range(AntiDepReg);
          for (std::multimap<unsigned, MachineOperand *>::iterator
               Q = Range.first, QE = Range.second; Q != QE; ++Q)
            Q->second->setReg(NewReg);

          // We just went back in time and modified history; the
          // liveness information for the anti-dependence reg is now
          // inconsistent. Set the state as if it were dead.
          Classes[NewReg] = Classes[AntiDepReg];
          DefIndices[NewReg] = DefIndices[AntiDepReg];
          KillIndices[NewReg] = KillIndices[AntiDepReg];
          assert(((KillIndices[NewReg] == ~0u) !=
                  (DefIndices[NewReg] == ~0u)) &&
                 "Kill and Def maps aren't consistent for NewReg!");

          Classes[AntiDepReg] = 0;
          DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
          KillIndices[AntiDepReg] = ~0u;
          assert(((KillIndices[AntiDepReg] == ~0u) !=
                  (DefIndices[AntiDepReg] == ~0u)) &&
                 "Kill and Def maps aren't consistent for AntiDepReg!");

          RegRefs.erase(AntiDepReg);
          Changed = true;
          LastNewReg[AntiDepReg] = NewReg;
          break;
        }
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();
  --SuccSU->NumPredsLeft;

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0) {
    cerr << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}
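
// For example, a successor released across a latency-2 edge from a node at
// depth 3 gets setDepthToAtLeast(5); once its last predecessor is scheduled
// it enters PendingQueue, and ListScheduleTopDown won't move it to the
// AvailableQueue until CurCycle reaches 5.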

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back; otherwise schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    // If there are no instructions available, don't try to issue anything, and
    // don't advance the hazard recognizer.
    if (AvailableQueue.empty()) {
      CurCycle = MinDepth != ~0u ? MinDepth : CurCycle + 1;
      continue;
    }

    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);

      // If this is a pseudo-op node, we don't want to increment the current
      // cycle.
      if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
        ++CurCycle;
    } else if (!HasNoopHazards) {
      // Otherwise, we have a pipeline stall, but no other problem, just
      // advance the current cycle and try again.
      DOUT << "*** Advancing cycle, no work to do\n";
      HazardRec->AdvanceCycle();
      ++NumStalls;
      ++CurCycle;
    } else {
      // Otherwise, we have no instructions to issue and we have instructions
      // that will fault if we don't do this right. This is the case for
      // processors without pipeline interlocks and other cases.
      DOUT << "*** Emitting noop\n";
      HazardRec->EmitNoop();
      Sequence.push_back(0);   // NULL here means noop
      ++NumNoops;
      ++CurCycle;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler() {
  return new PostRAScheduler();
}
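
// Typical hookup (a sketch; the actual setup lives in the target-independent
// LLVMTargetMachine code, not in this file):
//
//   PM.add(llvm::createPostRAScheduler());
//
// where PM is the pass manager being populated for code generation. The pass
// then reschedules each machine function after register allocation,
// optionally breaking anti-dependencies and avoiding simple hazards.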