PostRASchedulerList.cpp revision 1dd8c8560d45d36a8e507cd014352f1d313f9f9e
//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "RegisterClassInfo.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtargetInfo.enablePostRAScheduler(). This flag can be used to
// override the target.
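// For example (illustrative; the flag spellings come from the cl::opt
// declarations below, but the exact driver invocation may vary by version):
//   llc -post-RA-scheduler foo.ll
// forces the pass to run even if the target would not enable it, and
//   llc -post-RA-scheduler -break-anti-dependencies=critical foo.ll
// additionally requests anti-dependence breaking along the critical path.
// ("foo.ll" is just a placeholder input file.)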
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    const TargetInstrInfo *TII;
    RegisterClassInfo RegClassInfo;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetPassConfig>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    std::vector<unsigned> KillIndices;

  public:
    SchedulePostRATDList(
      MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
      AliasAnalysis *AA, const RegisterClassInfo&,
      TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
      SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs);

    ~SchedulePostRATDList();

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

char &llvm::PostRASchedulerID = PostRAScheduler::ID;

INITIALIZE_PASS(PostRAScheduler, "post-RA-sched",
                "Post RA top-down list latency scheduler", false, false)

SchedulePostRATDList::SchedulePostRATDList(
  MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
  AliasAnalysis *AA, const RegisterClassInfo &RCI,
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode,
  SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs)
  : ScheduleDAGInstrs(MF, MLI, MDT, /*IsPostRA=*/true), Topo(SUnits), AA(AA),
    KillIndices(TRI->getNumRegs())
{
  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
  HazardRec =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);
  AntiDepBreak =
    ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtargetInfo::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : NULL));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  TII = Fn.getTarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
  TargetPassConfig *PassConfig = &getAnalysis<TargetPassConfig>();

  RegClassInfo.runOnMachineFunction(Fn);

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtargetInfo::AntiDepBreakMode AntiDepMode =
    TargetSubtargetInfo::ANTIDEP_NONE;
  SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtargetInfo &ST = Fn.getTarget().getSubtarget<TargetSubtargetInfo>();
    if (!ST.enablePostRAScheduler(PassConfig->getOptLevel(), AntiDepMode,
                                  CriticalPathRCs))
      return false;
  }
  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all")
      ? TargetSubtargetInfo::ANTIDEP_ALL
      : ((EnableAntiDepBreaking == "critical")
         ? TargetSubtargetInfo::ANTIDEP_CRITICAL
         : TargetSubtargetInfo::ANTIDEP_NONE);
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getName()
             << ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = llvm::prior(I);
      if (TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
      if (MI->isBundle())
        Count -= MI->getBundleSize();
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills
    Scheduler.FixupKills(MBB);
  }

  return true;
}
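
// Illustration of the region loop above (hypothetical block): scheduling
// regions are delimited by instructions for which TII->isSchedulingBoundary()
// returns true (labels and other instructions that must not be moved). For a
// block of the form
//
//   A; B; <boundary>; C; D
//
// the scheduler is run on {C, D} and then on {A, B}; the boundary instruction
// itself is only passed to Observe() so the liveness bookkeeping stays
// current, and it is never reordered.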

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
                                          InsertPosIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, InsertPosIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// StartBlockForKills - Initialize register live-range state for updating kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  for (unsigned i = 0; i < TRI->getNumRegs(); ++i)
    KillIndices[i] = ~0u;

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  }
  else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}
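
// Illustration (hypothetical instructions): kill flags mark the last use of a
// register in the block. If the input contained
//
//   %r2 = ADD %r1, 1
//   %r3 = MUL %r1<kill>, 2     (last use of %r1)
//
// and scheduling hoists the MUL above the ADD, the ADD becomes the last use
// of %r1. FixupKills below walks the block bottom-up, tracking liveness in
// KillIndices, clearing stale kill flags and setting the flag on the actual
// last use.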

/// FixupKills - Fix the register kill flags, they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness. Registers that are defined but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node which causes
  // all descendants to be marked dirty. Setting the successor depth explicitly
  // here would cause depth to be recomputed for all its ancestors. If the
  // successor is not yet ready (because of a transitively redundant edge) then
  // this causes depth computation to be quadratic in the size of the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}
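
// Summary of the driver loop below: CurCycle models the current machine
// cycle and advances only when nothing more can issue (or the issue limit is
// reached). Each iteration (1) moves nodes from PendingQueue whose depth has
// been reached into AvailableQueue, (2) pops available nodes in priority
// order until one can issue without a hazard and schedules it, and (3) when
// nothing can issue, advances the cycle, either as a plain stall or by
// emitting a noop when the hazard recognizer reports a NoopHazard.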

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}