PostRASchedulerList.cpp revision 82c7248518a8b759a567fbb4b3176542ad2cf414
1//===----- SchedulePostRAList.cpp - list scheduler ------------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This implements a top-down list scheduler, using standard algorithms. 11// The basic approach uses a priority queue of available nodes to schedule. 12// One at a time, nodes are taken from the priority queue (thus in priority 13// order), checked for legality to schedule, and emitted if legal. 14// 15// Nodes may not be legal to schedule either due to structural hazards (e.g. 16// pipeline or resource constraints) or because an input to the instruction has 17// not completed execution. 18// 19//===----------------------------------------------------------------------===// 20 21#define DEBUG_TYPE "post-RA-sched" 22#include "AntiDepBreaker.h" 23#include "AggressiveAntiDepBreaker.h" 24#include "CriticalAntiDepBreaker.h" 25#include "ExactHazardRecognizer.h" 26#include "SimpleHazardRecognizer.h" 27#include "ScheduleDAGInstrs.h" 28#include "llvm/CodeGen/Passes.h" 29#include "llvm/CodeGen/LatencyPriorityQueue.h" 30#include "llvm/CodeGen/SchedulerRegistry.h" 31#include "llvm/CodeGen/MachineDominators.h" 32#include "llvm/CodeGen/MachineFrameInfo.h" 33#include "llvm/CodeGen/MachineFunctionPass.h" 34#include "llvm/CodeGen/MachineLoopInfo.h" 35#include "llvm/CodeGen/MachineRegisterInfo.h" 36#include "llvm/CodeGen/ScheduleHazardRecognizer.h" 37#include "llvm/Analysis/AliasAnalysis.h" 38#include "llvm/Target/TargetLowering.h" 39#include "llvm/Target/TargetMachine.h" 40#include "llvm/Target/TargetInstrInfo.h" 41#include "llvm/Target/TargetRegisterInfo.h" 42#include "llvm/Target/TargetSubtarget.h" 43#include "llvm/Support/CommandLine.h" 44#include "llvm/Support/Debug.h" 45#include "llvm/Support/ErrorHandling.h" 46#include "llvm/Support/raw_ostream.h" 47#include "llvm/ADT/BitVector.h" 48#include "llvm/ADT/Statistic.h" 49#include <map> 50#include <set> 51using namespace llvm; 52 53STATISTIC(NumNoops, "Number of noops inserted"); 54STATISTIC(NumStalls, "Number of pipeline stalls"); 55STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies"); 56 57// Post-RA scheduling is enabled with 58// TargetSubtarget.enablePostRAScheduler(). This flag can be used to 59// override the target. 60static cl::opt<bool> 61EnablePostRAScheduler("post-RA-scheduler", 62 cl::desc("Enable scheduling after register allocation"), 63 cl::init(false), cl::Hidden); 64static cl::opt<std::string> 65EnableAntiDepBreaking("break-anti-dependencies", 66 cl::desc("Break post-RA scheduling anti-dependencies: " 67 "\"critical\", \"all\", or \"none\""), 68 cl::init("none"), cl::Hidden); 69static cl::opt<bool> 70EnablePostRAHazardAvoidance("avoid-hazards", 71 cl::desc("Enable exact hazard avoidance"), 72 cl::init(true), cl::Hidden); 73 74// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod 75static cl::opt<int> 76DebugDiv("postra-sched-debugdiv", 77 cl::desc("Debug control MBBs that are scheduled"), 78 cl::init(0), cl::Hidden); 79static cl::opt<int> 80DebugMod("postra-sched-debugmod", 81 cl::desc("Debug control MBBs that are scheduled"), 82 cl::init(0), cl::Hidden); 83 84AntiDepBreaker::~AntiDepBreaker() { } 85 86namespace { 87 class PostRAScheduler : public MachineFunctionPass { 88 AliasAnalysis *AA; 89 CodeGenOpt::Level OptLevel; 90 91 public: 92 static char ID; 93 PostRAScheduler(CodeGenOpt::Level ol) : 94 MachineFunctionPass(&ID), OptLevel(ol) {} 95 96 void getAnalysisUsage(AnalysisUsage &AU) const { 97 AU.setPreservesCFG(); 98 AU.addRequired<AliasAnalysis>(); 99 AU.addRequired<MachineDominatorTree>(); 100 AU.addPreserved<MachineDominatorTree>(); 101 AU.addRequired<MachineLoopInfo>(); 102 AU.addPreserved<MachineLoopInfo>(); 103 MachineFunctionPass::getAnalysisUsage(AU); 104 } 105 106 const char *getPassName() const { 107 return "Post RA top-down list latency scheduler"; 108 } 109 110 bool runOnMachineFunction(MachineFunction &Fn); 111 }; 112 char PostRAScheduler::ID = 0; 113 114 class SchedulePostRATDList : public ScheduleDAGInstrs { 115 /// AvailableQueue - The priority queue to use for the available SUnits. 116 /// 117 LatencyPriorityQueue AvailableQueue; 118 119 /// PendingQueue - This contains all of the instructions whose operands have 120 /// been issued, but their results are not ready yet (due to the latency of 121 /// the operation). Once the operands becomes available, the instruction is 122 /// added to the AvailableQueue. 123 std::vector<SUnit*> PendingQueue; 124 125 /// Topo - A topological ordering for SUnits. 126 ScheduleDAGTopologicalSort Topo; 127 128 /// HazardRec - The hazard recognizer to use. 129 ScheduleHazardRecognizer *HazardRec; 130 131 /// AntiDepBreak - Anti-dependence breaking object, or NULL if none 132 AntiDepBreaker *AntiDepBreak; 133 134 /// AA - AliasAnalysis for making memory reference queries. 135 AliasAnalysis *AA; 136 137 /// KillIndices - The index of the most recent kill (proceding bottom-up), 138 /// or ~0u if the register is not live. 139 unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister]; 140 141 public: 142 SchedulePostRATDList(MachineFunction &MF, 143 const MachineLoopInfo &MLI, 144 const MachineDominatorTree &MDT, 145 ScheduleHazardRecognizer *HR, 146 AntiDepBreaker *ADB, 147 AliasAnalysis *aa) 148 : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits), 149 HazardRec(HR), AntiDepBreak(ADB), AA(aa) {} 150 151 ~SchedulePostRATDList() { 152 } 153 154 /// StartBlock - Initialize register live-range state for scheduling in 155 /// this block. 156 /// 157 void StartBlock(MachineBasicBlock *BB); 158 159 /// Schedule - Schedule the instruction range using list scheduling. 160 /// 161 void Schedule(); 162 163 /// Observe - Update liveness information to account for the current 164 /// instruction, which will not be scheduled. 165 /// 166 void Observe(MachineInstr *MI, unsigned Count); 167 168 /// FinishBlock - Clean up register live-range state. 169 /// 170 void FinishBlock(); 171 172 /// FixupKills - Fix register kill flags that have been made 173 /// invalid due to scheduling 174 /// 175 void FixupKills(MachineBasicBlock *MBB); 176 177 private: 178 void ReleaseSucc(SUnit *SU, SDep *SuccEdge); 179 void ReleaseSuccessors(SUnit *SU); 180 void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle); 181 void ListScheduleTopDown(); 182 void StartBlockForKills(MachineBasicBlock *BB); 183 184 // ToggleKillFlag - Toggle a register operand kill flag. Other 185 // adjustments may be made to the instruction if necessary. Return 186 // true if the operand has been deleted, false if not. 187 bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO); 188 }; 189} 190 191/// isSchedulingBoundary - Test if the given instruction should be 192/// considered a scheduling boundary. This primarily includes labels 193/// and terminators. 194/// 195static bool isSchedulingBoundary(const MachineInstr *MI, 196 const MachineFunction &MF) { 197 // Terminators and labels can't be scheduled around. 198 if (MI->getDesc().isTerminator() || MI->isLabel()) 199 return true; 200 201 // Don't attempt to schedule around any instruction that modifies 202 // a stack-oriented pointer, as it's unlikely to be profitable. This 203 // saves compile time, because it doesn't require every single 204 // stack slot reference to depend on the instruction that does the 205 // modification. 206 const TargetLowering &TLI = *MF.getTarget().getTargetLowering(); 207 if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore())) 208 return true; 209 210 return false; 211} 212 213bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) { 214 AA = &getAnalysis<AliasAnalysis>(); 215 216 // Check for explicit enable/disable of post-ra scheduling. 217 TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE; 218 if (EnablePostRAScheduler.getPosition() > 0) { 219 if (!EnablePostRAScheduler) 220 return false; 221 } else { 222 // Check that post-RA scheduling is enabled for this target. 223 const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>(); 224 if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode)) 225 return false; 226 } 227 228 // Check for antidep breaking override... 229 if (EnableAntiDepBreaking.getPosition() > 0) { 230 AntiDepMode = (EnableAntiDepBreaking == "all") ? TargetSubtarget::ANTIDEP_ALL : 231 (EnableAntiDepBreaking == "critical") ? TargetSubtarget::ANTIDEP_CRITICAL : 232 TargetSubtarget::ANTIDEP_NONE; 233 } 234 235 DEBUG(errs() << "PostRAScheduler\n"); 236 237 const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>(); 238 const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>(); 239 const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData(); 240 ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ? 241 (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) : 242 (ScheduleHazardRecognizer *)new SimpleHazardRecognizer(); 243 AntiDepBreaker *ADB = 244 ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ? 245 (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn) : 246 ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ? 247 (AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL)); 248 249 SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, ADB, AA); 250 251 // Loop over all of the basic blocks 252 for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end(); 253 MBB != MBBe; ++MBB) { 254#ifndef NDEBUG 255 // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod 256 if (DebugDiv > 0) { 257 static int bbcnt = 0; 258 if (bbcnt++ % DebugDiv != DebugMod) 259 continue; 260 errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() << 261 ":MBB ID#" << MBB->getNumber() << " ***\n"; 262 } 263#endif 264 265 // Initialize register live-range state for scheduling in this block. 266 Scheduler.StartBlock(MBB); 267 268 // Schedule each sequence of instructions not interrupted by a label 269 // or anything else that effectively needs to shut down scheduling. 270 MachineBasicBlock::iterator Current = MBB->end(); 271 unsigned Count = MBB->size(), CurrentCount = Count; 272 for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) { 273 MachineInstr *MI = prior(I); 274 if (isSchedulingBoundary(MI, Fn)) { 275 Scheduler.Run(MBB, I, Current, CurrentCount); 276 Scheduler.EmitSchedule(0); 277 Current = MI; 278 CurrentCount = Count - 1; 279 Scheduler.Observe(MI, CurrentCount); 280 } 281 I = MI; 282 --Count; 283 } 284 assert(Count == 0 && "Instruction count mismatch!"); 285 assert((MBB->begin() == Current || CurrentCount != 0) && 286 "Instruction count mismatch!"); 287 Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount); 288 Scheduler.EmitSchedule(0); 289 290 // Clean up register live-range state. 291 Scheduler.FinishBlock(); 292 293 // Update register kills 294 Scheduler.FixupKills(MBB); 295 } 296 297 delete HR; 298 delete ADB; 299 300 return true; 301} 302 303/// StartBlock - Initialize register live-range state for scheduling in 304/// this block. 305/// 306void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) { 307 // Call the superclass. 308 ScheduleDAGInstrs::StartBlock(BB); 309 310 // Reset the hazard recognizer and anti-dep breaker. 311 HazardRec->Reset(); 312 if (AntiDepBreak != NULL) 313 AntiDepBreak->StartBlock(BB); 314} 315 316/// Schedule - Schedule the instruction range using list scheduling. 317/// 318void SchedulePostRATDList::Schedule() { 319 // Build the scheduling graph. 320 BuildSchedGraph(AA); 321 322 if (AntiDepBreak != NULL) { 323 for (unsigned i = 0, Trials = AntiDepBreak->GetMaxTrials(); 324 i < Trials; ++i) { 325 DEBUG(errs() << "********** Break Anti-Deps, Trial " << 326 i << " **********\n"); 327 unsigned Broken = 328 AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos, 329 InsertPosIndex); 330 if (Broken == 0) 331 break; 332 333 // We made changes. Update the dependency graph. 334 // Theoretically we could update the graph in place: 335 // When a live range is changed to use a different register, remove 336 // the def's anti-dependence *and* output-dependence edges due to 337 // that register, and add new anti-dependence and output-dependence 338 // edges based on the next live range of the register. 339 SUnits.clear(); 340 EntrySU = SUnit(); 341 ExitSU = SUnit(); 342 BuildSchedGraph(AA); 343 344 NumFixedAnti += Broken; 345 } 346 } 347 348 DEBUG(errs() << "********** List Scheduling **********\n"); 349 350 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su) 351 SUnits[su].dumpAll(this)); 352 353 AvailableQueue.initNodes(SUnits); 354 355 ListScheduleTopDown(); 356 357 AvailableQueue.releaseState(); 358} 359 360/// Observe - Update liveness information to account for the current 361/// instruction, which will not be scheduled. 362/// 363void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) { 364 if (AntiDepBreak != NULL) 365 AntiDepBreak->Observe(MI, Count, InsertPosIndex); 366} 367 368/// FinishBlock - Clean up register live-range state. 369/// 370void SchedulePostRATDList::FinishBlock() { 371 if (AntiDepBreak != NULL) 372 AntiDepBreak->FinishBlock(); 373 374 // Call the superclass. 375 ScheduleDAGInstrs::FinishBlock(); 376} 377 378/// StartBlockForKills - Initialize register live-range state for updating kills 379/// 380void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) { 381 // Initialize the indices to indicate that no registers are live. 382 std::fill(KillIndices, array_endof(KillIndices), ~0u); 383 384 // Determine the live-out physregs for this block. 385 if (!BB->empty() && BB->back().getDesc().isReturn()) { 386 // In a return block, examine the function live-out regs. 387 for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(), 388 E = MRI.liveout_end(); I != E; ++I) { 389 unsigned Reg = *I; 390 KillIndices[Reg] = BB->size(); 391 // Repeat, for all subregs. 392 for (const unsigned *Subreg = TRI->getSubRegisters(Reg); 393 *Subreg; ++Subreg) { 394 KillIndices[*Subreg] = BB->size(); 395 } 396 } 397 } 398 else { 399 // In a non-return block, examine the live-in regs of all successors. 400 for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(), 401 SE = BB->succ_end(); SI != SE; ++SI) { 402 for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(), 403 E = (*SI)->livein_end(); I != E; ++I) { 404 unsigned Reg = *I; 405 KillIndices[Reg] = BB->size(); 406 // Repeat, for all subregs. 407 for (const unsigned *Subreg = TRI->getSubRegisters(Reg); 408 *Subreg; ++Subreg) { 409 KillIndices[*Subreg] = BB->size(); 410 } 411 } 412 } 413 } 414} 415 416bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI, 417 MachineOperand &MO) { 418 // Setting kill flag... 419 if (!MO.isKill()) { 420 MO.setIsKill(true); 421 return false; 422 } 423 424 // If MO itself is live, clear the kill flag... 425 if (KillIndices[MO.getReg()] != ~0u) { 426 MO.setIsKill(false); 427 return false; 428 } 429 430 // If any subreg of MO is live, then create an imp-def for that 431 // subreg and keep MO marked as killed. 432 MO.setIsKill(false); 433 bool AllDead = true; 434 const unsigned SuperReg = MO.getReg(); 435 for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg); 436 *Subreg; ++Subreg) { 437 if (KillIndices[*Subreg] != ~0u) { 438 MI->addOperand(MachineOperand::CreateReg(*Subreg, 439 true /*IsDef*/, 440 true /*IsImp*/, 441 false /*IsKill*/, 442 false /*IsDead*/)); 443 AllDead = false; 444 } 445 } 446 447 if(AllDead) 448 MO.setIsKill(true); 449 return false; 450} 451 452/// FixupKills - Fix the register kill flags, they may have been made 453/// incorrect by instruction reordering. 454/// 455void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) { 456 DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n'); 457 458 std::set<unsigned> killedRegs; 459 BitVector ReservedRegs = TRI->getReservedRegs(MF); 460 461 StartBlockForKills(MBB); 462 463 // Examine block from end to start... 464 unsigned Count = MBB->size(); 465 for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin(); 466 I != E; --Count) { 467 MachineInstr *MI = --I; 468 469 // Update liveness. Registers that are defed but not used in this 470 // instruction are now dead. Mark register and all subregs as they 471 // are completely defined. 472 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 473 MachineOperand &MO = MI->getOperand(i); 474 if (!MO.isReg()) continue; 475 unsigned Reg = MO.getReg(); 476 if (Reg == 0) continue; 477 if (!MO.isDef()) continue; 478 // Ignore two-addr defs. 479 if (MI->isRegTiedToUseOperand(i)) continue; 480 481 KillIndices[Reg] = ~0u; 482 483 // Repeat for all subregs. 484 for (const unsigned *Subreg = TRI->getSubRegisters(Reg); 485 *Subreg; ++Subreg) { 486 KillIndices[*Subreg] = ~0u; 487 } 488 } 489 490 // Examine all used registers and set/clear kill flag. When a 491 // register is used multiple times we only set the kill flag on 492 // the first use. 493 killedRegs.clear(); 494 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 495 MachineOperand &MO = MI->getOperand(i); 496 if (!MO.isReg() || !MO.isUse()) continue; 497 unsigned Reg = MO.getReg(); 498 if ((Reg == 0) || ReservedRegs.test(Reg)) continue; 499 500 bool kill = false; 501 if (killedRegs.find(Reg) == killedRegs.end()) { 502 kill = true; 503 // A register is not killed if any subregs are live... 504 for (const unsigned *Subreg = TRI->getSubRegisters(Reg); 505 *Subreg; ++Subreg) { 506 if (KillIndices[*Subreg] != ~0u) { 507 kill = false; 508 break; 509 } 510 } 511 512 // If subreg is not live, then register is killed if it became 513 // live in this instruction 514 if (kill) 515 kill = (KillIndices[Reg] == ~0u); 516 } 517 518 if (MO.isKill() != kill) { 519 bool removed = ToggleKillFlag(MI, MO); 520 if (removed) { 521 DEBUG(errs() << "Fixed <removed> in "); 522 } else { 523 DEBUG(errs() << "Fixed " << MO << " in "); 524 } 525 DEBUG(MI->dump()); 526 } 527 528 killedRegs.insert(Reg); 529 } 530 531 // Mark any used register (that is not using undef) and subregs as 532 // now live... 533 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 534 MachineOperand &MO = MI->getOperand(i); 535 if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue; 536 unsigned Reg = MO.getReg(); 537 if ((Reg == 0) || ReservedRegs.test(Reg)) continue; 538 539 KillIndices[Reg] = Count; 540 541 for (const unsigned *Subreg = TRI->getSubRegisters(Reg); 542 *Subreg; ++Subreg) { 543 KillIndices[*Subreg] = Count; 544 } 545 } 546 } 547} 548 549//===----------------------------------------------------------------------===// 550// Top-Down Scheduling 551//===----------------------------------------------------------------------===// 552 553/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to 554/// the PendingQueue if the count reaches zero. Also update its cycle bound. 555void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) { 556 SUnit *SuccSU = SuccEdge->getSUnit(); 557 558#ifndef NDEBUG 559 if (SuccSU->NumPredsLeft == 0) { 560 errs() << "*** Scheduling failed! ***\n"; 561 SuccSU->dump(this); 562 errs() << " has been released too many times!\n"; 563 llvm_unreachable(0); 564 } 565#endif 566 --SuccSU->NumPredsLeft; 567 568 // Compute how many cycles it will be before this actually becomes 569 // available. This is the max of the start time of all predecessors plus 570 // their latencies. 571 SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency()); 572 573 // If all the node's predecessors are scheduled, this node is ready 574 // to be scheduled. Ignore the special ExitSU node. 575 if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) 576 PendingQueue.push_back(SuccSU); 577} 578 579/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors. 580void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) { 581 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); 582 I != E; ++I) 583 ReleaseSucc(SU, &*I); 584} 585 586/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending 587/// count of its successors. If a successor pending count is zero, add it to 588/// the Available queue. 589void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) { 590 DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: "); 591 DEBUG(SU->dump(this)); 592 593 Sequence.push_back(SU); 594 assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!"); 595 SU->setDepthToAtLeast(CurCycle); 596 597 ReleaseSuccessors(SU); 598 SU->isScheduled = true; 599 AvailableQueue.ScheduledNode(SU); 600} 601 602/// ListScheduleTopDown - The main loop of list scheduling for top-down 603/// schedulers. 604void SchedulePostRATDList::ListScheduleTopDown() { 605 unsigned CurCycle = 0; 606 607 // Release any successors of the special Entry node. 608 ReleaseSuccessors(&EntrySU); 609 610 // All leaves to Available queue. 611 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) { 612 // It is available if it has no predecessors. 613 if (SUnits[i].Preds.empty()) { 614 AvailableQueue.push(&SUnits[i]); 615 SUnits[i].isAvailable = true; 616 } 617 } 618 619 // In any cycle where we can't schedule any instructions, we must 620 // stall or emit a noop, depending on the target. 621 bool CycleHasInsts = false; 622 623 // While Available queue is not empty, grab the node with the highest 624 // priority. If it is not ready put it back. Schedule the node. 625 std::vector<SUnit*> NotReady; 626 Sequence.reserve(SUnits.size()); 627 while (!AvailableQueue.empty() || !PendingQueue.empty()) { 628 // Check to see if any of the pending instructions are ready to issue. If 629 // so, add them to the available queue. 630 unsigned MinDepth = ~0u; 631 for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) { 632 if (PendingQueue[i]->getDepth() <= CurCycle) { 633 AvailableQueue.push(PendingQueue[i]); 634 PendingQueue[i]->isAvailable = true; 635 PendingQueue[i] = PendingQueue.back(); 636 PendingQueue.pop_back(); 637 --i; --e; 638 } else if (PendingQueue[i]->getDepth() < MinDepth) 639 MinDepth = PendingQueue[i]->getDepth(); 640 } 641 642 DEBUG(errs() << "\n*** Examining Available\n"; 643 LatencyPriorityQueue q = AvailableQueue; 644 while (!q.empty()) { 645 SUnit *su = q.pop(); 646 errs() << "Height " << su->getHeight() << ": "; 647 su->dump(this); 648 }); 649 650 SUnit *FoundSUnit = 0; 651 652 bool HasNoopHazards = false; 653 while (!AvailableQueue.empty()) { 654 SUnit *CurSUnit = AvailableQueue.pop(); 655 656 ScheduleHazardRecognizer::HazardType HT = 657 HazardRec->getHazardType(CurSUnit); 658 if (HT == ScheduleHazardRecognizer::NoHazard) { 659 FoundSUnit = CurSUnit; 660 break; 661 } 662 663 // Remember if this is a noop hazard. 664 HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard; 665 666 NotReady.push_back(CurSUnit); 667 } 668 669 // Add the nodes that aren't ready back onto the available list. 670 if (!NotReady.empty()) { 671 AvailableQueue.push_all(NotReady); 672 NotReady.clear(); 673 } 674 675 // If we found a node to schedule, do it now. 676 if (FoundSUnit) { 677 ScheduleNodeTopDown(FoundSUnit, CurCycle); 678 HazardRec->EmitInstruction(FoundSUnit); 679 CycleHasInsts = true; 680 681 // If we are using the target-specific hazards, then don't 682 // advance the cycle time just because we schedule a node. If 683 // the target allows it we can schedule multiple nodes in the 684 // same cycle. 685 if (!EnablePostRAHazardAvoidance) { 686 if (FoundSUnit->Latency) // Don't increment CurCycle for pseudo-ops! 687 ++CurCycle; 688 } 689 } else { 690 if (CycleHasInsts) { 691 DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n'); 692 HazardRec->AdvanceCycle(); 693 } else if (!HasNoopHazards) { 694 // Otherwise, we have a pipeline stall, but no other problem, 695 // just advance the current cycle and try again. 696 DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n'); 697 HazardRec->AdvanceCycle(); 698 ++NumStalls; 699 } else { 700 // Otherwise, we have no instructions to issue and we have instructions 701 // that will fault if we don't do this right. This is the case for 702 // processors without pipeline interlocks and other cases. 703 DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n'); 704 HazardRec->EmitNoop(); 705 Sequence.push_back(0); // NULL here means noop 706 ++NumNoops; 707 } 708 709 ++CurCycle; 710 CycleHasInsts = false; 711 } 712 } 713 714#ifndef NDEBUG 715 VerifySchedule(/*isBottomUp=*/false); 716#endif 717} 718 719//===----------------------------------------------------------------------===// 720// Public Constructor Functions 721//===----------------------------------------------------------------------===// 722 723FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) { 724 return new PostRAScheduler(OptLevel); 725} 726