PostRASchedulerList.cpp revision 24ff056654cd4eae6c6403b81dfceaa46605f395
//===----- PostRASchedulerList.cpp - list scheduler ----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
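// When the flag appears explicitly on the command line (its parse position is
// non-zero), it overrides the subtarget hook in either direction; otherwise
// TargetSubtarget::enablePostRAScheduler() decides.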
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    const TargetInstrInfo *TII;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(&ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AntiDepBreaker *ADB,
                         AliasAnalysis *aa)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        HazardRec(HR), AntiDepBreak(ADB), AA(aa) {}

    ~SchedulePostRATDList() {
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();
  TII = Fn.getTarget().getInstrInfo();

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
  SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, CriticalPathRCs))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all") ?
      TargetSubtarget::ANTIDEP_ALL :
      (EnableAntiDepBreaking == "critical")
        ? TargetSubtarget::ANTIDEP_CRITICAL : TargetSubtarget::ANTIDEP_NONE;
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const TargetMachine &TM = Fn.getTarget();
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
  ScheduleHazardRecognizer *HR =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins);
  AntiDepBreaker *ADB =
    ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL));

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, ADB, AA);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
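    // The region walk below proceeds bottom-up: each scheduling boundary ends
    // the current region, which is scheduled and emitted as a unit, while the
    // boundary instruction itself is only observed for liveness updates.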
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = llvm::prior(I);
      if (TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills
    Scheduler.FixupKills(MBB);
  }

  delete HR;
  delete ADB;

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
                                          InsertPosIndex);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, InsertPosIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// StartBlockForKills - Initialize register live-range state for updating kills
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  for (unsigned i = 0; i < TRI->getNumRegs(); ++i)
    KillIndices[i] = ~0u;

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
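    // Everything in the function live-out set (and all of its subregisters)
    // is treated as live beyond the end of the block.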
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  }
  else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
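    // Because the block is scanned bottom-up, KillIndices[Reg] == ~0u at a use
    // means Reg is not live below this instruction, so this is the final use
    // of the current value and should carry the kill flag.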
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
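/// (Released successors actually enter the PendingQueue first; they move to
/// the AvailableQueue once the current cycle reaches their earliest depth.)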
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            dbgs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//  Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}