PostRASchedulerList.cpp revision fa796dd720f1b34596a043f17f098fac18ecc028
//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "RegisterClassInfo.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    const TargetInstrInfo *TII;
    RegisterClassInfo RegClassInfo;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    std::vector<unsigned> KillIndices;

  public:
    SchedulePostRATDList(
      MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
      AliasAnalysis *AA, const RegisterClassInfo&,
      TargetSubtarget::AntiDepBreakMode AntiDepMode,
      SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs);

    ~SchedulePostRATDList();

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

SchedulePostRATDList::SchedulePostRATDList(
  MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
  AliasAnalysis *AA, const RegisterClassInfo &RCI,
  TargetSubtarget::AntiDepBreakMode AntiDepMode,
  SmallVectorImpl<TargetRegisterClass*> &CriticalPathRCs)
  : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits), AA(AA),
    KillIndices(TRI->getNumRegs())
{
  const TargetMachine &TM = MF.getTarget();
  const InstrItineraryData *InstrItins = TM.getInstrItineraryData();
  HazardRec =
    TM.getInstrInfo()->CreateTargetPostRAHazardRecognizer(InstrItins, this);
  AntiDepBreak =
    ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(MF, RCI, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(MF, RCI) : NULL));
}

SchedulePostRATDList::~SchedulePostRATDList() {
  delete HazardRec;
  delete AntiDepBreak;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  TII = Fn.getTarget().getInstrInfo();
  MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  AliasAnalysis *AA = &getAnalysis<AliasAnalysis>();
  RegClassInfo.runOnMachineFunction(Fn);

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
  SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    // This may upgrade the AntiDepMode.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, CriticalPathRCs))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all") ?
      TargetSubtarget::ANTIDEP_ALL :
      (EnableAntiDepBreaking == "critical")
      ? TargetSubtarget::ANTIDEP_CRITICAL : TargetSubtarget::ANTIDEP_NONE;
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, AA, RegClassInfo, AntiDepMode,
                                 CriticalPathRCs);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
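    // The block is scanned bottom-up. Current marks the end of the region
    // currently being formed and CurrentCount is the number of instructions
    // that precede it in the block. When a scheduling boundary is reached,
    // the region below it is scheduled and emitted; the boundary instruction
    // itself is never reordered, it is only passed to Observe() so the
    // anti-dependence breaker's liveness tracking stays up to date.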
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = llvm::prior(I);
      if (TII->isSchedulingBoundary(MI, MBB, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
                                          InsertPosIndex, DbgValues);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, InsertPosIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// StartBlockForKills - Initialize register live-range state for updating kills
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  for (unsigned i = 0; i < TRI->getNumRegs(); ++i)
    KillIndices[i] = ~0u;

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
           E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  }
  else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
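/// The block is scanned bottom-up. KillIndices records which registers are
/// live below the instruction being visited (~0u means not live). A def
/// clears the entry for its register and subregisters; a use of a register
/// that is not live below (and has no live subregisters) is the last use,
/// so it should carry the kill flag, and ToggleKillFlag reconciles the
/// operand's existing flag with that result. Every non-undef use then marks
/// its register and subregisters live for the instructions above.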
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;
    if (MI->isDebugValue())
      continue;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction.
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Standard scheduler algorithms will recompute the depth of the successor
  // here as such:
  //   SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
  //
  // However, we lazily compute node depth instead. Note that
  // ScheduleNodeTopDown has already updated the depth of this node which causes
  // all descendants to be marked dirty. Setting the successor depth explicitly
  // here would cause depth to be recomputed for all its ancestors. If the
  // successor is not yet ready (because of a transitively redundant edge) then
  // this causes depth computation to be quadratic in the size of the DAG.

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
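///
/// Nodes whose predecessors have all been scheduled wait in PendingQueue
/// until CurCycle reaches their depth, then move to AvailableQueue. On each
/// pass the highest-priority available node that the hazard recognizer
/// accepts is issued; the cycle advances when nothing more can issue or the
/// issue limit is reached, and a noop is emitted instead of a plain stall
/// when the recognizer reports a noop hazard.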
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n"; AvailableQueue.dump(this));

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit, 0/*no stalls*/);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;
      if (HazardRec->atIssueLimit()) {
        DEBUG(dbgs() << "*** Max instructions per cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++CurCycle;
        CycleHasInsts = false;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}