PostRASchedulerList.cpp revision e1b2129471e994cfb7d34cc5134eb99dd1ae39f2
//===----- PostRASchedulerList.cpp - list scheduler -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
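
// In outline, the top-down list scheduling loop implemented below in
// ListScheduleTopDown() behaves roughly as follows (an informal sketch of
// the code in this file, not additional behavior):
//
//   while AvailableQueue or PendingQueue is non-empty:
//     move each pending node whose depth <= CurCycle into AvailableQueue
//     pop nodes from AvailableQueue until one has no hazard at this cycle,
//       stashing the hazardous ones and pushing them back afterwards
//     if such a node was found:
//       schedule it at CurCycle and release its successors
//     else:
//       record a stall or emit a noop, and advance CurCycle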

#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include <map>
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                            cl::desc("Enable exact hazard avoidance"),
                            cl::init(true), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
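
// For example, one could force the pass on and break only critical-path
// anti-dependencies with (a hypothetical invocation; these are the hidden
// cl::opt flags defined above):
//   llc -post-RA-scheduler -break-anti-dependencies=critical foo.bc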

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(&ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AntiDepBreaker *ADB,
                         AliasAnalysis *aa)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        HazardRec(HR), AntiDepBreak(ADB), AA(aa) {}

    ~SchedulePostRATDList() {
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
  SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, CriticalPathRCs))
      return false;
  }

  // Check for an anti-dependence breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all") ? TargetSubtarget::ANTIDEP_ALL :
      (EnableAntiDepBreaking == "critical") ? TargetSubtarget::ANTIDEP_CRITICAL :
      TargetSubtarget::ANTIDEP_NONE;
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();
  AntiDepBreaker *ADB =
    ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL));

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, ADB, AA);
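
  // As a sketch of the per-block loop below: for a block [A, B, L, C, D]
  // where L is a scheduling boundary (e.g. a label), the bottom-up walk
  // first runs the scheduler over the trailing region {C, D}, then calls
  // Observe(L) so the anti-dep breaker can account for L without scheduling
  // it, and finally runs the scheduler over the leading region {A, B}.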
  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills
    Scheduler.FixupKills(MBB);
  }

  delete HR;
  delete ADB;

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
                                          InsertPosIndex);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}
356/// 357void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) { 358 if (AntiDepBreak != NULL) 359 AntiDepBreak->Observe(MI, Count, InsertPosIndex); 360} 361 362/// FinishBlock - Clean up register live-range state. 363/// 364void SchedulePostRATDList::FinishBlock() { 365 if (AntiDepBreak != NULL) 366 AntiDepBreak->FinishBlock(); 367 368 // Call the superclass. 369 ScheduleDAGInstrs::FinishBlock(); 370} 371 372/// StartBlockForKills - Initialize register live-range state for updating kills 373/// 374void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) { 375 // Initialize the indices to indicate that no registers are live. 376 for (unsigned i = 0; i < TRI->getNumRegs(); ++i) 377 KillIndices[i] = ~0u; 378 379 // Determine the live-out physregs for this block. 380 if (!BB->empty() && BB->back().getDesc().isReturn()) { 381 // In a return block, examine the function live-out regs. 382 for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(), 383 E = MRI.liveout_end(); I != E; ++I) { 384 unsigned Reg = *I; 385 KillIndices[Reg] = BB->size(); 386 // Repeat, for all subregs. 387 for (const unsigned *Subreg = TRI->getSubRegisters(Reg); 388 *Subreg; ++Subreg) { 389 KillIndices[*Subreg] = BB->size(); 390 } 391 } 392 } 393 else { 394 // In a non-return block, examine the live-in regs of all successors. 395 for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(), 396 SE = BB->succ_end(); SI != SE; ++SI) { 397 for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(), 398 E = (*SI)->livein_end(); I != E; ++I) { 399 unsigned Reg = *I; 400 KillIndices[Reg] = BB->size(); 401 // Repeat, for all subregs. 402 for (const unsigned *Subreg = TRI->getSubRegisters(Reg); 403 *Subreg; ++Subreg) { 404 KillIndices[*Subreg] = BB->size(); 405 } 406 } 407 } 408 } 409} 410 411bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI, 412 MachineOperand &MO) { 413 // Setting kill flag... 414 if (!MO.isKill()) { 415 MO.setIsKill(true); 416 return false; 417 } 418 419 // If MO itself is live, clear the kill flag... 420 if (KillIndices[MO.getReg()] != ~0u) { 421 MO.setIsKill(false); 422 return false; 423 } 424 425 // If any subreg of MO is live, then create an imp-def for that 426 // subreg and keep MO marked as killed. 427 MO.setIsKill(false); 428 bool AllDead = true; 429 const unsigned SuperReg = MO.getReg(); 430 for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg); 431 *Subreg; ++Subreg) { 432 if (KillIndices[*Subreg] != ~0u) { 433 MI->addOperand(MachineOperand::CreateReg(*Subreg, 434 true /*IsDef*/, 435 true /*IsImp*/, 436 false /*IsKill*/, 437 false /*IsDead*/)); 438 AllDead = false; 439 } 440 } 441 442 if(AllDead) 443 MO.setIsKill(true); 444 return false; 445} 446 447/// FixupKills - Fix the register kill flags, they may have been made 448/// incorrect by instruction reordering. 449/// 450void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) { 451 DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n'); 452 453 std::set<unsigned> killedRegs; 454 BitVector ReservedRegs = TRI->getReservedRegs(MF); 455 456 StartBlockForKills(MBB); 457 458 // Examine block from end to start... 459 unsigned Count = MBB->size(); 460 for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin(); 461 I != E; --Count) { 462 MachineInstr *MI = --I; 463 464 // Update liveness. Registers that are defed but not used in this 465 // instruction are now dead. Mark register and all subregs as they 466 // are completely defined. 
  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }
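    // (The removal above is the usual unordered-erase idiom: overwrite slot
    // i with the last element, shrink the vector, and revisit slot i. Order
    // within PendingQueue does not matter; priority is only imposed when
    // nodes reach AvailableQueue.)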

    DEBUG(dbgs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            dbgs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}