PostRASchedulerList.cpp revision 8b3d6682a45e80d08abf32aa1be0491db1977456
//===----- PostRASchedulerList.cpp - list scheduler -----------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");
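
// The cl::opt flags below let developers override the target's defaults
// from the command line. runOnMachineFunction treats a flag as explicitly
// set when cl::opt::getPosition() returns nonzero, and only then lets it
// take precedence over the TargetSubtarget hooks.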
// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                            cl::desc("Enable exact hazard avoidance"),
                            cl::init(true), cl::Hidden);

// If DebugDiv > 0 then only schedule MBBs with (ID % DebugDiv) == DebugMod.
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(&ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands
    /// have been issued, but whose results are not ready yet (due to the
    /// latency of the operation). Once the operands become available, the
    /// instruction is added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AntiDepBreaker *ADB,
                         AliasAnalysis *aa)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        HazardRec(HR), AntiDepBreak(ADB), AA(aa) {}

    ~SchedulePostRATDList() {
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

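    // Note on the region protocol: runOnMachineFunction (below) carves each
    // basic block into regions at scheduling boundaries. Run/Schedule handle
    // the instructions inside a region; Observe is called for the boundary
    // instructions themselves, which are never reordered but whose register
    // defs and uses the anti-dependence breaker still has to see.
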
    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();

  // Check for an explicit enable/disable of post-RA scheduling.
  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
  SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, CriticalPathRCs))
      return false;
  }

  // Check for an anti-dependence breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all") ?
      TargetSubtarget::ANTIDEP_ALL :
      ((EnableAntiDepBreaking == "critical") ?
       TargetSubtarget::ANTIDEP_CRITICAL : TargetSubtarget::ANTIDEP_NONE);
  }

  DEBUG(dbgs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();
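
  // The exact recognizer consumes the InstrItineraryData passed above to
  // model pipeline hazards cycle by cycle; the simple recognizer is the
  // lightweight fallback used when -avoid-hazards is explicitly disabled.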
  AntiDepBreaker *ADB =
    ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn, CriticalPathRCs) :
     ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL));

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, ADB, AA);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBBs with (ID % DebugDiv) == DebugMod.
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      dbgs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kill flags.
    Scheduler.FixupKills(MBB);
  }

  delete HR;
  delete ADB;

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    unsigned Broken =
      AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
                                          InsertPosIndex);

    if (Broken != 0) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);

      NumFixedAnti += Broken;
    }
  }

  DEBUG(dbgs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown();
  AvailableQueue.releaseState();
}
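
// An illustrative anti-dependence (WAR) example for BreakAntiDependencies
// above, in pseudo-assembly (not output of this pass):
//
//   r1 = add r2, r3
//   r4 = sub r1, r5
//   r1 = mul r6, r7   <- anti-dependence: must not move above the sub
//
// Renaming the second live range of r1 to a free register removes that
// edge and gives the scheduler more freedom; such renaming is the change
// that forces the graph rebuild seen in Schedule() above.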
355/// 356void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) { 357 if (AntiDepBreak != NULL) 358 AntiDepBreak->Observe(MI, Count, InsertPosIndex); 359} 360 361/// FinishBlock - Clean up register live-range state. 362/// 363void SchedulePostRATDList::FinishBlock() { 364 if (AntiDepBreak != NULL) 365 AntiDepBreak->FinishBlock(); 366 367 // Call the superclass. 368 ScheduleDAGInstrs::FinishBlock(); 369} 370 371/// StartBlockForKills - Initialize register live-range state for updating kills 372/// 373void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) { 374 // Initialize the indices to indicate that no registers are live. 375 for (unsigned i = 0; i < TRI->getNumRegs(); ++i) 376 KillIndices[i] = ~0u; 377 378 // Determine the live-out physregs for this block. 379 if (!BB->empty() && BB->back().getDesc().isReturn()) { 380 // In a return block, examine the function live-out regs. 381 for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(), 382 E = MRI.liveout_end(); I != E; ++I) { 383 unsigned Reg = *I; 384 KillIndices[Reg] = BB->size(); 385 // Repeat, for all subregs. 386 for (const unsigned *Subreg = TRI->getSubRegisters(Reg); 387 *Subreg; ++Subreg) { 388 KillIndices[*Subreg] = BB->size(); 389 } 390 } 391 } 392 else { 393 // In a non-return block, examine the live-in regs of all successors. 394 for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(), 395 SE = BB->succ_end(); SI != SE; ++SI) { 396 for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(), 397 E = (*SI)->livein_end(); I != E; ++I) { 398 unsigned Reg = *I; 399 KillIndices[Reg] = BB->size(); 400 // Repeat, for all subregs. 401 for (const unsigned *Subreg = TRI->getSubRegisters(Reg); 402 *Subreg; ++Subreg) { 403 KillIndices[*Subreg] = BB->size(); 404 } 405 } 406 } 407 } 408} 409 410bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI, 411 MachineOperand &MO) { 412 // Setting kill flag... 413 if (!MO.isKill()) { 414 MO.setIsKill(true); 415 return false; 416 } 417 418 // If MO itself is live, clear the kill flag... 419 if (KillIndices[MO.getReg()] != ~0u) { 420 MO.setIsKill(false); 421 return false; 422 } 423 424 // If any subreg of MO is live, then create an imp-def for that 425 // subreg and keep MO marked as killed. 426 MO.setIsKill(false); 427 bool AllDead = true; 428 const unsigned SuperReg = MO.getReg(); 429 for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg); 430 *Subreg; ++Subreg) { 431 if (KillIndices[*Subreg] != ~0u) { 432 MI->addOperand(MachineOperand::CreateReg(*Subreg, 433 true /*IsDef*/, 434 true /*IsImp*/, 435 false /*IsKill*/, 436 false /*IsDead*/)); 437 AllDead = false; 438 } 439 } 440 441 if(AllDead) 442 MO.setIsKill(true); 443 return false; 444} 445 446/// FixupKills - Fix the register kill flags, they may have been made 447/// incorrect by instruction reordering. 448/// 449void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) { 450 DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n'); 451 452 std::set<unsigned> killedRegs; 453 BitVector ReservedRegs = TRI->getReservedRegs(MF); 454 455 StartBlockForKills(MBB); 456 457 // Examine block from end to start... 458 unsigned Count = MBB->size(); 459 for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin(); 460 I != E; --Count) { 461 MachineInstr *MI = --I; 462 if (MI->isDebugValue()) 463 continue; 464 465 // Update liveness. Registers that are defed but not used in this 466 // instruction are now dead. Mark register and all subregs as they 467 // are completely defined. 

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark the register and all of its subregs,
    // since they are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear the kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If no subreg is live, then the register is killed if it first
        // becomes live at this instruction (it was not live below).
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        DEBUG(dbgs() << "Fixing " << MO << " in ");
        // Warning: ToggleKillFlag may invalidate MO.
        ToggleKillFlag(MI, MO);
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}
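
// Note that a successor released above does not become schedulable right
// away: it waits in PendingQueue until CurCycle reaches its depth (that is,
// until its operands' latencies have elapsed), at which point
// ListScheduleTopDown moves it to AvailableQueue.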

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(dbgs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            dbgs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }
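
    // At this point FoundSUnit is the highest-priority hazard-free node, or
    // null if every available node is blocked. Anything popped while probing
    // was parked in NotReady and is re-pushed below; HasNoopHazards records
    // whether a noop (rather than a plain stall) is needed to make progress.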
    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(dbgs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(dbgs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(dbgs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}