PostRASchedulerList.cpp revision 0a4c09e724bd3ab7c9a1d3a1615894e7bf209179
//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include <map>
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
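//
// For example (illustrative invocations; the flag spelling comes from the
// cl::opt declarations below, and the exact tool driver may vary):
//   llc -post-RA-scheduler foo.bc          # force post-RA scheduling on
//   llc -post-RA-scheduler=false foo.bc    # force it off, overriding the target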
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies"),
                      cl::init(true), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                            cl::desc("Enable exact hazard avoidance"),
                            cl::init(true), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);

namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;

  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(&ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// Classes - For live regs that are only used in one register class in a
    /// live range, the register class. If the register is not live, the
    /// corresponding value is null. If the register is live but used in
    /// multiple register classes, the corresponding value is -1 cast to a
    /// pointer.
    const TargetRegisterClass *
      Classes[TargetRegisterInfo::FirstVirtualRegister];

    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, MachineOperand *> RegRefs;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// DefIndices - The index of the most recent complete def (proceeding
    /// bottom up), or ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// KeepRegs - A set of registers which are live and cannot be changed to
    /// break anti-dependencies.
    SmallSet<unsigned, 4> KeepRegs;

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AliasAnalysis *aa)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        HazardRec(HR), AA(aa) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

  private:
    void PrescanInstruction(MachineInstr *MI);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
    unsigned findSuitableFreeRegister(unsigned AntiDepReg,
                                      unsigned LastNewReg,
                                      const TargetRegisterClass *);
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  // Check for explicit enable/disable of post-ra scheduling.
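  // A flag given on the command line wins; only when the flag is absent do we
  // defer to the TargetSubtarget hook. So, for example, passing
  // -post-RA-scheduler=false (see the illustrative invocations above the flag
  // declarations) skips scheduling even on a target whose
  // enablePostRAScheduler() returns true.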
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return true;
  } else {
    // Check that post-RA scheduling is enabled for this function.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler())
      return true;
  }

  DEBUG(errs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, AA);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":MBB ID#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer.
  HazardRec->Reset();

  // Clear out the register class data.
  std::fill(Classes, array_endof(Classes),
            static_cast<const TargetRegisterClass *>(0));

  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());

  // Clear "do not change" set.
  KeepRegs.clear();

  bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn());

  // Determine the live-out physregs for this block.
  if (IsReturnBlock) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }
  }

  // Mark live-out callee-saved registers. In a return block this is
  // all callee-saved registers. In a non-return block this is any
  // callee-saved register that is not saved in the prolog.
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  BitVector Pristine = MFI->getPristineRegs(BB);
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    if (!IsReturnBlock && !Pristine.test(Reg)) continue;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DEBUG(errs() << "********** List Scheduling **********\n");

  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (EnableAntiDepBreaking) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);
    }
  }

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  // Any register which was defined within the previous scheduling region
  // may have been rescheduled and its lifetime may overlap with registers
  // in ways not reflected in our current liveness state. For each such
  // register, adjust the liveness state to be conservatively correct.
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg)
    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
      // Mark this register to be non-renamable.
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      // Move the def index to the end of the previous region, to reflect
      // that the def could theoretically have been scheduled at the end.
      DefIndices[Reg] = InsertPosIndex;
    }

  PrescanInstruction(MI);
  ScanInstruction(MI, Count);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}

void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
  // Scan the register operands for this instruction and update
  // Classes and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    const TargetRegisterClass *NewRC = 0;

    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    // Now check for aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      // If an alias of the reg is used during the live range, give up.
      // Note that this allows us to skip checking if AntiDepReg
      // overlaps with any of the aliases, among other things.
      unsigned AliasReg = *Alias;
      if (Classes[AliasReg]) {
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }

    // If we're still willing to consider this register, note the reference.
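    // (At this point Classes[Reg] serves as a three-way tag: null means no
    // live class has been recorded for the register, a real class pointer
    // means every reference seen so far agrees on one class, and the -1
    // sentinel means the register may not be renamed.)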
    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
      RegRefs.insert(std::make_pair(Reg, &MO));

    // It's not safe to change register allocation for source operands
    // that have special allocation requirements.
    if (MO.isUse() && MI->getDesc().hasExtraSrcRegAllocReq()) {
      if (KeepRegs.insert(Reg)) {
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg)
          KeepRegs.insert(*Subreg);
      }
    }
  }
}

void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  // Update liveness.
  // Proceeding upwards, registers that are defed but not used in this
  // instruction are now dead.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isDef()) continue;
    // Ignore two-addr defs.
    if (MI->isRegTiedToUseOperand(i)) continue;

    DefIndices[Reg] = Count;
    KillIndices[Reg] = ~0u;
    assert(((KillIndices[Reg] == ~0u) !=
            (DefIndices[Reg] == ~0u)) &&
           "Kill and Def maps aren't consistent for Reg!");
    KeepRegs.erase(Reg);
    Classes[Reg] = 0;
    RegRefs.erase(Reg);
    // Repeat, for all subregs.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
      KillIndices[SubregReg] = ~0u;
      KeepRegs.erase(SubregReg);
      Classes[SubregReg] = 0;
      RegRefs.erase(SubregReg);
    }
    // Conservatively mark super-registers as unusable.
    for (const unsigned *Super = TRI->getSuperRegisters(Reg);
         *Super; ++Super) {
      unsigned SuperReg = *Super;
      Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
    }
  }
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isUse()) continue;

    const TargetRegisterClass *NewRC = 0;
    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    RegRefs.insert(std::make_pair(Reg, &MO));

    // It wasn't previously live, but now it is; this is a kill.
    if (KillIndices[Reg] == ~0u) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
    }
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      if (KillIndices[AliasReg] == ~0u) {
        KillIndices[AliasReg] = Count;
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}

unsigned
SchedulePostRATDList::findSuitableFreeRegister(unsigned AntiDepReg,
                                               unsigned LastNewReg,
                                               const TargetRegisterClass *RC) {
  for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
       RE = RC->allocation_order_end(MF); R != RE; ++R) {
    unsigned NewReg = *R;
    // Don't replace a register with itself.
    if (NewReg == AntiDepReg) continue;
    // Don't replace a register with one that was recently used to repair
    // an anti-dependence with this AntiDepReg, because that would
    // re-introduce that anti-dependence.
    if (NewReg == LastNewReg) continue;
    // If NewReg is dead and NewReg's most recent def is not before
    // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
    assert(((KillIndices[AntiDepReg] == ~0u) !=
            (DefIndices[AntiDepReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for AntiDepReg!");
    assert(((KillIndices[NewReg] == ~0u) !=
            (DefIndices[NewReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for NewReg!");
    if (KillIndices[NewReg] != ~0u ||
        Classes[NewReg] == reinterpret_cast<TargetRegisterClass *>(-1) ||
        KillIndices[AntiDepReg] > DefIndices[NewReg])
      continue;
    return NewReg;
  }

  // No registers are free and available!
  return 0;
}

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;

  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

  DEBUG(errs() << "Critical path has total latency "
        << (Max->getDepth() + Max->Latency) << "\n");

  // Track progress along the critical path through the SUnit graph as we walk
  // the instructions.
  SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free. This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break. To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          if (!AllocatableSet.test(AntiDepReg))
            // Don't break anti-dependencies on non-allocatable registers.
            AntiDepReg = 0;
          else if (KeepRegs.count(AntiDepReg))
            // Don't break anti-dependencies if a use down below requires
            // this exact register.
            AntiDepReg = 0;
          else {
            // If the SUnit has other dependencies on the SUnit that it
            // anti-depends on, don't bother breaking the anti-dependency
            // since those edges would prevent such units from being
            // scheduled past each other regardless.
            //
            // Also, if there are dependencies on other SUnits with the
            // same register as the anti-dependency, don't attempt to
            // break it.
            for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
              if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                AntiDepReg = 0;
                break;
              }
          }
        }
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    PrescanInstruction(MI);

    if (MI->getDesc().hasExtraDefRegAllocReq())
      // If this instruction's defs have special allocation requirements, don't
      // break this anti-dependency.
      AntiDepReg = 0;
    else if (AntiDepReg) {
      // If this instruction has a use of AntiDepReg, breaking it
      // is invalid.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg()) continue;
        unsigned Reg = MO.getReg();
        if (Reg == 0) continue;
        if (MO.isUse() && AntiDepReg == Reg) {
          AntiDepReg = 0;
          break;
        }
      }
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ?
      Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      if (unsigned NewReg = findSuitableFreeRegister(AntiDepReg,
                                                     LastNewReg[AntiDepReg],
                                                     RC)) {
        DEBUG(errs() << "Breaking anti-dependence edge on "
              << TRI->getName(AntiDepReg)
              << " with " << RegRefs.count(AntiDepReg) << " references"
              << " using " << TRI->getName(NewReg) << "!\n");

        // Update the references to the old register to refer to the new
        // register.
        std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                  std::multimap<unsigned, MachineOperand *>::iterator>
          Range = RegRefs.equal_range(AntiDepReg);
        for (std::multimap<unsigned, MachineOperand *>::iterator
             Q = Range.first, QE = Range.second; Q != QE; ++Q)
          Q->second->setReg(NewReg);

        // We just went back in time and modified history; the
        // liveness information for the anti-dependence reg is now
        // inconsistent. Set the state as if it were dead.
        Classes[NewReg] = Classes[AntiDepReg];
        DefIndices[NewReg] = DefIndices[AntiDepReg];
        KillIndices[NewReg] = KillIndices[AntiDepReg];
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");

        Classes[AntiDepReg] = 0;
        DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
        KillIndices[AntiDepReg] = ~0u;
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");

        RegRefs.erase(AntiDepReg);
        Changed = true;
        LastNewReg[AntiDepReg] = NewReg;
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}

/// StartBlockForKills - Initialize register live-range state for updating
/// kills.
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
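  // (The cases below, in order: if MO isn't marked as a kill yet, simply set
  // the flag; if the register itself is still live, clear the flag; otherwise
  // clear the flag and add an implicit def for each subregister that is still
  // live, restoring the kill marking only if every subregister proves dead.)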
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If no subreg is live, then the register is killed if it became
        // live in this instruction.
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        bool removed = ToggleKillFlag(MI, MO);
        if (removed) {
          DEBUG(errs() << "Fixed <removed> in ");
        } else {
          DEBUG(errs() << "Fixed " << MO << " in ");
        }
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
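    // (Count is the instruction's position counted from the top of the block,
    // so storing it in KillIndices records where each register most recently
    // became live as the scan proceeds upward.)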
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back; otherwise schedule the node.
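  // (One cycle of the loop below: pending nodes whose depth has been reached
  // migrate to the available queue, which is then drained in priority order
  // until the hazard recognizer accepts a node. If nothing can issue, the
  // cycle is advanced: an ordinary stall if no noop hazard was seen, or an
  // explicit noop for targets that require one.)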
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(errs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            errs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem;
        // just advance the current cycle and try again.
        DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler() {
  return new PostRAScheduler();
}
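//
// Illustrative use (hypothetical pass-manager setup; in-tree, the common
// codegen pipeline constructed by LLVMTargetMachine adds this pass itself):
//
//   PM.add(createPostRAScheduler());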