ScheduleDAGRRList.cpp revision ae692f2baedf53504af2715993b166950e185a55
//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <climits>
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));

// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
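//
// As a rough illustration of how these knobs are exercised (the llc
// invocation below is illustrative, not a documented recipe; the flag names
// come from the cl::opt definitions in this file):
//
//   llc -pre-RA-sched=list-ilp -max-sched-reorder=4 \
//       -disable-sched-critical-path foo.ll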
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
  "disable-sched-physreg-join", cl::Hidden, cl::init(false),
  cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));
static cl::opt<bool> Disable2AddrHack(
  "disable-2addr-hack", cl::Hidden, cl::init(true),
  cl::desc("Disable scheduler's two-address hack"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// NeedLatency - True if the scheduler will make use of latency information.
  ///
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other nodes that modify
  /// the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;
  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

  // Hack to keep track of the inverse of FindCallSeqStart without more crazy
  // DAG crawling.
  DenseMap<SUnit*, SUnit*> CallSeqEndForStart;

public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits, NULL) {

    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule();

  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
      AvailableQueue->isReady(SU);
  }

  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = newSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }
  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool forceUnitLatencies() const {
    return !NeedLatency;
  }
};
} // end anonymous namespace

/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::Untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost,
                          const MachineFunction &MF) {
  EVT VT = RegDefPos.GetValue();

  // Special handling for untyped values. These values can only come from
  // the expansion of custom DAG-to-DAG patterns.
  if (VT == MVT::Untyped) {
    const SDNode *Node = RegDefPos.GetNode();
    unsigned Opcode = Node->getMachineOpcode();

    if (Opcode == TargetOpcode::REG_SEQUENCE) {
      unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
      const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Idx = RegDefPos.GetIdx();
    const MCInstrDesc Desc = TII->get(Opcode);
    const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI, MF);
    RegClass = RC->getID();
    // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
    // better way to determine it.
    Cost = 1;
  } else {
    RegClass = TLI->getRepRegClassFor(VT)->getID();
    Cost = TLI->getRepRegClassCostFor(VT);
  }
}

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;
  // Allocate slots for each physical register, plus one for a special register
  // to track the virtual resource of a calling sequence.
  LiveRegDefs.resize(TRI->getNumRegs() + 1, NULL);
  LiveRegGens.resize(TRI->getNumRegs() + 1, NULL);
  CallSeqEndForStart.clear();

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();

  AvailableQueue->releaseState();

  DEBUG({
      dbgs() << "*** Final schedule ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//
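
// Summarizing the flow implemented by the functions below: each iteration of
// ListScheduleBottomUp pops the highest priority SUnit via
// PickNodeToScheduleBottomUp (which sets aside any candidate that would
// clobber a live physical register, backtracking or inserting copies if
// nothing else is ready), advances past pipeline stalls with
// AdvancePastStalls, and then calls ScheduleNodeBottomUp to emit the node
// and release its predecessors into the queue.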

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!forceUnitLatencies()) {
    // Updating predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue, avoid
    // adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}

/// IsChainDependent - Test if Outer is reachable from Inner through
/// chain dependencies.
static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
                             unsigned NestLevel,
                             const TargetInstrInfo *TII) {
  SDNode *N = Outer;
  for (;;) {
    if (N == Inner)
      return true;
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        if (IsChainDependent(N->getOperand(i).getNode(), Inner, NestLevel, TII))
          return true;
      return false;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        if (NestLevel == 0)
          return false;
        --NestLevel;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return false;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return false;
  }
}
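
// To illustrate the NestLevel bookkeeping used above and in FindCallSeqStart
// below, consider walking bottom-up from the outer CALLSEQ_END of a nested
// calling sequence:
//
//   CALLSEQ_BEGIN   ; outer  (NestLevel 1 -> 0, matching node found)
//     CALLSEQ_BEGIN ; inner  (NestLevel 2 -> 1)
//     CALLSEQ_END   ; inner  (NestLevel 1 -> 2)
//   CALLSEQ_END     ; outer  (walk starts here, NestLevel 0 -> 1)
//
// Each CALLSEQ_END seen on the way up increments the level and each
// CALLSEQ_BEGIN decrements it, so the walk stops only at the CALLSEQ_BEGIN
// that balances the CALLSEQ_END it started from.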

/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
/// the corresponding (lowered) CALLSEQ_BEGIN node.
///
/// NestLevel and MaxNest are used in recursion to indicate the current level
/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
/// level seen so far.
///
/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
static SDNode *
FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
                 const TargetInstrInfo *TII) {
  for (;;) {
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      SDNode *Best = 0;
      unsigned BestMaxNest = MaxNest;
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        unsigned MyNestLevel = NestLevel;
        unsigned MyMaxNest = MaxNest;
        if (SDNode *New = FindCallSeqStart(N->getOperand(i).getNode(),
                                           MyNestLevel, MyMaxNest, TII))
          if (!Best || (MyMaxNest > BestMaxNest)) {
            Best = New;
            BestMaxNest = MyMaxNest;
          }
      }
      assert(Best);
      MaxNest = BestMaxNest;
      return Best;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
        MaxNest = std::max(MaxNest, NestLevel);
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NestLevel != 0);
        --NestLevel;
        if (NestLevel == 0)
          return N;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return 0;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return 0;
  }
}

/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }

  // If we're scheduling a lowered CALLSEQ_END, find the corresponding
  // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
  // these nodes, to prevent other calls from being interscheduled with them.
  unsigned CallResource = TRI->getNumRegs();
  if (!LiveRegDefs[CallResource])
    for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        unsigned NestLevel = 0;
        unsigned MaxNest = 0;
        SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);

        SUnit *Def = &SUnits[N->getNodeId()];
        CallSeqEndForStart[Def] = SU;

        ++NumLiveRegs;
        LiveRegDefs[CallResource] = Def;
        LiveRegGens[CallResource] = SU;
        break;
      }
}

/// Check to see if any of the pending instructions are ready to issue. If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle = PendingQueue[i]->getHeight();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}

/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      HazardRec->RecedeCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}
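
// For instance, with cycle-level scheduling enabled, if CurCycle is 4 and
// the next node's height (its earliest stall-free cycle) is 7,
// AdvancePastStalls below first calls AdvanceToCycle(7), retiring three
// hazard-recognizer cycles, and only then probes the scoreboard for any
// remaining structural hazards.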

/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency through such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = SU->getHeight();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this instruction.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, -Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}

/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
  case ISD::EH_LABEL:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);
}

static void resetVRegCycle(SUnit *SU);

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight()
          << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node, its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->scheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }
  // Release the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  if (LiveRegDefs[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
      }
    }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule. Update its
/// state and that of its predecessors to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]){
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  // Reclaim the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isMachineOpcode() &&
        SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
      ++NumLiveRegs;
      LiveRegDefs[CallResource] = SU;
      LiveRegGens[CallResource] = CallSeqEndForStart[SU];
    }
  }

  // Release the special call resource dependence, if this is the end
  // of a call.
  if (LiveRegGens[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
      }
    }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (!LiveRegDefs[I->getReg()])
        ++NumLiveRegs;
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      LiveRegDefs[I->getReg()] = SU;
      if (LiveRegGens[I->getReg()] == NULL ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else {
    AvailableQueue->push(SU);
  }
  AvailableQueue->unscheduledNode(SU);
}

/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}

/// BacktrackBottomUp - Backtrack scheduling to the previous cycle specified
/// by BtSU in order to schedule a specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}

static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}
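
// CopyAndMoveSuccessors below first tries to break the dependency by
// unfolding a folded memory operand. As a hypothetical illustration (exact
// opcodes are target-specific), an x86 load-op instruction such as
//   ADD32rm %r, <mem>
// may be split by TII->unfoldMemoryOperand into a separate load plus the
// register form,
//   MOV32rm <mem>  +  ADD32rr
// so the load and the computation get their own SUnits and can be
// rescheduled or duplicated independently.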

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  if (SU->getNode()->getGluedNode())
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    // Unfolding an x86 DEC64m operation results in a store, dec, and load,
    // which can't be handled here, so quit.
    if (NewNodes.size() == 3)
      return NULL;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      computeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    computeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    SDep D(LoadSU, SDep::Data, 0);
    D.setLatency(LoadSU->Latency);
    AddPred(NewSU, D);

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                                 const TargetRegisterClass *DestRC,
                                                 const TargetRegisterClass *SrcRC,
                                                 SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;
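
  // After the edge surgery below, the value reaches its already-scheduled
  // uses through a pair of copies instead of directly:
  //
  //   SU --(Reg)--> CopyFromSU --(DestRC)--> CopyToSU --> scheduled succs
  //
  // CopyFromSU moves the value out of the physical register into the
  // cross-copy class DestRC, and CopyToSU moves it back into SrcRC, so the
  // physreg itself is no longer live across the interfering region.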

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
    else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      AddPred(SuccSU, SDep(CopyFromSU, SDep::Artificial));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  SDep FromDep(SU, SDep::Data, Reg);
  FromDep.setLatency(SU->Latency);
  AddPred(CopyFromSU, FromDep);
  SDep ToDep(CopyFromSU, SDep::Data, 0);
  ToDep.setLatency(CopyFromSU->Latency);
  AddPred(CopyToSU, ToDep);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = MCID.getNumDefs();
  for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// CheckForLiveRegDef - Update the live register vector and LRegs if the
/// specified register def of the specified SUnit clobbers any "live" registers.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) {

    // Check if Reg is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI)) {
      LRegs.push_back(*AliasI);
    }
  }
}

/// CheckForLiveRegDefMasked - Check for any live physregs that are clobbered
/// by RegMask, and add them to LRegs.
static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
                                     std::vector<SUnit*> &LiveRegDefs,
                                     SmallSet<unsigned, 4> &RegAdded,
                                     SmallVector<unsigned, 4> &LRegs) {
  // Look at all live registers. Skip Reg0 and the special CallResource.
  for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
    if (!LiveRegDefs[i]) continue;
    if (LiveRegDefs[i] == SU) continue;
    if (!MachineOperand::clobbersPhysReg(RegMask, i)) continue;
    if (RegAdded.insert(i))
      LRegs.push_back(i);
  }
}
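
// A register mask operand (RegisterMaskSDNode) typically appears on call
// nodes and summarizes every physical register the callee may clobber;
// getNodeRegMask below simply scans a node's operand list for one, and
// CheckForLiveRegDefMasked above treats any mask hit on a live register as
// an interference.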

/// getNodeRegMask - Returns the register mask attached to an SDNode, if any.
static const uint32_t *getNodeRegMask(const SDNode *N) {
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    if (const RegisterMaskSDNode *Op =
        dyn_cast<RegisterMaskSDNode>(N->getOperand(i).getNode()))
      return Op->getRegMask();
  return NULL;
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specified node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    // If we're in the middle of scheduling a call, don't begin scheduling
    // another call. Also, don't allow any physical registers to be live across
    // the call.
    if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
      // Check the special calling-sequence resource.
      unsigned CallResource = TRI->getNumRegs();
      if (LiveRegDefs[CallResource]) {
        SDNode *Gen = LiveRegGens[CallResource]->getNode();
        while (SDNode *Glued = Gen->getGluedNode())
          Gen = Glued;
        if (!IsChainDependent(Gen, Node, 0, TII) && RegAdded.insert(CallResource))
          LRegs.push_back(CallResource);
      }
    }
    if (const uint32_t *RegMask = getNodeRegMask(Node))
      CheckForLiveRegDefMasked(SU, RegMask, LiveRegDefs, RegAdded, LRegs);

    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const uint16_t *Reg = MCID.getImplicitDefs(); *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}

/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SmallVector<SUnit*, 4> Interferences;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;

  SUnit *CurSU = AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    LRegsMap.insert(std::make_pair(CurSU, LRegs));

    CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
    Interferences.push_back(CurSU);
    CurSU = AvailableQueue->pop();
  }
  if (CurSU) {
    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
      Interferences[i]->isPending = false;
      assert(Interferences[i]->isAvailable && "must still be available");
      AvailableQueue->push(Interferences[i]);
    }
    return CurSU;
  }

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to resolve it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = NULL;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      AddPred(TrySU, SDep(BtSU, SDep::Artificial));

      // If one or more successors has been unscheduled, then the current
      // node is no longer available. Schedule a successor that's now
      // available instead.
      if (!TrySU->isAvailable) {
        CurSU = AvailableQueue->pop();
      }
      else {
        CurSU = TrySU;
        TrySU->isPending = false;
        Interferences.erase(Interferences.begin()+i);
      }
      break;
    }
  }

  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try to
    // duplicate the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
    // insert cross class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If the cross copy register class is the same as RC, then it must be
    // possible to copy the value directly. Do not try to duplicate the def.
    // If the cross copy register class is not the same as RC, then it's
    // possible to copy the value, but it requires cross register class copies
    // and it is expensive.
    // If the cross copy register class is null, then it's not possible to copy
    // the value at all.
    SUnit *NewDef = 0;
    if (DestRC != RC) {
      NewDef = CopyAndMoveSuccessors(LRDef);
      if (!DestRC && !NewDef)
        report_fatal_error("Can't handle live physical register dependency!");
    }
    if (!NewDef) {
      // Issue copies, these can be expensive cross register class copies.
      SmallVector<SUnit*, 2> Copies;
      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
      DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
            << " to SU #" << Copies.front()->NodeNum << "\n");
      AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
      NewDef = Copies.back();
    }

    DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
          << " to SU #" << TrySU->NodeNum << "\n");
    LiveRegDefs[Reg] = NewDef;
    AddPred(NewDef, SDep(TrySU, SDep::Artificial));
    TrySU->isAvailable = false;
    CurSU = NewDef;
  }

  assert(CurSU && "Unable to resolve live physical register dependencies!");

  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    Interferences[i]->isPending = false;
    // May no longer be available due to backtracking.
    if (Interferences[i]->isAvailable) {
      AvailableQueue->push(Interferences[i]);
    }
  }
  return CurSU;
}

/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    DEBUG(dbgs() << "\nExamining Available:\n";
          AvailableQueue->dump(this));

    // Pick the best node to schedule taking all constraints into
    // consideration.
    SUnit *SU = PickNodeToScheduleBottomUp();

    AdvancePastStalls(SU);

    ScheduleNodeBottomUp(SU);

    while (AvailableQueue->empty() && !PendingQueue.empty()) {
      // Advance the cycle to free resources. Skip ahead to the next ready SU.
      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
    }
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifyScheduledSequence(/*isBottomUp=*/true);
#endif
}

//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Definition
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
// to reduce register pressure.
//
namespace {
class RegReductionPQBase;

struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
};

#ifndef NDEBUG
template<class SF>
struct reverse_sort : public queue_sort {
  SF &SortFunc;
  reverse_sort(SF &sf) : SortFunc(sf) {}
  reverse_sort(const reverse_sort &RHS) : SortFunc(RHS.SortFunc) {}

  bool operator()(SUnit* left, SUnit* right) const {
    // reverse left/right rather than simply !SortFunc(left, right)
    // to expose different paths in the comparison logic.
    return SortFunc(right, left);
  }
};
#endif // NDEBUG

/// bu_ls_rr_sort - Priority function for bottom up register pressure
/// reduction scheduler.
struct bu_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
  bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// src_ls_rr_sort - Priority function for source order scheduler.
struct src_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  src_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  src_ls_rr_sort(const src_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
struct hybrid_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  hybrid_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
// scheduler.
struct ilp_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  ilp_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

class RegReductionPQBase : public SchedulingPriorityQueue {
protected:
  std::vector<SUnit*> Queue;
  unsigned CurQueueId;
  bool TracksRegPressure;
  bool SrcOrder;

  // SUnits - The SUnits for the current graph.
  std::vector<SUnit> *SUnits;

  MachineFunction &MF;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const TargetLowering *TLI;
  ScheduleDAGRRList *scheduleDAG;

  // SethiUllmanNumbers - The SethiUllman number for each node.
  std::vector<unsigned> SethiUllmanNumbers;

  /// RegPressure - Tracking current reg pressure per register class.
  ///
  std::vector<unsigned> RegPressure;

  /// RegLimit - Tracking the number of allocatable registers per register
  /// class.
  std::vector<unsigned> RegLimit;

public:
  RegReductionPQBase(MachineFunction &mf,
                     bool hasReadyFilter,
                     bool tracksrp,
                     bool srcorder,
                     const TargetInstrInfo *tii,
                     const TargetRegisterInfo *tri,
                     const TargetLowering *tli)
    : SchedulingPriorityQueue(hasReadyFilter),
      CurQueueId(0), TracksRegPressure(tracksrp), SrcOrder(srcorder),
      MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
    if (TracksRegPressure) {
      unsigned NumRC = TRI->getNumRegClasses();
      RegLimit.resize(NumRC);
      RegPressure.resize(NumRC);
      std::fill(RegLimit.begin(), RegLimit.end(), 0);
      std::fill(RegPressure.begin(), RegPressure.end(), 0);
      for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
             E = TRI->regclass_end(); I != E; ++I)
        RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
    }
  }

  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
    scheduleDAG = scheduleDag;
  }

  ScheduleHazardRecognizer* getHazardRec() {
    return scheduleDAG->getHazardRec();
  }

  void initNodes(std::vector<SUnit> &sunits);

  void addNode(const SUnit *SU);

  void updateNode(const SUnit *SU);

  void releaseState() {
    SUnits = 0;
    SethiUllmanNumbers.clear();
    std::fill(RegPressure.begin(), RegPressure.end(), 0);
  }

  unsigned getNodePriority(const SUnit *SU) const;

  unsigned getNodeOrdering(const SUnit *SU) const {
    if (!SU->getNode()) return 0;

    return scheduleDAG->DAG->GetOrdering(SU->getNode());
  }

  bool empty() const { return Queue.empty(); }

  void push(SUnit *U) {
    assert(!U->NodeQueueId && "Node in the queue already");
    U->NodeQueueId = ++CurQueueId;
    Queue.push_back(U);
  }

  void remove(SUnit *SU) {
    assert(!Queue.empty() && "Queue is empty!");
    assert(SU->NodeQueueId != 0 && "Not in queue!");
    std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
                                                 SU);
    if (I != prior(Queue.end()))
      std::swap(*I, Queue.back());
    Queue.pop_back();
    SU->NodeQueueId = 0;
  }

  bool tracksRegPressure() const { return TracksRegPressure; }

  void dumpRegPressure() const;

  bool HighRegPressure(const SUnit *SU) const;
1685 1686 bool MayReduceRegPressure(SUnit *SU) const; 1687 1688 int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const; 1689 1690 void scheduledNode(SUnit *SU); 1691 1692 void unscheduledNode(SUnit *SU); 1693 1694protected: 1695 bool canClobber(const SUnit *SU, const SUnit *Op); 1696 void AddPseudoTwoAddrDeps(); 1697 void PrescheduleNodesWithMultipleUses(); 1698 void CalculateSethiUllmanNumbers(); 1699}; 1700 1701template<class SF> 1702static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) { 1703 std::vector<SUnit *>::iterator Best = Q.begin(); 1704 for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()), 1705 E = Q.end(); I != E; ++I) 1706 if (Picker(*Best, *I)) 1707 Best = I; 1708 SUnit *V = *Best; 1709 if (Best != prior(Q.end())) 1710 std::swap(*Best, Q.back()); 1711 Q.pop_back(); 1712 return V; 1713} 1714 1715template<class SF> 1716SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) { 1717#ifndef NDEBUG 1718 if (DAG->StressSched) { 1719 reverse_sort<SF> RPicker(Picker); 1720 return popFromQueueImpl(Q, RPicker); 1721 } 1722#endif 1723 (void)DAG; 1724 return popFromQueueImpl(Q, Picker); 1725} 1726 1727template<class SF> 1728class RegReductionPriorityQueue : public RegReductionPQBase { 1729 SF Picker; 1730 1731public: 1732 RegReductionPriorityQueue(MachineFunction &mf, 1733 bool tracksrp, 1734 bool srcorder, 1735 const TargetInstrInfo *tii, 1736 const TargetRegisterInfo *tri, 1737 const TargetLowering *tli) 1738 : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder, 1739 tii, tri, tli), 1740 Picker(this) {} 1741 1742 bool isBottomUp() const { return SF::IsBottomUp; } 1743 1744 bool isReady(SUnit *U) const { 1745 return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle()); 1746 } 1747 1748 SUnit *pop() { 1749 if (Queue.empty()) return NULL; 1750 1751 SUnit *V = popFromQueue(Queue, Picker, scheduleDAG); 1752 V->NodeQueueId = 0; 1753 return V; 1754 } 1755 1756#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1757 void dump(ScheduleDAG *DAG) const { 1758 // Emulate pop() without clobbering NodeQueueIds. 1759 std::vector<SUnit*> DumpQueue = Queue; 1760 SF DumpPicker = Picker; 1761 while (!DumpQueue.empty()) { 1762 SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG); 1763 dbgs() << "Height " << SU->getHeight() << ": "; 1764 SU->dump(DAG); 1765 } 1766 } 1767#endif 1768}; 1769 1770typedef RegReductionPriorityQueue<bu_ls_rr_sort> 1771BURegReductionPriorityQueue; 1772 1773typedef RegReductionPriorityQueue<src_ls_rr_sort> 1774SrcRegReductionPriorityQueue; 1775 1776typedef RegReductionPriorityQueue<hybrid_ls_rr_sort> 1777HybridBURRPriorityQueue; 1778 1779typedef RegReductionPriorityQueue<ilp_ls_rr_sort> 1780ILPBURRPriorityQueue; 1781} // end anonymous namespace 1782 1783//===----------------------------------------------------------------------===// 1784// Static Node Priority for Register Pressure Reduction 1785//===----------------------------------------------------------------------===// 1786 1787// Check for special nodes that bypass scheduling heuristics. 1788// Currently this pushes TokenFactor nodes down, but may be used for other 1789// pseudo-ops as well. 1790// 1791// Return -1 to schedule right above left, 1 for left above right. 1792// Return 0 if no bias exists. 1793static int checkSpecialNodes(const SUnit *left, const SUnit *right) { 1794 bool LSchedLow = left->isScheduleLow; 1795 bool RSchedLow = right->isScheduleLow; 1796 if (LSchedLow != RSchedLow) 1797 return LSchedLow < RSchedLow ? 
1 : -1; 1798 return 0; 1799} 1800 1801/// CalcNodeSethiUllmanNumber - Compute the Sethi-Ullman number. 1802/// A smaller number means higher priority. 1803static unsigned 1804CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) { 1805 unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum]; 1806 if (SethiUllmanNumber != 0) 1807 return SethiUllmanNumber; 1808 1809 unsigned Extra = 0; 1810 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 1811 I != E; ++I) { 1812 if (I->isCtrl()) continue; // ignore chain preds 1813 SUnit *PredSU = I->getSUnit(); 1814 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers); 1815 if (PredSethiUllman > SethiUllmanNumber) { 1816 SethiUllmanNumber = PredSethiUllman; 1817 Extra = 0; 1818 } else if (PredSethiUllman == SethiUllmanNumber) 1819 ++Extra; 1820 } 1821 1822 SethiUllmanNumber += Extra; 1823 1824 if (SethiUllmanNumber == 0) 1825 SethiUllmanNumber = 1; 1826 1827 return SethiUllmanNumber; 1828} 1829 1830/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all 1831/// scheduling units. 1832void RegReductionPQBase::CalculateSethiUllmanNumbers() { 1833 SethiUllmanNumbers.assign(SUnits->size(), 0); 1834 1835 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) 1836 CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers); 1837} 1838 1839void RegReductionPQBase::addNode(const SUnit *SU) { 1840 unsigned SUSize = SethiUllmanNumbers.size(); 1841 if (SUnits->size() > SUSize) 1842 SethiUllmanNumbers.resize(SUSize*2, 0); 1843 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers); 1844} 1845 1846void RegReductionPQBase::updateNode(const SUnit *SU) { 1847 SethiUllmanNumbers[SU->NodeNum] = 0; 1848 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers); 1849} 1850 1851// Lower priority means schedule further down. For bottom-up scheduling, lower 1852// priority SUs are scheduled before higher priority SUs. 1853unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const { 1854 assert(SU->NodeNum < SethiUllmanNumbers.size()); 1855 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0; 1856 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg) 1857 // CopyToReg should be close to its uses to facilitate coalescing and 1858 // avoid spilling. 1859 return 0; 1860 if (Opc == TargetOpcode::EXTRACT_SUBREG || 1861 Opc == TargetOpcode::SUBREG_TO_REG || 1862 Opc == TargetOpcode::INSERT_SUBREG) 1863 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be 1864 // close to their uses to facilitate coalescing. 1865 return 0; 1866 if (SU->NumSuccs == 0 && SU->NumPreds != 0) 1867 // If SU does not have a register use, i.e. it doesn't produce a value 1868 // that would be consumed (e.g. store), then it terminates a chain of 1869 // computation. Give it a large SethiUllman number so it will be 1870 // scheduled right before its predecessors, so that it doesn't lengthen 1871 // their live ranges. 1872 return 0xffff; 1873 if (SU->NumPreds == 0 && SU->NumSuccs != 0) 1874 // If SU does not have a register def, schedule it close to its uses 1875 // because it does not lengthen any live ranges. 1876 return 0; 1877#if 1 1878 return SethiUllmanNumbers[SU->NodeNum]; 1879#else 1880 unsigned Priority = SethiUllmanNumbers[SU->NodeNum]; 1881 if (SU->isCallOp) { 1882 // FIXME: This assumes all of the defs are used as call operands. 1883 int NP = (int)Priority - SU->getNode()->getNumValues(); 1884 return (NP > 0) ?
NP : 0; 1885 } 1886 return Priority; 1887#endif 1888} 1889 1890//===----------------------------------------------------------------------===// 1891// Register Pressure Tracking 1892//===----------------------------------------------------------------------===// 1893 1894void RegReductionPQBase::dumpRegPressure() const { 1895#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP) 1896 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(), 1897 E = TRI->regclass_end(); I != E; ++I) { 1898 const TargetRegisterClass *RC = *I; 1899 unsigned Id = RC->getID(); 1900 unsigned RP = RegPressure[Id]; 1901 if (!RP) continue; 1902 DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id] 1903 << '\n'); 1904 } 1905#endif 1906} 1907 1908bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const { 1909 if (!TLI) 1910 return false; 1911 1912 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 1913 I != E; ++I) { 1914 if (I->isCtrl()) 1915 continue; 1916 SUnit *PredSU = I->getSUnit(); 1917 // NumRegDefsLeft is zero when enough uses of this node have been scheduled 1918 // to cover the number of registers defined (they are all live). 1919 if (PredSU->NumRegDefsLeft == 0) { 1920 continue; 1921 } 1922 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG); 1923 RegDefPos.IsValid(); RegDefPos.Advance()) { 1924 unsigned RCId, Cost; 1925 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF); 1926 1927 if ((RegPressure[RCId] + Cost) >= RegLimit[RCId]) 1928 return true; 1929 } 1930 } 1931 return false; 1932} 1933 1934bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const { 1935 const SDNode *N = SU->getNode(); 1936 1937 if (!N->isMachineOpcode() || !SU->NumSuccs) 1938 return false; 1939 1940 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs(); 1941 for (unsigned i = 0; i != NumDefs; ++i) { 1942 EVT VT = N->getValueType(i); 1943 if (!N->hasAnyUseOfValue(i)) 1944 continue; 1945 unsigned RCId = TLI->getRepRegClassFor(VT)->getID(); 1946 if (RegPressure[RCId] >= RegLimit[RCId]) 1947 return true; 1948 } 1949 return false; 1950} 1951 1952// Compute the register pressure contribution by this instruction by counting 1953// up for uses that are not live and down for defs. Only count register classes 1954// that are already under high pressure. As a side effect, compute the number of 1955// uses of registers that are already live. 1956// 1957// FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure 1958// so it could probably be factored. 1959int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const { 1960 LiveUses = 0; 1961 int PDiff = 0; 1962 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 1963 I != E; ++I) { 1964 if (I->isCtrl()) 1965 continue; 1966 SUnit *PredSU = I->getSUnit(); 1967 // NumRegDefsLeft is zero when enough uses of this node have been scheduled 1968 // to cover the number of registers defined (they are all live).
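    // Such operands count as uses of already-live registers (LiveUses) rather
    // than as a pressure increase.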
1969 if (PredSU->NumRegDefsLeft == 0) { 1970 if (PredSU->getNode()->isMachineOpcode()) 1971 ++LiveUses; 1972 continue; 1973 } 1974 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG); 1975 RegDefPos.IsValid(); RegDefPos.Advance()) { 1976 EVT VT = RegDefPos.GetValue(); 1977 unsigned RCId = TLI->getRepRegClassFor(VT)->getID(); 1978 if (RegPressure[RCId] >= RegLimit[RCId]) 1979 ++PDiff; 1980 } 1981 } 1982 const SDNode *N = SU->getNode(); 1983 1984 if (!N || !N->isMachineOpcode() || !SU->NumSuccs) 1985 return PDiff; 1986 1987 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs(); 1988 for (unsigned i = 0; i != NumDefs; ++i) { 1989 EVT VT = N->getValueType(i); 1990 if (!N->hasAnyUseOfValue(i)) 1991 continue; 1992 unsigned RCId = TLI->getRepRegClassFor(VT)->getID(); 1993 if (RegPressure[RCId] >= RegLimit[RCId]) 1994 --PDiff; 1995 } 1996 return PDiff; 1997} 1998 1999void RegReductionPQBase::scheduledNode(SUnit *SU) { 2000 if (!TracksRegPressure) 2001 return; 2002 2003 if (!SU->getNode()) 2004 return; 2005 2006 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 2007 I != E; ++I) { 2008 if (I->isCtrl()) 2009 continue; 2010 SUnit *PredSU = I->getSUnit(); 2011 // NumRegDefsLeft is zero when enough uses of this node have been scheduled 2012 // to cover the number of registers defined (they are all live). 2013 if (PredSU->NumRegDefsLeft == 0) { 2014 continue; 2015 } 2016 // FIXME: The ScheduleDAG currently loses information about which of a 2017 // node's values is consumed by each dependence. Consequently, if the node 2018 // defines multiple register classes, we don't know which to pressurize 2019 // here. Instead the following loop consumes the register defs in an 2020 // arbitrary order. At least it handles the common case of clustered loads 2021 // to the same class. For precise liveness, each SDep needs to indicate the 2022 // result number. But that tightly couples the ScheduleDAG with the 2023 // SelectionDAG, making updates tricky. A simpler hack would be to attach a 2024 // value type or register class to SDep. 2025 // 2026 // The most important aspect of register tracking is balancing the increase 2027 // here with the reduction further below. Note that this SU may use multiple 2028 // defs in PredSU. They can't be determined here, but we've already 2029 // compensated by reducing NumRegDefsLeft in PredSU during 2030 // ScheduleDAGSDNodes::AddSchedEdges. 2031 --PredSU->NumRegDefsLeft; 2032 unsigned SkipRegDefs = PredSU->NumRegDefsLeft; 2033 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG); 2034 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) { 2035 if (SkipRegDefs) 2036 continue; 2037 2038 unsigned RCId, Cost; 2039 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF); 2040 RegPressure[RCId] += Cost; 2041 break; 2042 } 2043 } 2044 2045 // We should have this assert, but there may be dead SDNodes that never 2046 // materialize as SUnits, so they don't appear to generate liveness. 2047 //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses"); 2048 int SkipRegDefs = (int)SU->NumRegDefsLeft; 2049 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG); 2050 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) { 2051 if (SkipRegDefs > 0) 2052 continue; 2053 unsigned RCId, Cost; 2054 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF); 2055 if (RegPressure[RCId] < Cost) { 2056 // Register pressure tracking is imprecise. This can happen.
But we try 2057 // hard not to let it happen because it likely results in poor scheduling. 2058 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") has too many regdefs\n"); 2059 RegPressure[RCId] = 0; 2060 } 2061 else { 2062 RegPressure[RCId] -= Cost; 2063 } 2064 } 2065 dumpRegPressure(); 2066} 2067 2068void RegReductionPQBase::unscheduledNode(SUnit *SU) { 2069 if (!TracksRegPressure) 2070 return; 2071 2072 const SDNode *N = SU->getNode(); 2073 if (!N) return; 2074 2075 if (!N->isMachineOpcode()) { 2076 if (N->getOpcode() != ISD::CopyToReg) 2077 return; 2078 } else { 2079 unsigned Opc = N->getMachineOpcode(); 2080 if (Opc == TargetOpcode::EXTRACT_SUBREG || 2081 Opc == TargetOpcode::INSERT_SUBREG || 2082 Opc == TargetOpcode::SUBREG_TO_REG || 2083 Opc == TargetOpcode::REG_SEQUENCE || 2084 Opc == TargetOpcode::IMPLICIT_DEF) 2085 return; 2086 } 2087 2088 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 2089 I != E; ++I) { 2090 if (I->isCtrl()) 2091 continue; 2092 SUnit *PredSU = I->getSUnit(); 2093 // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only 2094 // counts data deps. 2095 if (PredSU->NumSuccsLeft != PredSU->Succs.size()) 2096 continue; 2097 const SDNode *PN = PredSU->getNode(); 2098 if (!PN->isMachineOpcode()) { 2099 if (PN->getOpcode() == ISD::CopyFromReg) { 2100 EVT VT = PN->getValueType(0); 2101 unsigned RCId = TLI->getRepRegClassFor(VT)->getID(); 2102 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT); 2103 } 2104 continue; 2105 } 2106 unsigned POpc = PN->getMachineOpcode(); 2107 if (POpc == TargetOpcode::IMPLICIT_DEF) 2108 continue; 2109 if (POpc == TargetOpcode::EXTRACT_SUBREG || 2110 POpc == TargetOpcode::INSERT_SUBREG || 2111 POpc == TargetOpcode::SUBREG_TO_REG) { 2112 EVT VT = PN->getValueType(0); 2113 unsigned RCId = TLI->getRepRegClassFor(VT)->getID(); 2114 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT); 2115 continue; 2116 } 2117 unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs(); 2118 for (unsigned i = 0; i != NumDefs; ++i) { 2119 EVT VT = PN->getValueType(i); 2120 if (!PN->hasAnyUseOfValue(i)) 2121 continue; 2122 unsigned RCId = TLI->getRepRegClassFor(VT)->getID(); 2123 if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT)) 2124 // Register pressure tracking is imprecise. This can happen. 2125 RegPressure[RCId] = 0; 2126 else 2127 RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT); 2128 } 2129 } 2130 2131 // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses() 2132 // may transfer data dependencies to CopyToReg. 2133 if (SU->NumSuccs && N->isMachineOpcode()) { 2134 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs(); 2135 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) { 2136 EVT VT = N->getValueType(i); 2137 if (VT == MVT::Glue || VT == MVT::Other) 2138 continue; 2139 if (!N->hasAnyUseOfValue(i)) 2140 continue; 2141 unsigned RCId = TLI->getRepRegClassFor(VT)->getID(); 2142 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT); 2143 } 2144 } 2145 2146 dumpRegPressure(); 2147} 2148 2149//===----------------------------------------------------------------------===// 2150// Dynamic Node Priority for Register Pressure Reduction 2151//===----------------------------------------------------------------------===// 2152 2153/// closestSucc - Returns the scheduled cycle of the successor which is 2154/// closest to the current cycle. 
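/// Because scheduling is bottom-up, a successor with greater height was
/// scheduled more recently, i.e. nearer the current cycle; BURRSort prefers
/// the node with the larger result so a def lands directly above its use.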
2155static unsigned closestSucc(const SUnit *SU) { 2156 unsigned MaxHeight = 0; 2157 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); 2158 I != E; ++I) { 2159 if (I->isCtrl()) continue; // ignore chain succs 2160 unsigned Height = I->getSUnit()->getHeight(); 2161 // If there are a bunch of CopyToRegs stacked up, they should be considered 2162 // to be at the same position. 2163 if (I->getSUnit()->getNode() && 2164 I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg) 2165 Height = closestSucc(I->getSUnit())+1; 2166 if (Height > MaxHeight) 2167 MaxHeight = Height; 2168 } 2169 return MaxHeight; 2170} 2171 2172/// calcMaxScratches - Returns a cost estimate of the worst-case requirement 2173/// for scratch registers, i.e. the number of data dependencies. 2174static unsigned calcMaxScratches(const SUnit *SU) { 2175 unsigned Scratches = 0; 2176 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 2177 I != E; ++I) { 2178 if (I->isCtrl()) continue; // ignore chain preds 2179 Scratches++; 2180 } 2181 return Scratches; 2182} 2183 2184/// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are 2185/// CopyFromReg from a virtual register. 2186static bool hasOnlyLiveInOpers(const SUnit *SU) { 2187 bool RetVal = false; 2188 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 2189 I != E; ++I) { 2190 if (I->isCtrl()) continue; 2191 const SUnit *PredSU = I->getSUnit(); 2192 if (PredSU->getNode() && 2193 PredSU->getNode()->getOpcode() == ISD::CopyFromReg) { 2194 unsigned Reg = 2195 cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg(); 2196 if (TargetRegisterInfo::isVirtualRegister(Reg)) { 2197 RetVal = true; 2198 continue; 2199 } 2200 } 2201 return false; 2202 } 2203 return RetVal; 2204} 2205 2206/// hasOnlyLiveOutUses - Return true if SU has only value successors that are 2207/// CopyToReg to a virtual register. This SU def is probably a liveout and 2208/// it has no other use. It should be scheduled closer to the terminator. 2209static bool hasOnlyLiveOutUses(const SUnit *SU) { 2210 bool RetVal = false; 2211 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); 2212 I != E; ++I) { 2213 if (I->isCtrl()) continue; 2214 const SUnit *SuccSU = I->getSUnit(); 2215 if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) { 2216 unsigned Reg = 2217 cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg(); 2218 if (TargetRegisterInfo::isVirtualRegister(Reg)) { 2219 RetVal = true; 2220 continue; 2221 } 2222 } 2223 return false; 2224 } 2225 return RetVal; 2226} 2227 2228// Set isVRegCycle for a node with only live in opers and live out uses. Also 2229// set isVRegCycle for its CopyFromReg operands. 2230// 2231// This is only relevant for single-block loops, in which case the VRegCycle 2232// node is likely an induction variable in which the operand and target virtual 2233// registers should be coalesced (e.g. pre/post increment values). Setting the 2234// isVRegCycle flag helps the scheduler prioritize other uses of the same 2235// CopyFromReg so that this node becomes the virtual register "kill". This 2236// avoids interference between the values live in and out of the block and 2237// eliminates a copy inside the loop.
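// For example, in a single-block loop:
//
//   t0 = CopyFromReg v0     ; live-in operand
//   t1 = add t0, 1          ; the VRegCycle node
//   CopyToReg v1, t1        ; live-out use
//
// marking the add (and its CopyFromReg operand) as a VRegCycle steers other
// users of t0 to schedule before the add, so v0 and v1 can later be
// coalesced. (Illustrative pseudo-nodes, not actual DAG output.)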
2238static void initVRegCycle(SUnit *SU) { 2239 if (DisableSchedVRegCycle) 2240 return; 2241 2242 if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU)) 2243 return; 2244 2245 DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n"); 2246 2247 SU->isVRegCycle = true; 2248 2249 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 2250 I != E; ++I) { 2251 if (I->isCtrl()) continue; 2252 I->getSUnit()->isVRegCycle = true; 2253 } 2254} 2255 2256// After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of 2257// CopyFromReg operands. We should no longer penalize other uses of this VReg. 2258static void resetVRegCycle(SUnit *SU) { 2259 if (!SU->isVRegCycle) 2260 return; 2261 2262 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 2263 I != E; ++I) { 2264 if (I->isCtrl()) continue; // ignore chain preds 2265 SUnit *PredSU = I->getSUnit(); 2266 if (PredSU->isVRegCycle) { 2267 assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg && 2268 "VRegCycle def must be CopyFromReg"); 2269 I->getSUnit()->isVRegCycle = false; 2270 } 2271 } 2272} 2273 2274// Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This 2275// means a node that defines the VRegCycle has not been scheduled yet. 2276static bool hasVRegCycleUse(const SUnit *SU) { 2277 // If this SU also defines the VReg, don't hoist it as a "use". 2278 if (SU->isVRegCycle) 2279 return false; 2280 2281 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 2282 I != E; ++I) { 2283 if (I->isCtrl()) continue; // ignore chain preds 2284 if (I->getSUnit()->isVRegCycle && 2285 I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) { 2286 DEBUG(dbgs() << " VReg cycle use: SU (" << SU->NodeNum << ")\n"); 2287 return true; 2288 } 2289 } 2290 return false; 2291} 2292 2293// Check for either a dependence (latency) or resource (hazard) stall. 2294// 2295// Note: The ScheduleHazardRecognizer interface requires a non-const SU. 2296static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) { 2297 if ((int)SPQ->getCurCycle() < Height) return true; 2298 if (SPQ->getHazardRec()->getHazardType(SU, 0) 2299 != ScheduleHazardRecognizer::NoHazard) 2300 return true; 2301 return false; 2302} 2303 2304// Return -1 if left has higher priority, 1 if right has higher priority. 2305// Return 0 if latency-based priority is equivalent. 2306static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref, 2307 RegReductionPQBase *SPQ) { 2308 // Scheduling an instruction that uses a VReg whose postincrement has not yet 2309 // been scheduled will induce a copy. Model this as an extra cycle of latency. 2310 int LPenalty = hasVRegCycleUse(left) ? 1 : 0; 2311 int RPenalty = hasVRegCycleUse(right) ? 1 : 0; 2312 int LHeight = (int)left->getHeight() + LPenalty; 2313 int RHeight = (int)right->getHeight() + RPenalty; 2314 2315 bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) && 2316 BUHasStall(left, LHeight, SPQ); 2317 bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) && 2318 BUHasStall(right, RHeight, SPQ); 2319 2320 // If scheduling one of the nodes will cause a pipeline stall, delay it. 2321 // If scheduling either one of the nodes will cause a pipeline stall, sort 2322 // them according to their height. 2323 if (LStall) { 2324 if (!RStall) 2325 return 1; 2326 if (LHeight != RHeight) 2327 return LHeight > RHeight ?
1 : -1; 2328 } else if (RStall) 2329 return -1; 2330 2331 // If either node is scheduling for latency, sort them by height/depth 2332 // and latency. 2333 if (!checkPref || (left->SchedulingPref == Sched::ILP || 2334 right->SchedulingPref == Sched::ILP)) { 2335 // If neither instruction stalls (!LStall && !RStall) and the hazard 2336 // recognizer is enabled, it is grouping instructions by cycle, so height is 2337 // already covered and only depth matters. We also reach this point if both 2338 // stall but have the same height. 2339 if (!SPQ->getHazardRec()->isEnabled()) { 2340 if (LHeight != RHeight) 2341 return LHeight > RHeight ? 1 : -1; 2342 } 2343 int LDepth = left->getDepth() - LPenalty; 2344 int RDepth = right->getDepth() - RPenalty; 2345 if (LDepth != RDepth) { 2346 DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum 2347 << ") depth " << LDepth << " vs SU (" << right->NodeNum 2348 << ") depth " << RDepth << "\n"); 2349 return LDepth < RDepth ? 1 : -1; 2350 } 2351 if (left->Latency != right->Latency) 2352 return left->Latency > right->Latency ? 1 : -1; 2353 } 2354 return 0; 2355} 2356 2357static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) { 2358 // Schedule physical register definitions close to their use. This is 2359 // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as 2360 // long as shortening physreg live ranges is generally good, we can defer 2361 // creating a subtarget hook. 2362 if (!DisableSchedPhysRegJoin) { 2363 bool LHasPhysReg = left->hasPhysRegDefs; 2364 bool RHasPhysReg = right->hasPhysRegDefs; 2365 if (LHasPhysReg != RHasPhysReg) { 2366 #ifndef NDEBUG 2367 const char *const PhysRegMsg[] = {" has no physreg"," defines a physreg"}; 2368 #endif 2369 DEBUG(dbgs() << " SU (" << left->NodeNum << ") " 2370 << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") " 2371 << PhysRegMsg[RHasPhysReg] << "\n"); 2372 return LHasPhysReg < RHasPhysReg; 2373 } 2374 } 2375 2376 // Prioritize by Sethi-Ullman number and push CopyToReg nodes down. 2377 unsigned LPriority = SPQ->getNodePriority(left); 2378 unsigned RPriority = SPQ->getNodePriority(right); 2379 2380 // Be really careful about hoisting call operands above previous calls. 2381 // Only allow it if it would reduce register pressure. 2382 if (left->isCall && right->isCallOp) { 2383 unsigned RNumVals = right->getNode()->getNumValues(); 2384 RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0; 2385 } 2386 if (right->isCall && left->isCallOp) { 2387 unsigned LNumVals = left->getNode()->getNumValues(); 2388 LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0; 2389 } 2390 2391 if (LPriority != RPriority) 2392 return LPriority > RPriority; 2393 2394 // If one or both of the nodes are calls and their Sethi-Ullman numbers are 2395 // the same, keep source order. 2396 if (left->isCall || right->isCall) { 2397 unsigned LOrder = SPQ->getNodeOrdering(left); 2398 unsigned ROrder = SPQ->getNodeOrdering(right); 2399 2400 // Prefer an ordering where the lower the non-zero order number, the higher 2401 // the preference. 2402 if ((LOrder || ROrder) && LOrder != ROrder) 2403 return LOrder != 0 && (LOrder < ROrder || ROrder == 0); 2404 } 2405 2406 // Try to schedule def + use closer when Sethi-Ullman numbers are the same. 2407 // e.g. 2408 // t1 = op t2, c1 2409 // t3 = op t4, c2 2410 // 2411 // and the following instructions are both ready. 2412 // t2 = op c3 2413 // t4 = op c4 2414 2415 // Then schedule t2 = op first. 2416 // i.e.
2417 // t4 = op c4 2418 // t2 = op c3 2419 // t1 = op t2, c1 2420 // t3 = op t4, c2 2421 // 2422 // This creates more short live intervals. 2423 unsigned LDist = closestSucc(left); 2424 unsigned RDist = closestSucc(right); 2425 if (LDist != RDist) 2426 return LDist < RDist; 2427 2428 // How many registers become live when the node is scheduled. 2429 unsigned LScratch = calcMaxScratches(left); 2430 unsigned RScratch = calcMaxScratches(right); 2431 if (LScratch != RScratch) 2432 return LScratch > RScratch; 2433 2434 // Comparing latency against a call makes little sense unless the node 2435 // is register pressure-neutral. 2436 if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0)) 2437 return (left->NodeQueueId > right->NodeQueueId); 2438 2439 // Do not compare latencies when one or both of the nodes are calls. 2440 if (!DisableSchedCycles && 2441 !(left->isCall || right->isCall)) { 2442 int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ); 2443 if (result != 0) 2444 return result > 0; 2445 } 2446 else { 2447 if (left->getHeight() != right->getHeight()) 2448 return left->getHeight() > right->getHeight(); 2449 2450 if (left->getDepth() != right->getDepth()) 2451 return left->getDepth() < right->getDepth(); 2452 } 2453 2454 assert(left->NodeQueueId && right->NodeQueueId && 2455 "NodeQueueId cannot be zero"); 2456 return (left->NodeQueueId > right->NodeQueueId); 2457} 2458 2459// Bottom up 2460bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const { 2461 if (int res = checkSpecialNodes(left, right)) 2462 return res > 0; 2463 2464 return BURRSort(left, right, SPQ); 2465} 2466 2467// Source order, otherwise bottom up. 2468bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const { 2469 if (int res = checkSpecialNodes(left, right)) 2470 return res > 0; 2471 2472 unsigned LOrder = SPQ->getNodeOrdering(left); 2473 unsigned ROrder = SPQ->getNodeOrdering(right); 2474 2475 // Prefer an ordering where the lower the non-zero order number, the higher 2476 // the preference. 2477 if ((LOrder || ROrder) && LOrder != ROrder) 2478 return LOrder != 0 && (LOrder < ROrder || ROrder == 0); 2479 2480 return BURRSort(left, right, SPQ); 2481} 2482 2483// If the time between now and when the instruction will be ready can cover 2484// the spill code, then avoid adding it to the ready queue. This gives long 2485// stalls highest priority and allows hoisting across calls. It should also 2486// speed up processing the available queue. 2487bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const { 2488 static const unsigned ReadyDelay = 3; 2489 2490 if (SPQ->MayReduceRegPressure(SU)) return true; 2491 2492 if (SU->getHeight() > (CurCycle + ReadyDelay)) return false; 2493 2494 if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay) 2495 != ScheduleHazardRecognizer::NoHazard) 2496 return false; 2497 2498 return true; 2499} 2500 2501// Return true if right should be scheduled with higher priority than left. 2502bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const { 2503 if (int res = checkSpecialNodes(left, right)) 2504 return res > 0; 2505 2506 if (left->isCall || right->isCall) 2507 // No way to compute latency of calls. 2508 return BURRSort(left, right, SPQ); 2509 2510 bool LHigh = SPQ->HighRegPressure(left); 2511 bool RHigh = SPQ->HighRegPressure(right); 2512 // Avoid causing spills. If register pressure is high, schedule for 2513 // register pressure reduction.
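  // Otherwise neither node is under pressure; compare latency first, then
  // fall back to the normal register-reduction ordering.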
2514 if (LHigh && !RHigh) { 2515 DEBUG(dbgs() << " pressure SU(" << left->NodeNum << ") > SU(" 2516 << right->NodeNum << ")\n"); 2517 return true; 2518 } 2519 else if (!LHigh && RHigh) { 2520 DEBUG(dbgs() << " pressure SU(" << right->NodeNum << ") > SU(" 2521 << left->NodeNum << ")\n"); 2522 return false; 2523 } 2524 if (!LHigh && !RHigh) { 2525 int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ); 2526 if (result != 0) 2527 return result > 0; 2528 } 2529 return BURRSort(left, right, SPQ); 2530} 2531 2532// Schedule as many instructions in each cycle as possible. So don't make an 2533// instruction available unless it is ready in the current cycle. 2534bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const { 2535 if (SU->getHeight() > CurCycle) return false; 2536 2537 if (SPQ->getHazardRec()->getHazardType(SU, 0) 2538 != ScheduleHazardRecognizer::NoHazard) 2539 return false; 2540 2541 return true; 2542} 2543 2544static bool canEnableCoalescing(SUnit *SU) { 2545 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0; 2546 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg) 2547 // CopyToReg should be close to its uses to facilitate coalescing and 2548 // avoid spilling. 2549 return true; 2550 2551 if (Opc == TargetOpcode::EXTRACT_SUBREG || 2552 Opc == TargetOpcode::SUBREG_TO_REG || 2553 Opc == TargetOpcode::INSERT_SUBREG) 2554 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be 2555 // close to their uses to facilitate coalescing. 2556 return true; 2557 2558 if (SU->NumPreds == 0 && SU->NumSuccs != 0) 2559 // If SU does not have a register def, schedule it close to its uses 2560 // because it does not lengthen any live ranges. 2561 return true; 2562 2563 return false; 2564} 2565 2566// list-ilp is currently an experimental scheduler that allows various 2567// heuristics to be enabled prior to the normal register reduction logic. 2568bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const { 2569 if (int res = checkSpecialNodes(left, right)) 2570 return res > 0; 2571 2572 if (left->isCall || right->isCall) 2573 // No way to compute latency of calls. 
2574 return BURRSort(left, right, SPQ); 2575 2576 unsigned LLiveUses = 0, RLiveUses = 0; 2577 int LPDiff = 0, RPDiff = 0; 2578 if (!DisableSchedRegPressure || !DisableSchedLiveUses) { 2579 LPDiff = SPQ->RegPressureDiff(left, LLiveUses); 2580 RPDiff = SPQ->RegPressureDiff(right, RLiveUses); 2581 } 2582 if (!DisableSchedRegPressure && LPDiff != RPDiff) { 2583 DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff 2584 << " != SU(" << right->NodeNum << "): " << RPDiff << "\n"); 2585 return LPDiff > RPDiff; 2586 } 2587 2588 if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) { 2589 bool LReduce = canEnableCoalescing(left); 2590 bool RReduce = canEnableCoalescing(right); 2591 if (LReduce && !RReduce) return false; 2592 if (RReduce && !LReduce) return true; 2593 } 2594 2595 if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) { 2596 DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses 2597 << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n"); 2598 return LLiveUses < RLiveUses; 2599 } 2600 2601 if (!DisableSchedStalls) { 2602 bool LStall = BUHasStall(left, left->getHeight(), SPQ); 2603 bool RStall = BUHasStall(right, right->getHeight(), SPQ); 2604 if (LStall != RStall) 2605 return left->getHeight() > right->getHeight(); 2606 } 2607 2608 if (!DisableSchedCriticalPath) { 2609 int spread = (int)left->getDepth() - (int)right->getDepth(); 2610 if (std::abs(spread) > MaxReorderWindow) { 2611 DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): " 2612 << left->getDepth() << " != SU(" << right->NodeNum << "): " 2613 << right->getDepth() << "\n"); 2614 return left->getDepth() < right->getDepth(); 2615 } 2616 } 2617 2618 if (!DisableSchedHeight && left->getHeight() != right->getHeight()) { 2619 int spread = (int)left->getHeight() - (int)right->getHeight(); 2620 if (std::abs(spread) > MaxReorderWindow) 2621 return left->getHeight() > right->getHeight(); 2622 } 2623 2624 return BURRSort(left, right, SPQ); 2625} 2626 2627void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) { 2628 SUnits = &sunits; 2629 // Add pseudo dependency edges for two-address nodes. 2630 if (!Disable2AddrHack) 2631 AddPseudoTwoAddrDeps(); 2632 // Reroute edges to nodes with multiple uses. 2633 if (!TracksRegPressure && !SrcOrder) 2634 PrescheduleNodesWithMultipleUses(); 2635 // Calculate node priorities. 2636 CalculateSethiUllmanNumbers(); 2637 2638 // For single block loops, mark nodes that look like canonical IV increments. 
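  // (A basic block that lists itself as a successor is a single-block loop.)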
2639 if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) { 2640 for (unsigned i = 0, e = sunits.size(); i != e; ++i) { 2641 initVRegCycle(&sunits[i]); 2642 } 2643 } 2644} 2645 2646//===----------------------------------------------------------------------===// 2647// Preschedule for Register Pressure 2648//===----------------------------------------------------------------------===// 2649 2650bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) { 2651 if (SU->isTwoAddress) { 2652 unsigned Opc = SU->getNode()->getMachineOpcode(); 2653 const MCInstrDesc &MCID = TII->get(Opc); 2654 unsigned NumRes = MCID.getNumDefs(); 2655 unsigned NumOps = MCID.getNumOperands() - NumRes; 2656 for (unsigned i = 0; i != NumOps; ++i) { 2657 if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) { 2658 SDNode *DU = SU->getNode()->getOperand(i).getNode(); 2659 if (DU->getNodeId() != -1 && 2660 Op->OrigNode == &(*SUnits)[DU->getNodeId()]) 2661 return true; 2662 } 2663 } 2664 } 2665 return false; 2666} 2667 2668/// canClobberReachingPhysRegUse - True if SU would clobber one of its 2669/// successors' explicit physregs whose definition can reach DepSU. 2670/// i.e. DepSU should not be scheduled above SU. 2671static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU, 2672 ScheduleDAGRRList *scheduleDAG, 2673 const TargetInstrInfo *TII, 2674 const TargetRegisterInfo *TRI) { 2675 const uint16_t *ImpDefs 2676 = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs(); 2677 const uint32_t *RegMask = getNodeRegMask(SU->getNode()); 2678 if (!ImpDefs && !RegMask) 2679 return false; 2680 2681 for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end(); 2682 SI != SE; ++SI) { 2683 SUnit *SuccSU = SI->getSUnit(); 2684 for (SUnit::const_pred_iterator PI = SuccSU->Preds.begin(), 2685 PE = SuccSU->Preds.end(); PI != PE; ++PI) { 2686 if (!PI->isAssignedRegDep()) 2687 continue; 2688 2689 if (RegMask && MachineOperand::clobbersPhysReg(RegMask, PI->getReg()) && 2690 scheduleDAG->IsReachable(DepSU, PI->getSUnit())) 2691 return true; 2692 2693 if (ImpDefs) 2694 for (const uint16_t *ImpDef = ImpDefs; *ImpDef; ++ImpDef) 2695 // Return true if SU clobbers this physical register use and the 2696 // definition of the register reaches DepSU. IsReachable queries 2697 // a topological forward sort of the DAG (following the successors). 2698 if (TRI->regsOverlap(*ImpDef, PI->getReg()) && 2699 scheduleDAG->IsReachable(DepSU, PI->getSUnit())) 2700 return true; 2701 } 2702 } 2703 return false; 2704} 2705 2706/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's 2707/// physical register defs.
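/// SuccSU's implicit physreg defs (result values beyond its declared defs)
/// are matched against the implicit defs and regmask operands found on SU's
/// glued node chain.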
2708static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU, 2709 const TargetInstrInfo *TII, 2710 const TargetRegisterInfo *TRI) { 2711 SDNode *N = SuccSU->getNode(); 2712 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs(); 2713 const uint16_t *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs(); 2714 assert(ImpDefs && "Caller should check hasPhysRegDefs"); 2715 for (const SDNode *SUNode = SU->getNode(); SUNode; 2716 SUNode = SUNode->getGluedNode()) { 2717 if (!SUNode->isMachineOpcode()) 2718 continue; 2719 const uint16_t *SUImpDefs = 2720 TII->get(SUNode->getMachineOpcode()).getImplicitDefs(); 2721 const uint32_t *SURegMask = getNodeRegMask(SUNode); 2722 if (!SUImpDefs && !SURegMask) 2723 continue; 2724 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) { 2725 EVT VT = N->getValueType(i); 2726 if (VT == MVT::Glue || VT == MVT::Other) 2727 continue; 2728 if (!N->hasAnyUseOfValue(i)) 2729 continue; 2730 unsigned Reg = ImpDefs[i - NumDefs]; 2731 if (SURegMask && MachineOperand::clobbersPhysReg(SURegMask, Reg)) 2732 return true; 2733 if (!SUImpDefs) 2734 continue; 2735 for (;*SUImpDefs; ++SUImpDefs) { 2736 unsigned SUReg = *SUImpDefs; 2737 if (TRI->regsOverlap(Reg, SUReg)) 2738 return true; 2739 } 2740 } 2741 } 2742 return false; 2743} 2744 2745/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses 2746/// are not handled well by the general register pressure reduction 2747/// heuristics. When presented with code like this: 2748/// 2749/// N 2750/// / | 2751/// / | 2752/// U store 2753/// | 2754/// ... 2755/// 2756/// the heuristics tend to push the store up, but since the 2757/// operand of the store has another use (U), this would increase 2758/// the length of that other use (the U->N edge). 2759/// 2760/// This function transforms code like the above to route U's 2761/// dependence through the store when possible, like this: 2762/// 2763/// N 2764/// || 2765/// || 2766/// store 2767/// | 2768/// U 2769/// | 2770/// ... 2771/// 2772/// This results in the store being scheduled immediately 2773/// after N, which shortens the U->N live range, reducing 2774/// register pressure. 2775/// 2776void RegReductionPQBase::PrescheduleNodesWithMultipleUses() { 2777 // Visit all the nodes in topological order, working top-down. 2778 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) { 2779 SUnit *SU = &(*SUnits)[i]; 2780 // For now, only look at nodes with no data successors, such as stores. 2781 // These are especially important, due to the heuristics in 2782 // getNodePriority for nodes with no data successors. 2783 if (SU->NumSuccs != 0) 2784 continue; 2785 // For now, only look at nodes with exactly one data predecessor. 2786 if (SU->NumPreds != 1) 2787 continue; 2788 // Avoid prescheduling copies to virtual registers, which don't behave 2789 // like other nodes from the perspective of scheduling heuristics. 2790 if (SDNode *N = SU->getNode()) 2791 if (N->getOpcode() == ISD::CopyToReg && 2792 TargetRegisterInfo::isVirtualRegister 2793 (cast<RegisterSDNode>(N->getOperand(1))->getReg())) 2794 continue; 2795 2796 // Locate the single data predecessor. 2797 SUnit *PredSU = 0; 2798 for (SUnit::const_pred_iterator II = SU->Preds.begin(), 2799 EE = SU->Preds.end(); II != EE; ++II) 2800 if (!II->isCtrl()) { 2801 PredSU = II->getSUnit(); 2802 break; 2803 } 2804 assert(PredSU); 2805 2806 // Don't rewrite edges that carry physregs, because that requires additional 2807 // support infrastructure. 
2808 if (PredSU->hasPhysRegDefs) 2809 continue; 2810 // Short-circuit the case where SU is PredSU's only data successor. 2811 if (PredSU->NumSuccs == 1) 2812 continue; 2813 // Avoid prescheduling to copies from virtual registers, which don't behave 2814 // like other nodes from the perspective of scheduling heuristics. 2815 if (SDNode *N = SU->getNode()) 2816 if (N->getOpcode() == ISD::CopyFromReg && 2817 TargetRegisterInfo::isVirtualRegister 2818 (cast<RegisterSDNode>(N->getOperand(1))->getReg())) 2819 continue; 2820 2821 // Perform checks on the successors of PredSU. 2822 for (SUnit::const_succ_iterator II = PredSU->Succs.begin(), 2823 EE = PredSU->Succs.end(); II != EE; ++II) { 2824 SUnit *PredSuccSU = II->getSUnit(); 2825 if (PredSuccSU == SU) continue; 2826 // If PredSU has another successor with no data successors, for 2827 // now don't attempt to choose either over the other. 2828 if (PredSuccSU->NumSuccs == 0) 2829 goto outer_loop_continue; 2830 // Don't break physical register dependencies. 2831 if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs) 2832 if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI)) 2833 goto outer_loop_continue; 2834 // Don't introduce graph cycles. 2835 if (scheduleDAG->IsReachable(SU, PredSuccSU)) 2836 goto outer_loop_continue; 2837 } 2838 2839 // Ok, the transformation is safe and the heuristics suggest it is 2840 // profitable. Update the graph. 2841 DEBUG(dbgs() << " Prescheduling SU #" << SU->NodeNum 2842 << " next to PredSU #" << PredSU->NodeNum 2843 << " to guide scheduling in the presence of multiple uses\n"); 2844 for (unsigned i = 0; i != PredSU->Succs.size(); ++i) { 2845 SDep Edge = PredSU->Succs[i]; 2846 assert(!Edge.isAssignedRegDep()); 2847 SUnit *SuccSU = Edge.getSUnit(); 2848 if (SuccSU != SU) { 2849 Edge.setSUnit(PredSU); 2850 scheduleDAG->RemovePred(SuccSU, Edge); 2851 scheduleDAG->AddPred(SU, Edge); 2852 Edge.setSUnit(SU); 2853 scheduleDAG->AddPred(SuccSU, Edge); 2854 --i; 2855 } 2856 } 2857 outer_loop_continue:; 2858 } 2859} 2860 2861/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses 2862/// it as a def&use operand, add a pseudo control edge from it to the other 2863/// node (if it won't create a cycle) so the two-address one will be scheduled 2864/// first (lower in the schedule). If both nodes are two-address, favor the 2865/// one that has a CopyToReg use (more likely to be a loop induction update). 2866/// If both are two-address, but one is commutable while the other is not 2867/// commutable, favor the one that's not commutable.
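/// For example, if t0 has two uses and t1 is tied to its operand:
///
///   t1 = op t0, c1    ; two-address: t1 reuses t0's register
///   t2 = op t0, c2
///
/// the artificial edge t2 -> t1 keeps t2 above t1 in the final order, so t2
/// reads t0 before the two-address node clobbers it and no copy of t0 is
/// needed. (Illustrative example; actual operand order is target-specific.)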
2868void RegReductionPQBase::AddPseudoTwoAddrDeps() { 2869 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) { 2870 SUnit *SU = &(*SUnits)[i]; 2871 if (!SU->isTwoAddress) 2872 continue; 2873 2874 SDNode *Node = SU->getNode(); 2875 if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode()) 2876 continue; 2877 2878 bool isLiveOut = hasOnlyLiveOutUses(SU); 2879 unsigned Opc = Node->getMachineOpcode(); 2880 const MCInstrDesc &MCID = TII->get(Opc); 2881 unsigned NumRes = MCID.getNumDefs(); 2882 unsigned NumOps = MCID.getNumOperands() - NumRes; 2883 for (unsigned j = 0; j != NumOps; ++j) { 2884 if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1) 2885 continue; 2886 SDNode *DU = SU->getNode()->getOperand(j).getNode(); 2887 if (DU->getNodeId() == -1) 2888 continue; 2889 const SUnit *DUSU = &(*SUnits)[DU->getNodeId()]; 2890 if (!DUSU) continue; 2891 for (SUnit::const_succ_iterator I = DUSU->Succs.begin(), 2892 E = DUSU->Succs.end(); I != E; ++I) { 2893 if (I->isCtrl()) continue; 2894 SUnit *SuccSU = I->getSUnit(); 2895 if (SuccSU == SU) 2896 continue; 2897 // Be conservative. Ignore if the nodes aren't at roughly the same 2898 // height. 2899 if (SuccSU->getHeight() < SU->getHeight() && 2900 (SU->getHeight() - SuccSU->getHeight()) > 1) 2901 continue; 2902 // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge 2903 // constrains whatever is using the copy, instead of the copy 2904 // itself. In the case that the copy is coalesced, this 2905 // preserves the intent of the pseudo two-address heuristics. 2906 while (SuccSU->Succs.size() == 1 && 2907 SuccSU->getNode()->isMachineOpcode() && 2908 SuccSU->getNode()->getMachineOpcode() == 2909 TargetOpcode::COPY_TO_REGCLASS) 2910 SuccSU = SuccSU->Succs.front().getSUnit(); 2911 // Don't constrain non-instruction nodes. 2912 if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode()) 2913 continue; 2914 // Don't constrain nodes with physical register defs if the 2915 // predecessor can clobber them. 2916 if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) { 2917 if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI)) 2918 continue; 2919 } 2920 // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG; 2921 // these may be coalesced away. We want them close to their uses.
2922 unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode(); 2923 if (SuccOpc == TargetOpcode::EXTRACT_SUBREG || 2924 SuccOpc == TargetOpcode::INSERT_SUBREG || 2925 SuccOpc == TargetOpcode::SUBREG_TO_REG) 2926 continue; 2927 if (!canClobberReachingPhysRegUse(SuccSU, SU, scheduleDAG, TII, TRI) && 2928 (!canClobber(SuccSU, DUSU) || 2929 (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) || 2930 (!SU->isCommutable && SuccSU->isCommutable)) && 2931 !scheduleDAG->IsReachable(SuccSU, SU)) { 2932 DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #" 2933 << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n"); 2934 scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Artificial)); 2935 } 2936 } 2937 } 2938 } 2939} 2940 2941//===----------------------------------------------------------------------===// 2942// Public Constructor Functions 2943//===----------------------------------------------------------------------===// 2944 2945llvm::ScheduleDAGSDNodes * 2946llvm::createBURRListDAGScheduler(SelectionDAGISel *IS, 2947 CodeGenOpt::Level OptLevel) { 2948 const TargetMachine &TM = IS->TM; 2949 const TargetInstrInfo *TII = TM.getInstrInfo(); 2950 const TargetRegisterInfo *TRI = TM.getRegisterInfo(); 2951 2952 BURegReductionPriorityQueue *PQ = 2953 new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, 0); 2954 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel); 2955 PQ->setScheduleDAG(SD); 2956 return SD; 2957} 2958 2959llvm::ScheduleDAGSDNodes * 2960llvm::createSourceListDAGScheduler(SelectionDAGISel *IS, 2961 CodeGenOpt::Level OptLevel) { 2962 const TargetMachine &TM = IS->TM; 2963 const TargetInstrInfo *TII = TM.getInstrInfo(); 2964 const TargetRegisterInfo *TRI = TM.getRegisterInfo(); 2965 2966 SrcRegReductionPriorityQueue *PQ = 2967 new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, 0); 2968 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel); 2969 PQ->setScheduleDAG(SD); 2970 return SD; 2971} 2972 2973llvm::ScheduleDAGSDNodes * 2974llvm::createHybridListDAGScheduler(SelectionDAGISel *IS, 2975 CodeGenOpt::Level OptLevel) { 2976 const TargetMachine &TM = IS->TM; 2977 const TargetInstrInfo *TII = TM.getInstrInfo(); 2978 const TargetRegisterInfo *TRI = TM.getRegisterInfo(); 2979 const TargetLowering *TLI = &IS->getTargetLowering(); 2980 2981 HybridBURRPriorityQueue *PQ = 2982 new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI); 2983 2984 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel); 2985 PQ->setScheduleDAG(SD); 2986 return SD; 2987} 2988 2989llvm::ScheduleDAGSDNodes * 2990llvm::createILPListDAGScheduler(SelectionDAGISel *IS, 2991 CodeGenOpt::Level OptLevel) { 2992 const TargetMachine &TM = IS->TM; 2993 const TargetInstrInfo *TII = TM.getInstrInfo(); 2994 const TargetRegisterInfo *TRI = TM.getRegisterInfo(); 2995 const TargetLowering *TLI = &IS->getTargetLowering(); 2996 2997 ILPBURRPriorityQueue *PQ = 2998 new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI); 2999 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel); 3000 PQ->setScheduleDAG(SD); 3001 return SD; 3002} 3003
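// Note: these factory functions are reached through the RegisterScheduler
// registry entries near the top of this file; with llc, a particular
// scheduler can be requested by name via the -pre-RA-sched flag.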