ScheduleDAGRRList.cpp revision 40ae0f03c8becc9749d4339bfc7ff0b08a0202a1
//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <climits>
#include "llvm/Support/CommandLine.h"
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumCCCopies,   "Number of cross class copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       "Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false
  /// if it is top-down.
  bool isBottomUp;

  /// Fast - True if we are performing fast scheduling.
  ///
  bool Fast;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// LiveRegDefs - A set of physical registers and their definitions that
  /// are "live". These nodes must be scheduled before any other nodes that
  /// modify the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

public:
  ScheduleDAGRRList(SelectionDAG *dag, MachineBasicBlock *bb,
                    const TargetMachine &tm, bool isbottomup, bool f,
                    SchedulingPriorityQueue *availqueue)
    : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup), Fast(f),
      AvailableQueue(availqueue) {
  }

  ~ScheduleDAGRRList() {
    delete AvailableQueue;
  }

  void Schedule();

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU);

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU
  /// will create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);

  /// AddPred - This adds the specified node X as a predecessor of
  /// the current node Y if it is not already one.
  /// This returns true if this is a new predecessor.
  /// Updates the topological ordering if required.
  bool AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
               unsigned PhyReg = 0, int Cost = 1);

  /// RemovePred - This removes the specified node N from the predecessors of
  /// the current node M. Updates the topological ordering if required.
  bool RemovePred(SUnit *M, SUnit *N, bool isCtrl, bool isSpecial);

private:
  void ReleasePred(SUnit*, bool, unsigned);
  void ReleaseSucc(SUnit*, bool isChain, unsigned);
  void CapturePred(SUnit*, SUnit*, bool);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  void ScheduleNodeTopDown(SUnit*, unsigned);
  void UnscheduleNodeBottomUp(SUnit*);
  void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned,
                                  const TargetRegisterClass*,
                                  const TargetRegisterClass*,
                                  SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();
  void CommuteNodesToReducePressure();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= Node2Index.size())
      InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= Node2Index.size())
      InitDAGTopologicalSorting();
    return NewNode;
  }

  /// Functions for preserving the topological ordering
  /// even after dynamic insertions of new edges.
  /// This allows a very fast implementation of IsReachable.

  /// InitDAGTopologicalSorting - create the initial topological
  /// ordering from the DAG to be scheduled.
  void InitDAGTopologicalSorting();

  /// DFS - make a DFS traversal and mark all nodes affected by the
  /// edge insertion. These nodes will later get new topological indexes
  /// by means of the Shift method.
  void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);

  /// Shift - reassign topological indexes for the nodes in the DAG
  /// to preserve the topological ordering.
  void Shift(BitVector& Visited, int LowerBound, int UpperBound);

  /// Allocate - assign the topological index to the node n.
  void Allocate(int n, int index);

  /// Index2Node - Maps topological index to the node number.
  std::vector<int> Index2Node;
  /// Node2Index - Maps the node number to its topological index.
  std::vector<int> Node2Index;
  /// Visited - a set of nodes visited during a DFS traversal.
  BitVector Visited;
};
} // end anonymous namespace


/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build scheduling units.
  BuildSchedUnits();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(DAG));
  if (!Fast) {
    CalculateDepths();
    CalculateHeights();
  }
  InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  AvailableQueue->releaseState();

  if (!Fast)
    CommuteNodesToReducePressure();
}

/// CommuteNodesToReducePressure - If a node is two-address and commutable, and
/// it is not the last use of its first operand, add it to the CommuteSet if
/// possible. It will be commuted when it is translated to an MI.
void ScheduleDAGRRList::CommuteNodesToReducePressure() {
  SmallPtrSet<SUnit*, 4> OperandSeen;
  for (unsigned i = Sequence.size(); i != 0; ) {
    --i;
    SUnit *SU = Sequence[i];
    if (!SU || !SU->getNode()) continue;
    if (SU->isCommutable) {
      unsigned Opc = SU->getNode()->getMachineOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      unsigned NumRes = TID.getNumDefs();
      unsigned NumOps = TID.getNumOperands() - NumRes;
      for (unsigned j = 0; j != NumOps; ++j) {
        if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
          continue;

        SDNode *OpN = SU->getNode()->getOperand(j).getNode();
        SUnit *OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
        if (OpSU && OperandSeen.count(OpSU) == 1) {
          // Ok, so SU is not the last use of OpSU, but SU is two-address so
          // it will clobber OpSU. Try to commute SU if no other source
          // operands are live below.
          bool DoCommute = true;
          for (unsigned k = 0; k < NumOps; ++k) {
            if (k != j) {
              OpN = SU->getNode()->getOperand(k).getNode();
              OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
              if (OpSU && OperandSeen.count(OpSU) == 1) {
                DoCommute = false;
                break;
              }
            }
          }
          if (DoCommute)
            CommuteSet.insert(SU->getNode());
        }

        // Only look at the first use&def node for now.
        break;
      }
    }

    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (!I->isCtrl)
        OperandSeen.insert(I->Dep->OrigNode);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
                                    unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency. For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle. It also depends on the
  // interrupt model (drain vs. freeze).
  PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);

  --PredSU->NumSuccsLeft;

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft < 0) {
    cerr << "*** List scheduling failed! ***\n";
***\n"; 281 PredSU->dump(DAG); 282 cerr << " has been released too many times!\n"; 283 assert(0); 284 } 285#endif 286 287 if (PredSU->NumSuccsLeft == 0) { 288 PredSU->isAvailable = true; 289 AvailableQueue->push(PredSU); 290 } 291} 292 293/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending 294/// count of its predecessors. If a predecessor pending count is zero, add it to 295/// the Available queue. 296void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) { 297 DOUT << "*** Scheduling [" << CurCycle << "]: "; 298 DEBUG(SU->dump(DAG)); 299 SU->Cycle = CurCycle; 300 301 AvailableQueue->ScheduledNode(SU); 302 303 // Bottom up: release predecessors 304 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); 305 I != E; ++I) { 306 ReleasePred(I->Dep, I->isCtrl, CurCycle); 307 if (I->Cost < 0) { 308 // This is a physical register dependency and it's impossible or 309 // expensive to copy the register. Make sure nothing that can 310 // clobber the register is scheduled between the predecessor and 311 // this node. 312 if (!LiveRegDefs[I->Reg]) { 313 ++NumLiveRegs; 314 LiveRegDefs[I->Reg] = I->Dep; 315 LiveRegCycles[I->Reg] = CurCycle; 316 } 317 } 318 } 319 320 // Release all the implicit physical register defs that are live. 321 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); 322 I != E; ++I) { 323 if (I->Cost < 0) { 324 if (LiveRegCycles[I->Reg] == I->Dep->Cycle) { 325 assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!"); 326 assert(LiveRegDefs[I->Reg] == SU && 327 "Physical register dependency violated?"); 328 --NumLiveRegs; 329 LiveRegDefs[I->Reg] = NULL; 330 LiveRegCycles[I->Reg] = 0; 331 } 332 } 333 } 334 335 SU->isScheduled = true; 336} 337 338/// CapturePred - This does the opposite of ReleasePred. Since SU is being 339/// unscheduled, incrcease the succ left count of its predecessors. Remove 340/// them from AvailableQueue if necessary. 341void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) { 342 unsigned CycleBound = 0; 343 for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end(); 344 I != E; ++I) { 345 if (I->Dep == SU) 346 continue; 347 CycleBound = std::max(CycleBound, 348 I->Dep->Cycle + PredSU->Latency); 349 } 350 351 if (PredSU->isAvailable) { 352 PredSU->isAvailable = false; 353 if (!PredSU->isPending) 354 AvailableQueue->remove(PredSU); 355 } 356 357 PredSU->CycleBound = CycleBound; 358 ++PredSU->NumSuccsLeft; 359} 360 361/// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and 362/// its predecessor states to reflect the change. 
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
  DEBUG(SU->dump(DAG));

  AvailableQueue->UnscheduledNode(SU);

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(I->Dep, SU, I->isCtrl);
    if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->Reg] == I->Dep &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->Reg] = NULL;
      LiveRegCycles[I->Reg] = 0;
    }
  }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      if (!LiveRegDefs[I->Reg]) {
        LiveRegDefs[I->Reg] = SU;
        ++NumLiveRegs;
      }
      if (I->Dep->Cycle < LiveRegCycles[I->Reg])
        LiveRegCycles[I->Reg] = I->Dep->Cycle;
    }
  }

  SU->Cycle = 0;
  SU->isScheduled = false;
  SU->isAvailable = true;
  AvailableQueue->push(SU);
}

/// IsReachable - Checks if SU is reachable from TargetSU.
bool ScheduleDAGRRList::IsReachable(const SUnit *SU, const SUnit *TargetSU) {
  // If insertion of the edge SU->TargetSU would create a cycle
  // then there is a path from TargetSU to SU.
  int UpperBound, LowerBound;
  LowerBound = Node2Index[TargetSU->NodeNum];
  UpperBound = Node2Index[SU->NodeNum];
  bool HasLoop = false;
  // Is Ord(TargetSU) < Ord(SU) ?
  if (LowerBound < UpperBound) {
    Visited.reset();
    // There may be a path from TargetSU to SU. Check for it.
    DFS(TargetSU, UpperBound, HasLoop);
  }
  return HasLoop;
}

/// Allocate - assign the topological index to the node n.
inline void ScheduleDAGRRList::Allocate(int n, int index) {
  Node2Index[n] = index;
  Index2Node[index] = n;
}

/// InitDAGTopologicalSorting - create the initial topological
/// ordering from the DAG to be scheduled.

/// The idea of the algorithm is taken from
/// "Online algorithms for managing the topological order of
/// a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly.
/// This is the MNR algorithm, which was first introduced by
/// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
/// "Maintaining a topological order under edge insertions".
///
/// Short description of the algorithm:
///
/// Topological ordering, ord, of a DAG maps each node to a topological
/// index so that for all edges X->Y it is the case that ord(X) < ord(Y).
///
/// This means that if there is a path from the node X to the node Z,
/// then ord(X) < ord(Z).
///
/// This property can be used to check for reachability of nodes:
/// if Z is reachable from X, then an insertion of the edge Z->X would
/// create a cycle.
///
/// The algorithm first computes a topological ordering for the DAG by
/// initializing the Index2Node and Node2Index arrays and then tries to keep
/// the ordering up-to-date after edge insertions by reordering the DAG.
///
/// On insertion of the edge X->Y, the algorithm first marks by calling DFS
/// the nodes reachable from Y, and then shifts them using Shift to lie
/// immediately after X in Index2Node.
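///
/// A small worked example (hypothetical node names, for illustration only):
/// with edges A->B, A->C and C->D, one valid ordering is ord(A)=0, ord(C)=1,
/// ord(D)=2, ord(B)=3. Inserting the edge B->C violates the order because
/// ord(B)=3 > ord(C)=1, so DFS marks the nodes reachable from C with index
/// below 3 (here C and D), and Shift renumbers them to lie immediately after
/// B: ord(A)=0, ord(B)=1, ord(C)=2, ord(D)=3. Inserting D->A instead would
/// make the DFS from A reach D's own index, which reports a cycle (HasLoop).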
void ScheduleDAGRRList::InitDAGTopologicalSorting() {
  unsigned DAGSize = SUnits.size();
  std::vector<SUnit*> WorkList;
  WorkList.reserve(DAGSize);

  Index2Node.resize(DAGSize);
  Node2Index.resize(DAGSize);

  // Initialize the data structures.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    int NodeNum = SU->NodeNum;
    unsigned Degree = SU->Succs.size();
    // Temporarily use the Node2Index array as scratch space for degree counts.
    Node2Index[NodeNum] = Degree;

    // Is it a node without dependencies?
    if (Degree == 0) {
      assert(SU->Succs.empty() && "SUnit should have no successors");
      // Collect leaf nodes.
      WorkList.push_back(SU);
    }
  }

  int Id = DAGSize;
  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    WorkList.pop_back();
    Allocate(SU->NodeNum, --Id);
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      SUnit *SU = I->Dep;
      if (!--Node2Index[SU->NodeNum])
        // If all dependencies of the node are processed already,
        // then the node can be computed now.
        WorkList.push_back(SU);
    }
  }

  Visited.resize(DAGSize);

#ifndef NDEBUG
  // Check correctness of the ordering.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      assert(Node2Index[SU->NodeNum] > Node2Index[I->Dep->NodeNum] &&
             "Wrong topological sorting");
    }
  }
#endif
}

/// AddPred - adds an edge from SUnit X to SUnit Y.
/// Updates the topological ordering if required.
bool ScheduleDAGRRList::AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
                                unsigned PhyReg, int Cost) {
  int UpperBound, LowerBound;
  LowerBound = Node2Index[Y->NodeNum];
  UpperBound = Node2Index[X->NodeNum];
  bool HasLoop = false;
  // Is Ord(Y) < Ord(X) ?
  if (LowerBound < UpperBound) {
    // Update the topological order.
    Visited.reset();
    DFS(Y, UpperBound, HasLoop);
    assert(!HasLoop && "Inserted edge creates a loop!");
    // Recompute topological indexes.
    Shift(Visited, LowerBound, UpperBound);
  }
  // Now really insert the edge.
  return Y->addPred(X, isCtrl, isSpecial, PhyReg, Cost);
}

/// RemovePred - This removes the specified node N from the predecessors of
/// the current node M. Updates the topological ordering if required.
bool ScheduleDAGRRList::RemovePred(SUnit *M, SUnit *N,
                                   bool isCtrl, bool isSpecial) {
  // InitDAGTopologicalSorting();
  return M->removePred(N, isCtrl, isSpecial);
}

/// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
/// all nodes affected by the edge insertion. These nodes will later get new
/// topological indexes by means of the Shift method.
void ScheduleDAGRRList::DFS(const SUnit *SU, int UpperBound, bool& HasLoop) {
  std::vector<const SUnit*> WorkList;
  WorkList.reserve(SUnits.size());

  WorkList.push_back(SU);
  while (!WorkList.empty()) {
    SU = WorkList.back();
    WorkList.pop_back();
    Visited.set(SU->NodeNum);
    for (int I = SU->Succs.size()-1; I >= 0; --I) {
      int s = SU->Succs[I].Dep->NodeNum;
      if (Node2Index[s] == UpperBound) {
        HasLoop = true;
        return;
      }
      // Visit successors if not already visited and in the affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        WorkList.push_back(SU->Succs[I].Dep);
      }
    }
  }
}

/// Shift - Renumber the nodes so that the topological ordering is
/// preserved.
void ScheduleDAGRRList::Shift(BitVector& Visited, int LowerBound,
                              int UpperBound) {
  std::vector<int> L;
  int shift = 0;
  int i;

  for (i = LowerBound; i <= UpperBound; ++i) {
    // w is node at topological index i.
    int w = Index2Node[i];
    if (Visited.test(w)) {
      // Unmark.
      Visited.reset(w);
      L.push_back(w);
      shift = shift + 1;
    } else {
      Allocate(w, i - shift);
    }
  }

  for (unsigned j = 0; j < L.size(); ++j) {
    Allocate(L[j], i - shift);
    i = i + 1;
  }
}


/// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
/// create a cycle.
bool ScheduleDAGRRList::WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
  if (IsReachable(TargetSU, SU))
    return true;
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (I->Cost < 0 && IsReachable(TargetSU, I->Dep))
      return true;
  return false;
}

/// BacktrackBottomUp - Backtrack scheduling to the previous cycle specified
/// in BtCycle in order to schedule a specific node. If a successor of SU is
/// unscheduled in the process, SU is marked unavailable.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
                                          unsigned &CurCycle) {
  SUnit *OldSU = NULL;
  while (CurCycle > BtCycle) {
    OldSU = Sequence.back();
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    UnscheduleNodeBottomUp(OldSU);
    --CurCycle;
  }

  if (SU->isSucc(OldSU)) {
    assert(false && "Something is wrong!");
    abort();
  }

  ++NumBacktracks;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  if (SU->getNode()->getFlaggedNode())
    return NULL;

  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT == MVT::Flag)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    MVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Flag)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      LoadSU->Depth = SU->Depth;
      LoadSU->Height = SU->Height;
      ComputeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (TID.isCommutable())
      NewSU->isCommutable = true;
    // FIXME: Calculate height / depth and propagate the changes?
    NewSU->Depth = SU->Depth;
    NewSU->Height = SU->Height;
    ComputeLatency(NewSU);

    SUnit *ChainPred = NULL;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl)
        ChainPred = I->Dep;
      else if (I->Dep->getNode() && I->Dep->getNode()->isOperandOf(LoadNode))
        LoadPreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
      else
        NodePreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl)
        ChainSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
                                  I->isCtrl, I->isSpecial));
      else
        NodeSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
                                 I->isCtrl, I->isSpecial));
    }

    if (ChainPred) {
      RemovePred(SU, ChainPred, true, false);
      if (isNewLoad)
        AddPred(LoadSU, ChainPred, true, false);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      SDep *Pred = &LoadPreds[i];
      RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
      if (isNewLoad) {
        AddPred(LoadSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
                Pred->Reg, Pred->Cost);
      }
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      SDep *Pred = &NodePreds[i];
      RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
      AddPred(NewSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
              Pred->Reg, Pred->Cost);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep *Succ = &NodeSuccs[i];
      RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
      AddPred(Succ->Dep, NewSU, Succ->isCtrl, Succ->isSpecial,
              Succ->Reg, Succ->Cost);
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep *Succ = &ChainSuccs[i];
      RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
      if (isNewLoad) {
        AddPred(Succ->Dep, LoadSU, Succ->isCtrl, Succ->isSpecial,
                Succ->Reg, Succ->Cost);
      }
    }
    if (isNewLoad) {
      AddPred(NewSU, LoadSU, false, false);
    }

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isSpecial) {
      AddPred(NewSU, I->Dep, I->isCtrl, false, I->Reg, I->Cost);
      NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
    }

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    if (I->Dep->isScheduled) {
      NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
      AddPred(I->Dep, NewSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i].first;
    bool isCtrl = DelDeps[i].second;
    RemovePred(Succ, SU, isCtrl, false);
  }

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies
/// and move all scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                            const TargetRegisterClass *DestRC,
                                            const TargetRegisterClass *SrcRC,
                                            SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;
  CopyFromSU->Depth = SU->Depth;
  CopyFromSU->Height = SU->Height;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    if (I->Dep->isScheduled) {
      CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
      AddPred(I->Dep, CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i].first;
    bool isCtrl = DelDeps[i].second;
    RemovePred(Succ, SU, isCtrl, false);
  }

  AddPred(CopyFromSU, SU, false, false, Reg, -1);
  AddPred(CopyToSU, CopyFromSU, false, false, Reg, 1);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumCCCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = TID.getNumDefs();
  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register
/// dependencies.
/// If the specified node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
                                                 SmallVector<unsigned, 4> &LRegs){
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      unsigned Reg = I->Reg;
      if (LiveRegDefs[Reg] && LiveRegDefs[Reg] != I->Dep) {
        if (RegAdded.insert(Reg))
          LRegs.push_back(Reg);
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg);
           *Alias; ++Alias)
        if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != I->Dep) {
          if (RegAdded.insert(*Alias))
            LRegs.push_back(*Alias);
        }
    }
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getFlaggedNode()) {
    if (!Node->isMachineOpcode())
      continue;
    const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
      if (LiveRegDefs[*Reg] && LiveRegDefs[*Reg] != SU) {
        if (RegAdded.insert(*Reg))
          LRegs.push_back(*Reg);
      }
      for (const unsigned *Alias = TRI->getAliasSet(*Reg);
           *Alias; ++Alias)
        if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != SU) {
          if (RegAdded.insert(*Alias))
            LRegs.push_back(*Alias);
        }
    }
  }
  return !LRegs.empty();
}


/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  unsigned CurCycle = 0;
  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    bool Delayed = false;
    LRegsMap.clear();
    SUnit *CurSU = AvailableQueue->pop();
    while (CurSU) {
      if (CurSU->CycleBound <= CurCycle) {
        SmallVector<unsigned, 4> LRegs;
        if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
          break;
        Delayed = true;
        LRegsMap.insert(std::make_pair(CurSU, LRegs));
      }

      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue->pop();
    }

    // All candidates are delayed due to live physical reg dependencies.
    // Try backtracking, code duplication, or inserting cross class copies
    // to resolve it.
    if (Delayed && !CurSU) {
      for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
        SUnit *TrySU = NotReady[i];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

        // Try unscheduling up to the point where it's safe to schedule
        // this node.
        unsigned LiveCycle = CurCycle;
        for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
          unsigned Reg = LRegs[j];
          unsigned LCycle = LiveRegCycles[Reg];
          LiveCycle = std::min(LiveCycle, LCycle);
        }
        SUnit *OldSU = Sequence[LiveCycle];
        if (!WillCreateCycle(TrySU, OldSU)) {
          BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
          // Force the current node to be scheduled before the node that
          // requires the physical reg dep.
          if (OldSU->isAvailable) {
            OldSU->isAvailable = false;
            AvailableQueue->remove(OldSU);
          }
          AddPred(TrySU, OldSU, true, true);
          // If one or more successors has been unscheduled, then the current
          // node is no longer available. Schedule a successor that's now
          // available instead.
          if (!TrySU->isAvailable)
            CurSU = AvailableQueue->pop();
          else {
            CurSU = TrySU;
            TrySU->isPending = false;
            NotReady.erase(NotReady.begin()+i);
          }
          break;
        }
      }

      if (!CurSU) {
        // Can't backtrack. Try duplicating the node that produces these
        // "expensive to copy" values to break the dependency. In case even
        // that doesn't work, insert cross class copies.
        SUnit *TrySU = NotReady[0];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
        assert(LRegs.size() == 1 && "Can't handle this yet!");
        unsigned Reg = LRegs[0];
        SUnit *LRDef = LiveRegDefs[Reg];
        SUnit *NewDef = CopyAndMoveSuccessors(LRDef);
        if (!NewDef) {
          // Issue expensive cross register class copies.
          MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
          const TargetRegisterClass *RC =
            TRI->getPhysicalRegisterRegClass(Reg, VT);
          const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
          if (!DestRC) {
            assert(false && "Don't know how to copy this physical register!");
            abort();
          }
          SmallVector<SUnit*, 2> Copies;
          InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
          DOUT << "Adding an edge from SU # " << TrySU->NodeNum
               << " to SU #" << Copies.front()->NodeNum << "\n";
          AddPred(TrySU, Copies.front(), true, true);
          NewDef = Copies.back();
        }

        DOUT << "Adding an edge from SU # " << NewDef->NodeNum
             << " to SU #" << TrySU->NodeNum << "\n";
        LiveRegDefs[Reg] = NewDef;
        AddPred(NewDef, TrySU, true, true);
        TrySU->isAvailable = false;
        CurSU = NewDef;
      }

      if (!CurSU) {
        assert(false &&
               "Unable to resolve live physical register dependencies!");
        abort();
      }
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // May no longer be available due to backtracking.
      if (NotReady[i]->isAvailable)
        AvailableQueue->push(NotReady[i]);
    }
    NotReady.clear();

    if (!CurSU)
      Sequence.push_back(0);
    else {
      ScheduleNodeBottomUp(CurSU, CurCycle);
      Sequence.push_back(CurSU);
    }
    ++CurCycle;
  }

  // Reverse the order, since the sequence was built bottom-up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  // Verify that all SUnits were scheduled.
  bool AnyNotSched = false;
  unsigned DeadNodes = 0;
  unsigned Noops = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (!SUnits[i].isScheduled) {
      if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
        ++DeadNodes;
        continue;
      }
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(DAG);
      cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
    if (SUnits[i].NumSuccsLeft != 0) {
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(DAG);
      cerr << "has successors left!\n";
      AnyNotSched = true;
    }
  }
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(!AnyNotSched);
  assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain,
                                    unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency. For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle. It also depends on the
  // interrupt model (drain vs. freeze).
  SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency);

  --SuccSU->NumPredsLeft;

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0) {
    cerr << "*** List scheduling failed! ***\n";
    SuccSU->dump(DAG);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  if (SuccSU->NumPredsLeft == 0) {
    SuccSU->isAvailable = true;
    AvailableQueue->push(SuccSU);
  }
}


/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(DAG));
  SU->Cycle = CurCycle;

  AvailableQueue->ScheduledNode(SU);

  // Top down: release successors.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(I->Dep, I->isCtrl, CurCycle);
  SU->isScheduled = true;
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue->push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    SUnit *CurSU = AvailableQueue->pop();
    while (CurSU && CurSU->CycleBound > CurCycle) {
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue->pop();
    }

    // Add the nodes that aren't ready back onto the available list.
    AvailableQueue->push_all(NotReady);
    NotReady.clear();

    if (!CurSU)
      Sequence.push_back(0);
    else {
      ScheduleNodeTopDown(CurSU, CurCycle);
      Sequence.push_back(CurSU);
    }
    ++CurCycle;
  }

#ifndef NDEBUG
  // Verify that all SUnits were scheduled.
  bool AnyNotSched = false;
  unsigned DeadNodes = 0;
  unsigned Noops = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (!SUnits[i].isScheduled) {
      if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
        ++DeadNodes;
        continue;
      }
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(DAG);
      cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
    if (SUnits[i].NumPredsLeft != 0) {
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(DAG);
      cerr << "has predecessors left!\n";
      AnyNotSched = true;
    }
  }
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(!AnyNotSched);
  assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif
}


//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Implementation
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
// to reduce register pressure.
//
namespace {
  template<class SF>
  class RegReductionPriorityQueue;

  /// Sorting functions for the Available queue.
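  /// These functors order a max-heap (llvm::PriorityQueue): returning true
  /// from operator()(left, right) means right is preferred over left. Since a
  /// smaller Sethi-Ullman number means higher priority, the implementations
  /// below compare with '>' so the smallest number surfaces at top().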
  struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
    bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
    bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };

  struct bu_ls_rr_fast_sort : public std::binary_function<SUnit*, SUnit*, bool>{
    RegReductionPriorityQueue<bu_ls_rr_fast_sort> *SPQ;
    bu_ls_rr_fast_sort(RegReductionPriorityQueue<bu_ls_rr_fast_sort> *spq)
      : SPQ(spq) {}
    bu_ls_rr_fast_sort(const bu_ls_rr_fast_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };

  struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
    td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
    td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };
} // end anonymous namespace

static inline bool isCopyFromLiveIn(const SUnit *SU) {
  SDNode *N = SU->getNode();
  return N && N->getOpcode() == ISD::CopyFromReg &&
    N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
}

/// CalcNodeBUSethiUllmanNumber - Compute Sethi-Ullman number for bottom-up
/// scheduling. Smaller number is the higher priority.
static unsigned
CalcNodeBUSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl) continue;  // ignore chain preds
    SUnit *PredSU = I->Dep;
    unsigned PredSethiUllman = CalcNodeBUSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}

/// CalcNodeTDSethiUllmanNumber - Compute Sethi-Ullman number for top-down
/// scheduling. Smaller number is the higher priority.
static unsigned
CalcNodeTDSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    SethiUllmanNumber = 0xffff;
  else if (SU->NumSuccsLeft == 0)
    // If SU does not have a use, i.e. it doesn't produce a value that would
    // be consumed (e.g. store), then it terminates a chain of computation.
    // Give it a small SethiUllman number so it will be scheduled right before
    // its predecessors, so that it doesn't lengthen their live ranges.
    SethiUllmanNumber = 0;
  else if (SU->NumPredsLeft == 0 &&
           (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
    SethiUllmanNumber = 0xffff;
  else {
    int Extra = 0;
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl) continue;  // ignore chain preds
      SUnit *PredSU = I->Dep;
      unsigned PredSethiUllman = CalcNodeTDSethiUllmanNumber(PredSU, SUNumbers);
      if (PredSethiUllman > SethiUllmanNumber) {
        SethiUllmanNumber = PredSethiUllman;
        Extra = 0;
      } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
        ++Extra;
    }

    SethiUllmanNumber += Extra;
  }

  return SethiUllmanNumber;
}


namespace {
  template<class SF>
  class VISIBILITY_HIDDEN RegReductionPriorityQueue
    : public SchedulingPriorityQueue {
    PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue;
    unsigned currentQueueId;

  public:
    RegReductionPriorityQueue() :
      Queue(SF(this)), currentQueueId(0) {}

    virtual void initNodes(std::vector<SUnit> &sunits) = 0;

    virtual void addNode(const SUnit *SU) = 0;

    virtual void updateNode(const SUnit *SU) = 0;

    virtual void releaseState() = 0;

    virtual unsigned getNodePriority(const SUnit *SU) const = 0;

    unsigned size() const { return Queue.size(); }

    bool empty() const { return Queue.empty(); }

    void push(SUnit *U) {
      assert(!U->NodeQueueId && "Node in the queue already");
      U->NodeQueueId = ++currentQueueId;
      Queue.push(U);
    }

    void push_all(const std::vector<SUnit *> &Nodes) {
      for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
        push(Nodes[i]);
    }

    SUnit *pop() {
      if (empty()) return NULL;
      SUnit *V = Queue.top();
      Queue.pop();
      V->NodeQueueId = 0;
      return V;
    }

    void remove(SUnit *SU) {
      assert(!Queue.empty() && "Queue is empty!");
      assert(SU->NodeQueueId != 0 && "Not in queue!");
      Queue.erase_one(SU);
      SU->NodeQueueId = 0;
    }
  };

  class VISIBILITY_HIDDEN BURegReductionPriorityQueue
    : public RegReductionPriorityQueue<bu_ls_rr_sort> {
    // SUnits - The SUnits for the current graph.
    std::vector<SUnit> *SUnits;

    // SethiUllmanNumbers - The SethiUllman number for each node.
    std::vector<unsigned> SethiUllmanNumbers;

    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    ScheduleDAGRRList *scheduleDAG;

  public:
    explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii,
                                         const TargetRegisterInfo *tri)
      : TII(tii), TRI(tri), scheduleDAG(NULL) {}

    void initNodes(std::vector<SUnit> &sunits) {
      SUnits = &sunits;
      // Add pseudo dependency edges for two-address nodes.
      AddPseudoTwoAddrDeps();
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    void addNode(const SUnit *SU) {
      unsigned SUSize = SethiUllmanNumbers.size();
      if (SUnits->size() > SUSize)
        SethiUllmanNumbers.resize(SUSize*2, 0);
      CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
    }

    void updateNode(const SUnit *SU) {
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }

    unsigned getNodePriority(const SUnit *SU) const {
      assert(SU->NodeNum < SethiUllmanNumbers.size());
      unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
      if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
        // CopyFromReg should be close to its def because it restricts
        // allocation choices. But if it is a livein then perhaps we want it
        // closer to its uses so it can be coalesced.
        return 0xffff;
      else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
        // CopyToReg should be close to its uses to facilitate coalescing and
        // avoid spilling.
        return 0;
      else if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
               Opc == TargetInstrInfo::INSERT_SUBREG)
        // EXTRACT_SUBREG / INSERT_SUBREG should be close to their uses to
        // facilitate coalescing.
        return 0;
      else if (SU->NumSuccs == 0)
        // If SU does not have a use, i.e. it doesn't produce a value that
        // would be consumed (e.g. store), then it terminates a chain of
        // computation. Give it a large SethiUllman number so it will be
        // scheduled right before its predecessors, so that it doesn't
        // lengthen their live ranges.
        return 0xffff;
      else if (SU->NumPreds == 0)
        // If SU does not have a def, schedule it close to its uses because it
        // does not lengthen any live ranges.
        return 0;
      else
        return SethiUllmanNumbers[SU->NodeNum];
    }

    void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
      scheduleDAG = scheduleDag;
    }

  private:
    bool canClobber(const SUnit *SU, const SUnit *Op);
    void AddPseudoTwoAddrDeps();
    void CalculateSethiUllmanNumbers();
  };


  class VISIBILITY_HIDDEN BURegReductionFastPriorityQueue
    : public RegReductionPriorityQueue<bu_ls_rr_fast_sort> {
    // SUnits - The SUnits for the current graph.
    const std::vector<SUnit> *SUnits;

    // SethiUllmanNumbers - The SethiUllman number for each node.
    std::vector<unsigned> SethiUllmanNumbers;
  public:
    explicit BURegReductionFastPriorityQueue() {}

    void initNodes(std::vector<SUnit> &sunits) {
      SUnits = &sunits;
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    void addNode(const SUnit *SU) {
      unsigned SUSize = SethiUllmanNumbers.size();
      if (SUnits->size() > SUSize)
        SethiUllmanNumbers.resize(SUSize*2, 0);
      CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
    }

    void updateNode(const SUnit *SU) {
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }

    unsigned getNodePriority(const SUnit *SU) const {
      return SethiUllmanNumbers[SU->NodeNum];
    }

  private:
    void CalculateSethiUllmanNumbers();
  };


  class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
    : public RegReductionPriorityQueue<td_ls_rr_sort> {
    // SUnits - The SUnits for the current graph.
    const std::vector<SUnit> *SUnits;

    // SethiUllmanNumbers - The SethiUllman number for each node.
    std::vector<unsigned> SethiUllmanNumbers;

  public:
    TDRegReductionPriorityQueue() {}

    void initNodes(std::vector<SUnit> &sunits) {
      SUnits = &sunits;
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    void addNode(const SUnit *SU) {
      unsigned SUSize = SethiUllmanNumbers.size();
      if (SUnits->size() > SUSize)
        SethiUllmanNumbers.resize(SUSize*2, 0);
      CalcNodeTDSethiUllmanNumber(SU, SethiUllmanNumbers);
    }

    void updateNode(const SUnit *SU) {
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeTDSethiUllmanNumber(SU, SethiUllmanNumbers);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }

    unsigned getNodePriority(const SUnit *SU) const {
      assert(SU->NodeNum < SethiUllmanNumbers.size());
      return SethiUllmanNumbers[SU->NodeNum];
    }

  private:
    void CalculateSethiUllmanNumbers();
  };
}

/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxCycle = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    unsigned Cycle = I->Dep->Cycle;
    // If there are a bunch of CopyToRegs stacked up, they should be considered
    // to be at the same position.
    if (I->Dep->getNode() && I->Dep->getNode()->getOpcode() == ISD::CopyToReg)
      Cycle = closestSucc(I->Dep)+1;
    if (Cycle > MaxCycle)
      MaxCycle = Cycle;
  }
  return MaxCycle;
}

/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers. Live-in operands and live-out results don't count
/// since they are "fixed".
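/// For example (illustrative numbers): a node whose two operands are computed
/// in the block (not CopyFromReg) and whose single use is a real instruction
/// (not CopyToReg) scores 1 + 1 + 10 = 12; as the implementation below shows,
/// uses are weighted ten times more heavily than operands.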
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl) continue;  // ignore chain preds
    if (!I->Dep->getNode() ||
        I->Dep->getNode()->getOpcode() != ISD::CopyFromReg)
      Scratches++;
  }
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl) continue;  // ignore chain succs
    if (!I->Dep->getNode() ||
        I->Dep->getNode()->getOpcode() != ISD::CopyToReg)
      Scratches += 10;
  }
  return Scratches;
}

// Bottom up
bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  if (LPriority != RPriority)
    return LPriority > RPriority;

  // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
  // e.g.
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // and the following instructions are both ready.
  // t2 = op c3
  // t4 = op c4
  //
  // Then schedule t2 = op first.
  // i.e.
  // t4 = op c4
  // t2 = op c3
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // This creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist)
    return LDist < RDist;

  // Intuitively, it's good to push down instructions whose results are
  // liveout so their long live ranges won't conflict with other values
  // which are needed inside the BB. Further prioritize liveout instructions
  // by the number of operands which are calculated within the BB.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch)
    return LScratch > RScratch;

  if (left->Height != right->Height)
    return left->Height > right->Height;

  if (left->Depth != right->Depth)
    return left->Depth < right->Depth;

  if (left->CycleBound != right->CycleBound)
    return left->CycleBound > right->CycleBound;

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}

bool
bu_ls_rr_fast_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  if (LPriority != RPriority)
    return LPriority > RPriority;
  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}

bool
BURegReductionPriorityQueue::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->getNode()->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(i).getNode();
        if (DU->getNodeId() != -1 &&
            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
          return true;
      }
    }
  }
  return false;
}


/// hasCopyToRegUse - Return true if SU has a value successor that is a
/// CopyToReg node.
static bool hasCopyToRegUse(const SUnit *SU) {
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl) continue;
    const SUnit *SuccSU = I->Dep;
    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg)
      return true;
  }
  return false;
}

/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->getNode();
  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
  assert(ImpDefs && "Caller should check hasPhysRegDefs");
  const unsigned *SUImpDefs =
    TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
  if (!SUImpDefs)
    return false;
  for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT == MVT::Flag || VT == MVT::Other)
      continue;
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned Reg = ImpDefs[i - NumDefs];
    for (;*SUImpDefs; ++SUImpDefs) {
      unsigned SUReg = *SUImpDefs;
      if (TRI->regsOverlap(Reg, SUReg))
        return true;
    }
  }
  return false;
}

/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
void BURegReductionPriorityQueue::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->getNode();
    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getFlaggedNode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(j).getNode();
        if (DU->getNodeId() == -1)
          continue;
        const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
        if (!DUSU) continue;
        for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
             E = DUSU->Succs.end(); I != E; ++I) {
          if (I->isCtrl) continue;
          SUnit *SuccSU = I->Dep;
          if (SuccSU == SU)
            continue;
          // Be conservative. Ignore if nodes aren't at roughly the same
          // depth and height.
          if (SuccSU->Height < SU->Height && (SU->Height - SuccSU->Height) > 1)
            continue;
          if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
            continue;
          // Don't constrain nodes with physical register defs if the
          // predecessor can clobber them.
          if (SuccSU->hasPhysRegDefs) {
            if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
              continue;
          }
          // Don't constrain extract_subreg / insert_subreg; these may be
          // coalesced away. We don't want them close to their uses.
          unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
          if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
              SuccOpc == TargetInstrInfo::INSERT_SUBREG)
            continue;
          if ((!canClobber(SuccSU, DUSU) ||
               (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
               (!SU->isCommutable && SuccSU->isCommutable)) &&
              !scheduleDAG->IsReachable(SuccSU, SU)) {
            DOUT << "Adding an edge from SU #" << SU->NodeNum
                 << " to SU #" << SuccSU->NodeNum << "\n";
            scheduleDAG->AddPred(SU, SuccSU, true, true);
          }
        }
      }
    }
  }
}

/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
void BURegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeBUSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}

void BURegReductionFastPriorityQueue::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeBUSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}
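//===----------------------------------------------------------------------===//
// Illustrative sketch (editor's addition, not part of this revision): the
// textbook Sethi-Ullman labeling on a binary expression tree, which the
// CalcNodeBUSethiUllmanNumber helpers above generalize to DAGs. ExprNode is a
// hypothetical stand-in type; a node is assumed to have either two children
// or none.

namespace rr_sketch {
  struct ExprNode {
    const ExprNode *Left;   // null for a leaf
    const ExprNode *Right;  // null for a leaf
  };

  // A leaf needs one register. For an interior node, if the children's labels
  // differ, evaluating the larger-labeled child first lets its label suffice;
  // if they are equal, one extra register is required.
  static unsigned SethiUllmanLabel(const ExprNode *N) {
    if (!N->Left && !N->Right)
      return 1;  // leaf
    unsigned L = SethiUllmanLabel(N->Left);
    unsigned R = SethiUllmanLabel(N->Right);
    return (L == R) ? L + 1 : (L > R ? L : R);
  }
}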
/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
/// predecessors of the successors of the SUnit SU. Stop when the provided
/// limit is exceeded.
static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
                                                    unsigned Limit) {
  unsigned Sum = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    const SUnit *SuccSU = I->Dep;
    for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
         EE = SuccSU->Preds.end(); II != EE; ++II) {
      SUnit *PredSU = II->Dep;
      if (!PredSU->isScheduled)
        if (++Sum > Limit)
          return Sum;
    }
  }
  return Sum;
}

// Top down
bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  bool LIsTarget = left->getNode() && left->getNode()->isMachineOpcode();
  bool RIsTarget = right->getNode() && right->getNode()->isMachineOpcode();
  bool LIsFloater = LIsTarget && left->NumPreds == 0;
  bool RIsFloater = RIsTarget && right->NumPreds == 0;
  unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
  unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;

  if (left->NumSuccs == 0 && right->NumSuccs != 0)
    return false;
  else if (left->NumSuccs != 0 && right->NumSuccs == 0)
    return true;

  if (LIsFloater)
    LBonus -= 2;
  if (RIsFloater)
    RBonus -= 2;
  if (left->NumSuccs == 1)
    LBonus += 2;
  if (right->NumSuccs == 1)
    RBonus += 2;

  if (LPriority+LBonus != RPriority+RBonus)
    return LPriority+LBonus < RPriority+RBonus;

  if (left->Depth != right->Depth)
    return left->Depth < right->Depth;

  if (left->NumSuccsLeft != right->NumSuccsLeft)
    return left->NumSuccsLeft > right->NumSuccsLeft;

  if (left->CycleBound != right->CycleBound)
    return left->CycleBound > right->CycleBound;

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}
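//===----------------------------------------------------------------------===//
// Illustrative sketch (editor's addition, not part of this revision): the
// Limit parameter turns LimitedSumOfUnscheduledPredsOfSuccs into a cheap
// threshold test. With Limit == 1, as used in td_ls_rr_sort above, the result
// saturates at 2 (0, 1, or 2), so the "== 1" comparison asks whether exactly
// one unscheduled predecessor feeds the node's successors. A generic model of
// the same early-exit counting:

#include <vector>

namespace rr_sketch {
  // Counts set flags but bails out as soon as the count exceeds Limit, so
  // the return value saturates at Limit + 1.
  static unsigned countUpTo(const std::vector<bool> &Flags, unsigned Limit) {
    unsigned Sum = 0;
    for (std::vector<bool>::size_type i = 0, e = Flags.size(); i != e; ++i)
      if (Flags[i] && ++Sum > Limit)
        return Sum;
    return Sum;
  }
}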
/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
void TDRegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeTDSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                                    SelectionDAG *DAG,
                                                    const TargetMachine *TM,
                                                    MachineBasicBlock *BB,
                                                    bool Fast) {
  if (Fast)
    return new ScheduleDAGRRList(DAG, BB, *TM, true, true,
                                 new BURegReductionFastPriorityQueue());

  const TargetInstrInfo *TII = TM->getInstrInfo();
  const TargetRegisterInfo *TRI = TM->getRegisterInfo();

  BURegReductionPriorityQueue *PQ = new BURegReductionPriorityQueue(TII, TRI);

  ScheduleDAGRRList *SD =
    new ScheduleDAGRRList(DAG, BB, *TM, true, false, PQ);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
                                                    SelectionDAG *DAG,
                                                    const TargetMachine *TM,
                                                    MachineBasicBlock *BB,
                                                    bool Fast) {
  return new ScheduleDAGRRList(DAG, BB, *TM, false, Fast,
                               new TDRegReductionPriorityQueue());
}
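//===----------------------------------------------------------------------===//
// Illustrative sketch (editor's addition, not part of this revision): how a
// caller that already has the ISel context might dispatch between the two
// factories above. The helper name and the BottomUp flag are hypothetical;
// the factory signatures match the definitions in this file.

namespace rr_sketch {
  static llvm::ScheduleDAG *createRRScheduler(llvm::SelectionDAGISel *IS,
                                              llvm::SelectionDAG *DAG,
                                              const llvm::TargetMachine *TM,
                                              llvm::MachineBasicBlock *BB,
                                              bool BottomUp, bool Fast) {
    return BottomUp
      ? llvm::createBURRListDAGScheduler(IS, DAG, TM, BB, Fast)
      : llvm::createTDRRListDAGScheduler(IS, DAG, TM, BB, Fast);
  }
}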