ScheduleDAGRRList.cpp revision 83ec4b6711980242ef3c55a4fa36b2d7a39c1bfb
//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler -===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms.  The basic approach uses a priority
// queue of available nodes to schedule.  One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/CodeGen/ScheduleDAG.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <climits>
#include <queue>
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumCCCopies,   "Number of cross class copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "  Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       "  Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation.  This supports both top-down and bottom-up scheduling.
///
class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false
  /// if it is top-down.
  bool isBottomUp;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// LiveRegs / LiveRegDefs - A set of physical registers and their
  /// definitions that are "live".  These nodes must be scheduled before any
  /// other nodes that modify the registers can be scheduled.
  SmallSet<unsigned, 4> LiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

public:
  ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
                    const TargetMachine &tm, bool isbottomup,
                    SchedulingPriorityQueue *availqueue)
    : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup),
      AvailableQueue(availqueue) {
  }

  ~ScheduleDAGRRList() {
    delete AvailableQueue;
  }

  void Schedule();

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(SUnit *SU, SUnit *TargetSU);

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU
  /// will create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);

  /// AddPred - This adds the specified node X as a predecessor of
  /// the current node Y if it is not already one.
  /// This returns true if this is a new predecessor.
  /// Updates the topological ordering if required.
  bool AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
               unsigned PhyReg = 0, int Cost = 1);

  /// RemovePred - This removes the specified node N from the predecessors of
  /// the current node M.  Updates the topological ordering if required.
  bool RemovePred(SUnit *M, SUnit *N, bool isCtrl, bool isSpecial);

private:
  void ReleasePred(SUnit*, bool, unsigned);
  void ReleaseSucc(SUnit*, bool isChain, unsigned);
  void CapturePred(SUnit*, SUnit*, bool);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  void ScheduleNodeTopDown(SUnit*, unsigned);
  void UnscheduleNodeBottomUp(SUnit*);
  void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned,
                                  const TargetRegisterClass*,
                                  const TargetRegisterClass*,
                                  SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();
  void CommuteNodesToReducePressure();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= Node2Index.size())
      InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= Node2Index.size())
      InitDAGTopologicalSorting();
    return NewNode;
  }

  /// Functions for preserving the topological ordering
  /// even after dynamic insertions of new edges.
  /// This allows a very fast implementation of IsReachable.

  /**
    The idea of the algorithm is taken from
    "Online algorithms for managing the topological order of
    a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly.
    This is the MNR algorithm, which was first introduced by
    A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
    "Maintaining a topological order under edge insertions".

    Short description of the algorithm:

    A topological ordering, ord, of a DAG maps each node to a topological
    index so that for all edges X->Y it is the case that ord(X) < ord(Y).

    This means that if there is a path from the node X to the node Z,
    then ord(X) < ord(Z).

    This property can be used to check for reachability of nodes:
    if Z is reachable from X, then an insertion of the edge Z->X would
    create a cycle.

    The algorithm first computes a topological ordering for the DAG by
    initializing the Index2Node and Node2Index arrays and then tries to keep
    the ordering up-to-date after edge insertions by reordering the DAG.

    On insertion of the edge X->Y, the algorithm first marks, by calling DFS,
    the nodes reachable from Y, and then shifts them using Shift to lie
    immediately after X in Index2Node.
  */
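
  // A worked illustration of the update (hypothetical nodes and indexes,
  // not taken from any particular DAG): suppose ord = { A:0, B:1, C:2, D:3 }
  // with existing edges A->B and C->D, and the edge D->B is inserted.  Then
  // LowerBound = ord(B) = 1 and UpperBound = ord(D) = 3.  The DFS from B
  // marks only B (D is not reachable from B, so no cycle is reported), and
  // Shift compacts the unmarked nodes of the region downward and re-places
  // the marked ones at its upper end, yielding ord = { A:0, C:1, D:2, B:3 },
  // which is again a valid topological order containing the new edge.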

  /// InitDAGTopologicalSorting - create the initial topological
  /// ordering from the DAG to be scheduled.
  void InitDAGTopologicalSorting();

  /// DFS - make a DFS traversal and mark all nodes affected by the
  /// edge insertion.  These nodes will later get new topological indexes
  /// by means of the Shift method.
  void DFS(SUnit *SU, int UpperBound, bool& HasLoop);

  /// Shift - reassign topological indexes for the nodes in the DAG
  /// to preserve the topological ordering.
  void Shift(BitVector& Visited, int LowerBound, int UpperBound);

  /// Allocate - assign the topological index to the node n.
  void Allocate(int n, int index);

  /// Index2Node - Maps topological index to the node number.
  std::vector<int> Index2Node;
  /// Node2Index - Maps the node number to its topological index.
  std::vector<int> Node2Index;
  /// Visited - a set of nodes visited during a DFS traversal.
  BitVector Visited;
};
}  // end anonymous namespace


/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build scheduling units.
  BuildSchedUnits();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(&DAG));
  CalculateDepths();
  CalculateHeights();
  InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnitMap, SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  AvailableQueue->releaseState();

  CommuteNodesToReducePressure();

  DOUT << "*** Final schedule ***\n";
  DEBUG(dumpSchedule());
  DOUT << "\n";

  // Emit in scheduled order.
  EmitSchedule();
}
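
// To make the heuristic below concrete, consider a hypothetical two-address
// instruction (register names invented for illustration):
//
//   t3 = ADD t1, t2    // two-address: t3 is tied to t1's register
//
// If t1 has another use below this ADD but t2 does not, emitting the ADD as
// written clobbers t1 while it is still live, forcing a copy to preserve it.
// Commuting the operands ("t3 = ADD t2, t1") ties t3 to the dead value t2
// instead, so no copy is needed.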

/// CommuteNodesToReducePressure - If a node is two-address and commutable, and
/// it is not the last use of its first operand, add it to the CommuteSet if
/// possible.  It will be commuted when it is translated to an MI.
void ScheduleDAGRRList::CommuteNodesToReducePressure() {
  SmallPtrSet<SUnit*, 4> OperandSeen;
  for (unsigned i = Sequence.size(); i != 0; ) {
    --i;
    SUnit *SU = Sequence[i];
    if (!SU || !SU->Node) continue;
    if (SU->isCommutable) {
      unsigned Opc = SU->Node->getTargetOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      unsigned NumRes = TID.getNumDefs();
      unsigned NumOps = TID.getNumOperands() - NumRes;
      for (unsigned j = 0; j != NumOps; ++j) {
        if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
          continue;

        SDNode *OpN = SU->Node->getOperand(j).Val;
        SUnit *OpSU = isPassiveNode(OpN) ? NULL : SUnitMap[OpN][SU->InstanceNo];
        if (OpSU && OperandSeen.count(OpSU) == 1) {
          // Ok, so SU is not the last use of OpSU, but SU is two-address so
          // it will clobber OpSU.  Try to commute SU if no other source
          // operands are live below.
          bool DoCommute = true;
          for (unsigned k = 0; k < NumOps; ++k) {
            if (k != j) {
              OpN = SU->Node->getOperand(k).Val;
              OpSU = isPassiveNode(OpN) ? NULL : SUnitMap[OpN][SU->InstanceNo];
              if (OpSU && OperandSeen.count(OpSU) == 1) {
                DoCommute = false;
                break;
              }
            }
          }
          if (DoCommute)
            CommuteSet.insert(SU->Node);
        }

        // Only look at the first use&def node for now.
        break;
      }
    }

    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (!I->isCtrl)
        OperandSeen.insert(I->Dep);
    }
  }
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor.  Add it to
/// the AvailableQueue if the count reaches zero.  Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
                                    unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency.  For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle.  It also depends on the
  // interrupt model (drain vs. freeze).
  PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);

  --PredSU->NumSuccsLeft;

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft < 0) {
    cerr << "*** List scheduling failed! ***\n";
    PredSU->dump(&DAG);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  if (PredSU->NumSuccsLeft == 0) {
    PredSU->isAvailable = true;
    AvailableQueue->push(PredSU);
  }
}

/// ScheduleNodeBottomUp - Add the node to the schedule.  Decrement the pending
/// count of its predecessors.  If a predecessor pending count is zero, add it
/// to the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(&DAG));
  SU->Cycle = CurCycle;

  AvailableQueue->ScheduledNode(SU);

  // Bottom up: release predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(I->Dep, I->isCtrl, CurCycle);
    if (I->Cost < 0) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register.  Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (LiveRegs.insert(I->Reg)) {
        LiveRegDefs[I->Reg] = I->Dep;
        LiveRegCycles[I->Reg] = CurCycle;
      }
    }
  }

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
        LiveRegs.erase(I->Reg);
        assert(LiveRegDefs[I->Reg] == SU &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = NULL;
        LiveRegCycles[I->Reg] = 0;
      }
    }
  }

  SU->isScheduled = true;
}
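
// An illustrative scenario for the LiveRegs bookkeeping above (x86-flavored,
// for exposition only): scheduling a conditional branch that reads EFLAGS
// makes the defining compare "live" in LiveRegDefs[EFLAGS].  Until the
// compare itself is scheduled, any other candidate that clobbers EFLAGS is
// held back by DelayForLiveRegsBottomUp further below, so nothing is placed
// between the compare and its flag reader.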

/// CapturePred - This does the opposite of ReleasePred.  Since SU is being
/// unscheduled, increase the NumSuccsLeft count of its predecessors.  Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
  unsigned CycleBound = 0;
  for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
       I != E; ++I) {
    if (I->Dep == SU)
      continue;
    CycleBound = std::max(CycleBound,
                          I->Dep->Cycle + PredSU->Latency);
  }

  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  PredSU->CycleBound = CycleBound;
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule.  Update its
/// state and its predecessors' states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
  DEBUG(SU->dump(&DAG));

  AvailableQueue->UnscheduledNode(SU);

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(I->Dep, SU, I->isCtrl);
    if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
      LiveRegs.erase(I->Reg);
      assert(LiveRegDefs[I->Reg] == I->Dep &&
             "Physical register dependency violated?");
      LiveRegDefs[I->Reg] = NULL;
      LiveRegCycles[I->Reg] = 0;
    }
  }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      if (LiveRegs.insert(I->Reg)) {
        assert(!LiveRegDefs[I->Reg] &&
               "Physical register dependency violated?");
        LiveRegDefs[I->Reg] = SU;
      }
      if (I->Dep->Cycle < LiveRegCycles[I->Reg])
        LiveRegCycles[I->Reg] = I->Dep->Cycle;
    }
  }

  SU->Cycle = 0;
  SU->isScheduled = false;
  SU->isAvailable = true;
  AvailableQueue->push(SU);
}

/// IsReachable - Checks if SU is reachable from TargetSU.
bool ScheduleDAGRRList::IsReachable(SUnit *SU, SUnit *TargetSU) {
  // If insertion of the edge SU->TargetSU would create a cycle
  // then there is a path from TargetSU to SU.
  int UpperBound, LowerBound;
  LowerBound = Node2Index[TargetSU->NodeNum];
  UpperBound = Node2Index[SU->NodeNum];
  bool HasLoop = false;
  // Is Ord(TargetSU) < Ord(SU)?
  if (LowerBound < UpperBound) {
    Visited.reset();
    // There may be a path from TargetSU to SU.  Check for it.
    DFS(TargetSU, UpperBound, HasLoop);
  }
  return HasLoop;
}

/// Allocate - assign the topological index to the node n.
inline void ScheduleDAGRRList::Allocate(int n, int index) {
  Node2Index[n] = index;
  Index2Node[index] = n;
}
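
// Note on the argument order above, which is easy to misread: the call
// IsReachable(SU, TargetSU) asks whether SU can be reached *from* TargetSU.
// For example, if the only edge is A->B, then IsReachable(B, A) is true and
// IsReachable(A, B) is false, so an edge B->A would be flagged as creating
// a cycle.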

/// InitDAGTopologicalSorting - create the initial topological
/// ordering from the DAG to be scheduled.
void ScheduleDAGRRList::InitDAGTopologicalSorting() {
  unsigned DAGSize = SUnits.size();
  std::vector<unsigned> InDegree(DAGSize);
  std::vector<SUnit*> WorkList;
  WorkList.reserve(DAGSize);
  std::vector<SUnit*> TopOrder;
  TopOrder.reserve(DAGSize);

  // Initialize the data structures.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    int NodeNum = SU->NodeNum;
    unsigned Degree = SU->Succs.size();
    InDegree[NodeNum] = Degree;

    // Is it a node without dependencies?
    if (Degree == 0) {
      assert(SU->Succs.empty() && "SUnit should have no successors");
      // Collect leaf nodes.
      WorkList.push_back(SU);
    }
  }

  while (!WorkList.empty()) {
    SUnit *SU = WorkList.back();
    WorkList.pop_back();
    TopOrder.push_back(SU);
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      SUnit *PredSU = I->Dep;
      if (!--InDegree[PredSU->NodeNum])
        // If all dependencies of the node are processed already,
        // then the node can be computed now.
        WorkList.push_back(PredSU);
    }
  }

  // Second pass, assign the actual topological order as node ids.
  int Id = 0;

  Index2Node.clear();
  Node2Index.clear();
  Index2Node.resize(DAGSize);
  Node2Index.resize(DAGSize);
  Visited.resize(DAGSize);

  for (std::vector<SUnit*>::reverse_iterator TI = TopOrder.rbegin(),
       TE = TopOrder.rend(); TI != TE; ++TI) {
    Allocate((*TI)->NodeNum, Id);
    Id++;
  }

#ifndef NDEBUG
  // Check correctness of the ordering.
  for (unsigned i = 0, e = DAGSize; i != e; ++i) {
    SUnit *SU = &SUnits[i];
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      assert(Node2Index[SU->NodeNum] > Node2Index[I->Dep->NodeNum] &&
             "Wrong topological sorting");
    }
  }
#endif
}

/// AddPred - adds an edge from SUnit X to SUnit Y.
/// Updates the topological ordering if required.
bool ScheduleDAGRRList::AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
                                unsigned PhyReg, int Cost) {
  int UpperBound, LowerBound;
  LowerBound = Node2Index[Y->NodeNum];
  UpperBound = Node2Index[X->NodeNum];
  bool HasLoop = false;
  // Is Ord(X) < Ord(Y)?
  if (LowerBound < UpperBound) {
    // Update the topological order.
    Visited.reset();
    DFS(Y, UpperBound, HasLoop);
    assert(!HasLoop && "Inserted edge creates a loop!");
    // Recompute topological indexes.
    Shift(Visited, LowerBound, UpperBound);
  }
  // Now really insert the edge.
  return Y->addPred(X, isCtrl, isSpecial, PhyReg, Cost);
}

/// RemovePred - This removes the specified node N from the predecessors of
/// the current node M.  Updates the topological ordering if required.
bool ScheduleDAGRRList::RemovePred(SUnit *M, SUnit *N,
                                   bool isCtrl, bool isSpecial) {
  // InitDAGTopologicalSorting();
  return M->removePred(N, isCtrl, isSpecial);
}

/// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
/// all nodes affected by the edge insertion.  These nodes will later get new
/// topological indexes by means of the Shift method.
void ScheduleDAGRRList::DFS(SUnit *SU, int UpperBound, bool& HasLoop) {
  std::vector<SUnit*> WorkList;
  WorkList.reserve(SUnits.size());

  WorkList.push_back(SU);
  while (!WorkList.empty()) {
    SU = WorkList.back();
    WorkList.pop_back();
    Visited.set(SU->NodeNum);
    for (int I = SU->Succs.size()-1; I >= 0; --I) {
      int s = SU->Succs[I].Dep->NodeNum;
      if (Node2Index[s] == UpperBound) {
        HasLoop = true;
        return;
      }
      // Visit successors if not already visited and in affected region.
      if (!Visited.test(s) && Node2Index[s] < UpperBound) {
        WorkList.push_back(SU->Succs[I].Dep);
      }
    }
  }
}
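
// A concrete picture of the renumbering done by Shift below (hypothetical
// nodes): let the affected region be indexes 2..5 holding [P, Q, R, S] with
// Q and R marked by the DFS.  The unmarked nodes keep their relative order
// and slide down (P stays at 2, S moves to 3), and the marked nodes are
// re-placed at the top of the region (Q at 4, R at 5), restoring
// ord(X) < ord(Y) for every edge X->Y.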

/// Shift - Renumber the nodes so that the topological ordering is
/// preserved.
void ScheduleDAGRRList::Shift(BitVector& Visited, int LowerBound,
                              int UpperBound) {
  std::vector<int> L;
  int shift = 0;
  int i;

  for (i = LowerBound; i <= UpperBound; ++i) {
    // w is node at topological index i.
    int w = Index2Node[i];
    if (Visited.test(w)) {
      // Unmark.
      Visited.reset(w);
      L.push_back(w);
      shift = shift + 1;
    } else {
      Allocate(w, i - shift);
    }
  }

  for (unsigned j = 0; j < L.size(); ++j) {
    Allocate(L[j], i - shift);
    i = i + 1;
  }
}


/// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
/// create a cycle.
bool ScheduleDAGRRList::WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
  if (IsReachable(TargetSU, SU))
    return true;
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (I->Cost < 0 && IsReachable(TargetSU, I->Dep))
      return true;
  return false;
}

/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
/// BtCycle in order to schedule a specific node.  All nodes scheduled since
/// then are unscheduled and returned to the available queue.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
                                          unsigned &CurCycle) {
  SUnit *OldSU = NULL;
  while (CurCycle > BtCycle) {
    OldSU = Sequence.back();
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    UnscheduleNodeBottomUp(OldSU);
    --CurCycle;
  }

  if (SU->isSucc(OldSU)) {
    assert(false && "Something is wrong!");
    abort();
  }

  ++NumBacktracks;
}
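
// CopyAndMoveSuccessors below may first "unfold" a load-folding instruction
// to break a physical register dependency.  As a hypothetical x86-flavored
// sketch, a folded-load node such as
//
//   t1 = ADD32rm t0, [mem]      // load folded into the add
//
// is split back into a separate load and a register-register operation,
//
//   t2 = MOV32rm [mem]
//   t1 = ADD32rr t0, t2
//
// after which the pure computation node can be cloned if duplicating the
// value is what resolving the dependency requires.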

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  if (SU->FlaggedNodes.size())
    return NULL;

  SDNode *N = SU->Node;
  if (!N)
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT == MVT::Flag)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDOperand &Op = N->getOperand(i);
    MVT VT = Op.Val->getValueType(Op.ResNo);
    if (VT == MVT::Flag)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 4> NewNodes;
    if (!TII->unfoldMemoryOperand(DAG, N, NewNodes))
      return NULL;

    DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->Node->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, i), SDOperand(N, i));
    DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, OldNumVals-1),
                                  SDOperand(LoadNode, 1));

    NewSU = CreateNewSUnit(N);
    SUnitMap[N].push_back(NewSU);
    const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (TID.isCommutable())
      NewSU->isCommutable = true;
    // FIXME: Calculate height / depth and propagate the changes?
    NewSU->Depth = SU->Depth;
    NewSU->Height = SU->Height;
    ComputeLatency(NewSU);

    // LoadNode may already exist.  This can happen when there is another
    // load from the same location that produces the same type of value
    // but has different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    DenseMap<SDNode*, std::vector<SUnit*> >::iterator SMI =
      SUnitMap.find(LoadNode);
    if (SMI != SUnitMap.end()) {
      LoadSU = SMI->second.front();
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      SUnitMap[LoadNode].push_back(LoadSU);

      LoadSU->Depth = SU->Depth;
      LoadSU->Height = SU->Height;
      ComputeLatency(LoadSU);
    }

    SUnit *ChainPred = NULL;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl)
        ChainPred = I->Dep;
      else if (I->Dep->Node && I->Dep->Node->isOperandOf(LoadNode))
        LoadPreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
      else
        NodePreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl)
        ChainSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
                                  I->isCtrl, I->isSpecial));
      else
        NodeSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
                                 I->isCtrl, I->isSpecial));
    }

    if (ChainPred) {
      RemovePred(SU, ChainPred, true, false);
      if (isNewLoad)
        AddPred(LoadSU, ChainPred, true, false);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      SDep *Pred = &LoadPreds[i];
      RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
      if (isNewLoad) {
        AddPred(LoadSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
                Pred->Reg, Pred->Cost);
      }
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      SDep *Pred = &NodePreds[i];
      RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
      AddPred(NewSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
              Pred->Reg, Pred->Cost);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep *Succ = &NodeSuccs[i];
      RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
      AddPred(Succ->Dep, NewSU, Succ->isCtrl, Succ->isSpecial,
              Succ->Reg, Succ->Cost);
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep *Succ = &ChainSuccs[i];
      RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
      if (isNewLoad) {
        AddPred(Succ->Dep, LoadSU, Succ->isCtrl, Succ->isSpecial,
                Succ->Reg, Succ->Cost);
      }
    }
    if (isNewLoad) {
      AddPred(NewSU, LoadSU, false, false);
    }

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isSpecial) {
      AddPred(NewSU, I->Dep, I->isCtrl, false, I->Reg, I->Cost);
      NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
    }

  // Only copy scheduled successors.  Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    if (I->Dep->isScheduled) {
      NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
      AddPred(I->Dep, NewSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i].first;
    bool isCtrl = DelDeps[i].second;
    RemovePred(Succ, SU, isCtrl, false);
  }

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies
/// and move all scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                              SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;
  CopyFromSU->Depth = SU->Depth;
  CopyFromSU->Height = SU->Height;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors.  Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isSpecial)
      continue;
    if (I->Dep->isScheduled) {
      CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
      AddPred(I->Dep, CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
      DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
    SUnit *Succ = DelDeps[i].first;
    bool isCtrl = DelDeps[i].second;
    RemovePred(Succ, SU, isCtrl, false);
  }

  AddPred(CopyFromSU, SU, false, false, Reg, -1);
  AddPred(CopyToSU, CopyFromSU, false, false, Reg, 1);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumCCCopies;
}
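
// A hedged, hypothetical picture of the transformation above: when a value
// lives in a register class that is impossible or very expensive to copy
// directly (x86's EFLAGS is the classic case), the value is copied out to a
// cheaper cross-copy class and back.  Already-scheduled users are rewired to
// read the CopyToSU result, while the original def stays available for the
// node whose conflict triggered the copies.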

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = TID.getNumDefs();
  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register
/// dependencies.  If the specific node is the last one that's available to
/// schedule, do whatever is necessary (i.e. backtracking or cloning) to make
/// it possible.
bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
                                                 SmallVector<unsigned, 4> &LRegs) {
  if (LiveRegs.empty())
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->Cost < 0) {
      unsigned Reg = I->Reg;
      if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep) {
        if (RegAdded.insert(Reg))
          LRegs.push_back(Reg);
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg);
           *Alias; ++Alias)
        if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep) {
          if (RegAdded.insert(*Alias))
            LRegs.push_back(*Alias);
        }
    }
  }

  for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) {
    SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1];
    if (!Node || !Node->isTargetOpcode())
      continue;
    const TargetInstrDesc &TID = TII->get(Node->getTargetOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
      if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU) {
        if (RegAdded.insert(*Reg))
          LRegs.push_back(*Reg);
      }
      for (const unsigned *Alias = TRI->getAliasSet(*Reg);
           *Alias; ++Alias)
        if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU) {
          if (RegAdded.insert(*Alias))
            LRegs.push_back(*Alias);
        }
    }
  }
  return !LRegs.empty();
}
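
// For instance (a made-up two-node conflict): suppose a compare defining
// EFLAGS is "live" because its flag-reading branch has already been
// scheduled, and the next candidate popped from the queue also implicitly
// defines EFLAGS.  DelayForLiveRegsBottomUp reports EFLAGS in LRegs, and
// ListScheduleBottomUp below parks the candidate in NotReady until the
// interference is resolved by backtracking, cloning, or copy insertion.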

/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  unsigned CurCycle = 0;
  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = SUnitMap[DAG.getRoot().Val].front();
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority.  If it is not ready put it back.  Schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  while (!AvailableQueue->empty()) {
    bool Delayed = false;
    DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
    SUnit *CurSU = AvailableQueue->pop();
    while (CurSU) {
      if (CurSU->CycleBound <= CurCycle) {
        SmallVector<unsigned, 4> LRegs;
        if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
          break;
        Delayed = true;
        LRegsMap.insert(std::make_pair(CurSU, LRegs));
      }

      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue->pop();
    }

    // All candidates are delayed due to live physical reg dependencies.
    // Try backtracking, code duplication, or inserting cross class copies
    // to resolve it.
    if (Delayed && !CurSU) {
      for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
        SUnit *TrySU = NotReady[i];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

        // Try unscheduling up to the point where it's safe to schedule
        // this node.
        unsigned LiveCycle = CurCycle;
        for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
          unsigned Reg = LRegs[j];
          unsigned LCycle = LiveRegCycles[Reg];
          LiveCycle = std::min(LiveCycle, LCycle);
        }
        SUnit *OldSU = Sequence[LiveCycle];
        if (!WillCreateCycle(TrySU, OldSU)) {
          BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
          // Force the current node to be scheduled before the node that
          // requires the physical reg dep.
          if (OldSU->isAvailable) {
            OldSU->isAvailable = false;
            AvailableQueue->remove(OldSU);
          }
          AddPred(TrySU, OldSU, true, true);
          // If one or more successors has been unscheduled, then the current
          // node is no longer available.  Schedule a successor that's now
          // available instead.
          if (!TrySU->isAvailable)
            CurSU = AvailableQueue->pop();
          else {
            CurSU = TrySU;
            TrySU->isPending = false;
            NotReady.erase(NotReady.begin()+i);
          }
          break;
        }
      }

      if (!CurSU) {
        // Can't backtrack.  Try duplicating the node that produces these
        // "expensive to copy" values to break the dependency.  In case even
        // that doesn't work, insert cross class copies.
        SUnit *TrySU = NotReady[0];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
        assert(LRegs.size() == 1 && "Can't handle this yet!");
        unsigned Reg = LRegs[0];
        SUnit *LRDef = LiveRegDefs[Reg];
        SUnit *NewDef = CopyAndMoveSuccessors(LRDef);
        if (!NewDef) {
          // Issue expensive cross register class copies.
          MVT VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII);
          const TargetRegisterClass *RC =
            TRI->getPhysicalRegisterRegClass(Reg, VT);
          const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
          if (!DestRC) {
            assert(false && "Don't know how to copy this physical register!");
            abort();
          }
          SmallVector<SUnit*, 2> Copies;
          InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
          DOUT << "Adding an edge from SU # " << TrySU->NodeNum
               << " to SU #" << Copies.front()->NodeNum << "\n";
          AddPred(TrySU, Copies.front(), true, true);
          NewDef = Copies.back();
        }

        DOUT << "Adding an edge from SU # " << NewDef->NodeNum
             << " to SU #" << TrySU->NodeNum << "\n";
        LiveRegDefs[Reg] = NewDef;
        AddPred(NewDef, TrySU, true, true);
        TrySU->isAvailable = false;
        CurSU = NewDef;
      }

      if (!CurSU) {
        assert(false && "Unable to resolve live physical register dependencies!");
        abort();
      }
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // May no longer be available due to backtracking.
      if (NotReady[i]->isAvailable)
        AvailableQueue->push(NotReady[i]);
    }
    NotReady.clear();

    if (!CurSU)
      Sequence.push_back(0);
    else {
      ScheduleNodeBottomUp(CurSU, CurCycle);
      Sequence.push_back(CurSU);
    }
    ++CurCycle;
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  // Verify that all SUnits were scheduled.
  bool AnyNotSched = false;
  unsigned DeadNodes = 0;
  unsigned Noops = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (!SUnits[i].isScheduled) {
      if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
        ++DeadNodes;
        continue;
      }
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
    if (SUnits[i].NumSuccsLeft != 0) {
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      cerr << "has successors left!\n";
      AnyNotSched = true;
    }
  }
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(!AnyNotSched);
  assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor.  Add it to
/// the AvailableQueue if the count reaches zero.  Also update its cycle bound.
void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain,
                                    unsigned CurCycle) {
  // FIXME: the distance between two nodes is not always == the predecessor's
  // latency.  For example, the reader can very well read the register written
  // by the predecessor later than the issue cycle.  It also depends on the
  // interrupt model (drain vs. freeze).
  SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency);

  --SuccSU->NumPredsLeft;

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0) {
    cerr << "*** List scheduling failed! ***\n";
    SuccSU->dump(&DAG);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  if (SuccSU->NumPredsLeft == 0) {
    SuccSU->isAvailable = true;
    AvailableQueue->push(SuccSU);
  }
}


/// ScheduleNodeTopDown - Add the node to the schedule.  Decrement the pending
/// count of its successors.  If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(&DAG));
  SU->Cycle = CurCycle;

  AvailableQueue->ScheduledNode(SU);

  // Top down: release successors.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(I->Dep, I->isCtrl, CurCycle);
  SU->isScheduled = true;
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue->push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While Available queue is not empty, grab the node with the highest
  // priority.  If it is not ready put it back.  Schedule the node.
  std::vector<SUnit*> NotReady;
  while (!AvailableQueue->empty()) {
    SUnit *CurSU = AvailableQueue->pop();
    while (CurSU && CurSU->CycleBound > CurCycle) {
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue->pop();
    }

    // Add the nodes that aren't ready back onto the available list.
    AvailableQueue->push_all(NotReady);
    NotReady.clear();

    if (!CurSU)
      Sequence.push_back(0);
    else {
      ScheduleNodeTopDown(CurSU, CurCycle);
      Sequence.push_back(CurSU);
    }
    ++CurCycle;
  }

#ifndef NDEBUG
  // Verify that all SUnits were scheduled.
  bool AnyNotSched = false;
  unsigned DeadNodes = 0;
  unsigned Noops = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    if (!SUnits[i].isScheduled) {
      if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
        ++DeadNodes;
        continue;
      }
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      cerr << "has not been scheduled!\n";
      AnyNotSched = true;
    }
    if (SUnits[i].NumPredsLeft != 0) {
      if (!AnyNotSched)
        cerr << "*** List scheduling failed! ***\n";
      SUnits[i].dump(&DAG);
      cerr << "has predecessors left!\n";
      AnyNotSched = true;
    }
  }
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
    if (!Sequence[i])
      ++Noops;
  assert(!AnyNotSched);
  assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
         "The number of nodes scheduled doesn't match the expected number!");
#endif
}



//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Implementation
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi-Ullman numbers
// to reduce register pressure.
//
namespace {
  template<class SF>
  class RegReductionPriorityQueue;

  /// Sorting functions for the Available queue.
  struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
    bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
    bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };

  struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
    td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
    td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };
}  // end anonymous namespace

static inline bool isCopyFromLiveIn(const SUnit *SU) {
  SDNode *N = SU->Node;
  return N && N->getOpcode() == ISD::CopyFromReg &&
    N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
}

namespace {
  template<class SF>
  class VISIBILITY_HIDDEN RegReductionPriorityQueue
    : public SchedulingPriorityQueue {
    std::set<SUnit*, SF> Queue;
    unsigned currentQueueId;

  public:
    RegReductionPriorityQueue() :
      Queue(SF(this)), currentQueueId(0) {}

    virtual void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
                           std::vector<SUnit> &sunits) {}

    virtual void addNode(const SUnit *SU) {}

    virtual void updateNode(const SUnit *SU) {}

    virtual void releaseState() {}

    virtual unsigned getNodePriority(const SUnit *SU) const {
      return 0;
    }

    unsigned size() const { return Queue.size(); }

    bool empty() const { return Queue.empty(); }

    void push(SUnit *U) {
      assert(!U->NodeQueueId && "Node in the queue already");
      U->NodeQueueId = ++currentQueueId;
      Queue.insert(U);
    }

    void push_all(const std::vector<SUnit *> &Nodes) {
      for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
        push(Nodes[i]);
    }

    SUnit *pop() {
      if (empty()) return NULL;
      typename std::set<SUnit*, SF>::iterator i = prior(Queue.end());
      SUnit *V = *i;
      Queue.erase(i);
      V->NodeQueueId = 0;
      return V;
    }

    void remove(SUnit *SU) {
      assert(!Queue.empty() && "Queue is empty!");
      size_t RemovedNum = Queue.erase(SU);
      RemovedNum = RemovedNum;  // Silence compiler warning.
      assert(RemovedNum > 0 && "Not in queue!");
      assert(RemovedNum == 1 && "Multiple times in the queue!");
      SU->NodeQueueId = 0;
    }
  };
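
  // An observation about the container above (a reading of this file, not a
  // documented guarantee): pop() returns the *last* element of the std::set,
  // i.e. the maximum under SF, and both sort functors fall back to comparing
  // "left->NodeQueueId > right->NodeQueueId".  Among otherwise-equal nodes
  // the smallest NodeQueueId -- the earliest pushed -- is therefore popped
  // first, giving FIFO behavior on ties.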

  template<class SF>
  class VISIBILITY_HIDDEN BURegReductionPriorityQueue
    : public RegReductionPriorityQueue<SF> {
    // SUnitMap - SDNode to SUnit mapping (n -> n).
    DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;

    // SUnits - The SUnits for the current graph.
    const std::vector<SUnit> *SUnits;

    // SethiUllmanNumbers - The SethiUllman number for each node.
    std::vector<unsigned> SethiUllmanNumbers;

    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    ScheduleDAGRRList *scheduleDAG;
  public:
    explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii,
                                         const TargetRegisterInfo *tri)
      : TII(tii), TRI(tri), scheduleDAG(NULL) {}

    void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
                   std::vector<SUnit> &sunits) {
      SUnitMap = &sumap;
      SUnits = &sunits;
      // Add pseudo dependency edges for two-address nodes.
      AddPseudoTwoAddrDeps();
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    void addNode(const SUnit *SU) {
      SethiUllmanNumbers.resize(SUnits->size(), 0);
      CalcNodeSethiUllmanNumber(SU);
    }

    void updateNode(const SUnit *SU) {
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeSethiUllmanNumber(SU);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }

    unsigned getNodePriority(const SUnit *SU) const {
      assert(SU->NodeNum < SethiUllmanNumbers.size());
      unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
      if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
        // CopyFromReg should be close to its def because it restricts
        // allocation choices.  But if it is a livein then perhaps we want it
        // closer to its uses so it can be coalesced.
        return 0xffff;
      else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
        // CopyToReg should be close to its uses to facilitate coalescing and
        // avoid spilling.
        return 0;
      else if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
               Opc == TargetInstrInfo::INSERT_SUBREG)
        // EXTRACT_SUBREG / INSERT_SUBREG should be close to its use to
        // facilitate coalescing.
        return 0;
      else if (SU->NumSuccs == 0)
        // If SU does not have a use, i.e. it doesn't produce a value that
        // would be consumed (e.g. store), then it terminates a chain of
        // computation.  Give it a large SethiUllman number so it will be
        // scheduled right before its predecessors and doesn't lengthen their
        // live ranges.
        return 0xffff;
      else if (SU->NumPreds == 0)
        // If SU does not have a def, schedule it close to its uses because it
        // does not lengthen any live ranges.
        return 0;
      else
        return SethiUllmanNumbers[SU->NodeNum];
    }

    void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
      scheduleDAG = scheduleDag;
    }

  private:
    bool canClobber(const SUnit *SU, const SUnit *Op);
    void AddPseudoTwoAddrDeps();
    void CalculateSethiUllmanNumbers();
    unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
  };


  template<class SF>
  class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
    : public RegReductionPriorityQueue<SF> {
    // SUnitMap - SDNode to SUnit mapping (n -> n).
    DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;

    // SUnits - The SUnits for the current graph.
    const std::vector<SUnit> *SUnits;

    // SethiUllmanNumbers - The SethiUllman number for each node.
    std::vector<unsigned> SethiUllmanNumbers;

  public:
    TDRegReductionPriorityQueue() {}

    void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
                   std::vector<SUnit> &sunits) {
      SUnitMap = &sumap;
      SUnits = &sunits;
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    void addNode(const SUnit *SU) {
      SethiUllmanNumbers.resize(SUnits->size(), 0);
      CalcNodeSethiUllmanNumber(SU);
    }

    void updateNode(const SUnit *SU) {
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeSethiUllmanNumber(SU);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }

    unsigned getNodePriority(const SUnit *SU) const {
      assert(SU->NodeNum < SethiUllmanNumbers.size());
      return SethiUllmanNumbers[SU->NodeNum];
    }

  private:
    void CalculateSethiUllmanNumbers();
    unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
  };
}

/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxCycle = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    unsigned Cycle = I->Dep->Cycle;
    // If there are a bunch of CopyToRegs stacked up, they should be considered
    // to be at the same position.
    if (I->Dep->Node && I->Dep->Node->getOpcode() == ISD::CopyToReg)
      Cycle = closestSucc(I->Dep)+1;
    if (Cycle > MaxCycle)
      MaxCycle = Cycle;
  }
  return MaxCycle;
}

/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers.  Live-in operands and live-out results don't count
/// since they are "fixed".
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl) continue;  // ignore chain preds
    if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyFromReg)
      Scratches++;
  }
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl) continue;  // ignore chain succs
    if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyToReg)
      Scratches += 10;
  }
  return Scratches;
}
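
// Making the weights above concrete (an observation about this code, not a
// documented rationale): each non-CopyFromReg operand adds 1 and each value
// use that is not a CopyToReg adds 10, so a node with two computed operands
// and one in-block use scores 12, while a node whose only use is a CopyToReg
// (a live-out) scores just its operand count.  In bu_ls_rr_sort below, the
// lower-scoring live-out producer is popped earlier and therefore placed
// later in the final bottom-up order, i.e. pushed toward the bottom of the
// block.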

// Bottom up
bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  if (LPriority != RPriority)
    return LPriority > RPriority;

  // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
  // e.g.
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // and the following instructions are both ready.
  // t2 = op c3
  // t4 = op c4
  //
  // Then schedule t2 = op first.
  // i.e.
  // t4 = op c4
  // t2 = op c3
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // This creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist)
    return LDist < RDist;

  // Intuitively, it's good to push down instructions whose results are
  // liveout so their long live ranges won't conflict with other values
  // which are needed inside the BB.  Further prioritize liveout instructions
  // by the number of operands which are calculated within the BB.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch)
    return LScratch > RScratch;

  if (left->Height != right->Height)
    return left->Height > right->Height;

  if (left->Depth != right->Depth)
    return left->Depth < right->Depth;

  if (left->CycleBound != right->CycleBound)
    return left->CycleBound > right->CycleBound;

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}

template<class SF> bool
BURegReductionPriorityQueue<SF>::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->Node->getTargetOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->Node->getOperand(i).Val;
        if ((*SUnitMap).find(DU) != (*SUnitMap).end() &&
            Op == (*SUnitMap)[DU][SU->InstanceNo])
          return true;
      }
    }
  }
  return false;
}


/// hasCopyToRegUse - Return true if SU has a value successor that is a
/// CopyToReg node.
static bool hasCopyToRegUse(SUnit *SU) {
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl) continue;
    SUnit *SuccSU = I->Dep;
    if (SuccSU->Node && SuccSU->Node->getOpcode() == ISD::CopyToReg)
      return true;
  }
  return false;
}

/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(SUnit *SuccSU, SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->Node;
  unsigned NumDefs = TII->get(N->getTargetOpcode()).getNumDefs();
  const unsigned *ImpDefs = TII->get(N->getTargetOpcode()).getImplicitDefs();
  if (!ImpDefs)
    return false;
  const unsigned *SUImpDefs =
    TII->get(SU->Node->getTargetOpcode()).getImplicitDefs();
  if (!SUImpDefs)
    return false;
  for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT == MVT::Flag || VT == MVT::Other)
      continue;
    unsigned Reg = ImpDefs[i - NumDefs];
    for (;*SUImpDefs; ++SUImpDefs) {
      unsigned SUReg = *SUImpDefs;
      if (TRI->regsOverlap(Reg, SUReg))
        return true;
    }
  }
  return false;
}
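
// A hypothetical instance of the constraint added below: let D define a
// value read by U1 and by U2, where U2 is two-address with that value tied
// to its def, so U2 overwrites the register it reads.  Adding the pseudo
// control edge U1 -> U2 forces U1 to be placed before U2 in the final
// order, so U1 reads the value before U2 clobbers it and no preserving copy
// is needed.  The checks below skip the edge when it could create a cycle
// or when the constrained node's physical register defs could be clobbered.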

/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule).  If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
template<class SF>
void BURegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = (SUnit *)&((*SUnits)[i]);
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->Node;
    if (!Node || !Node->isTargetOpcode() || SU->FlaggedNodes.size() > 0)
      continue;

    unsigned Opc = Node->getTargetOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->Node->getOperand(j).Val;
        if ((*SUnitMap).find(DU) == (*SUnitMap).end())
          continue;
        SUnit *DUSU = (*SUnitMap)[DU][SU->InstanceNo];
        if (!DUSU) continue;
        for (SUnit::succ_iterator I = DUSU->Succs.begin(),
             E = DUSU->Succs.end(); I != E; ++I) {
          if (I->isCtrl) continue;
          SUnit *SuccSU = I->Dep;
          if (SuccSU == SU)
            continue;
          // Be conservative.  Ignore if nodes aren't at roughly the same
          // height.
          if (SuccSU->Height < SU->Height && (SU->Height - SuccSU->Height) > 1)
            continue;
          if (!SuccSU->Node || !SuccSU->Node->isTargetOpcode())
            continue;
          // Don't constrain nodes with physical register defs if the
          // predecessor can clobber them.
          if (SuccSU->hasPhysRegDefs) {
            if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
              continue;
          }
          // Don't constrain EXTRACT_SUBREG / INSERT_SUBREG; these may be
          // coalesced away.  We want them close to their uses.
          unsigned SuccOpc = SuccSU->Node->getTargetOpcode();
          if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
              SuccOpc == TargetInstrInfo::INSERT_SUBREG)
            continue;
          if ((!canClobber(SuccSU, DUSU) ||
               (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
               (!SU->isCommutable && SuccSU->isCommutable)) &&
              !scheduleDAG->IsReachable(SuccSU, SU)) {
            DOUT << "Adding an edge from SU # " << SU->NodeNum
                 << " to SU #" << SuccSU->NodeNum << "\n";
            scheduleDAG->AddPred(SU, SuccSU, true, true);
          }
        }
      }
    }
  }
}

/// CalcNodeSethiUllmanNumber - Priority is the Sethi-Ullman number.
/// Smaller number is the higher priority.
template<class SF>
unsigned BURegReductionPriorityQueue<SF>::
CalcNodeSethiUllmanNumber(const SUnit *SU) {
  unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl) continue;  // ignore chain preds
    SUnit *PredSU = I->Dep;
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}
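
// A worked example of the recurrence above (a classic Sethi-Ullman pattern
// with hypothetical nodes): a leaf's number is forced to 1.  A node whose
// two operands both have number 1 ties at the maximum, so Extra bumps it to
// 2 -- one register must hold the first result while the second operand is
// computed.  A node with operand numbers 2 and 1 stays at 2, since the
// heavier side can be evaluated first.  Larger numbers thus mark subtrees
// needing more registers, and the comparison in bu_ls_rr_sort places them
// earlier in the final program order.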
/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
template<class SF>
void BURegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
}

/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
/// predecessors of the successors of the SUnit SU. Return early as soon as
/// the sum exceeds the provided limit.
static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
                                                    unsigned Limit) {
  unsigned Sum = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    SUnit *SuccSU = I->Dep;
    for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
         EE = SuccSU->Preds.end(); II != EE; ++II) {
      SUnit *PredSU = II->Dep;
      if (!PredSU->isScheduled)
        if (++Sum > Limit)
          return Sum;
    }
  }
  return Sum;
}

// Top down
bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  bool LIsTarget = left->Node && left->Node->isTargetOpcode();
  bool RIsTarget = right->Node && right->Node->isTargetOpcode();
  bool LIsFloater = LIsTarget && left->NumPreds == 0;
  bool RIsFloater = RIsTarget && right->NumPreds == 0;
  // Keep the bonuses signed: the floater adjustment below can take a zero
  // bonus negative, and unsigned wraparound would corrupt the comparison.
  int LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left, 1) == 1) ? 2 : 0;
  int RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right, 1) == 1) ? 2 : 0;

  if (left->NumSuccs == 0 && right->NumSuccs != 0)
    return false;
  else if (left->NumSuccs != 0 && right->NumSuccs == 0)
    return true;

  if (LIsFloater)
    LBonus -= 2;
  if (RIsFloater)
    RBonus -= 2;
  if (left->NumSuccs == 1)
    LBonus += 2;
  if (right->NumSuccs == 1)
    RBonus += 2;

  if ((int)LPriority+LBonus != (int)RPriority+RBonus)
    return (int)LPriority+LBonus < (int)RPriority+RBonus;

  if (left->Depth != right->Depth)
    return left->Depth < right->Depth;

  if (left->NumSuccsLeft != right->NumSuccsLeft)
    return left->NumSuccsLeft > right->NumSuccsLeft;

  if (left->CycleBound != right->CycleBound)
    return left->CycleBound > right->CycleBound;

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}
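// A note on the bonus arithmetic in td_ls_rr_sort above: the floater
// adjustment subtracts from a bonus that may already be zero, which is why
// the bonuses are kept in signed ints; with unsigned arithmetic the
// subtraction would wrap and silently invert the comparison. A minimal
// standalone demonstration (hypothetical values, not scheduler code):
#if 0
#include <cassert>

void demo() {
  unsigned UBonus = 0;
  UBonus -= 2;                // wraps to 0xFFFFFFFE with 32-bit unsigned
  assert(UBonus > 1000000u);  // "0 - 2" now compares as enormous

  int SBonus = 0;
  SBonus -= 2;                // stays -2, as intended
  assert(5 + SBonus == 3);    // priority 5 with a -2 bonus compares as 3
}
#endif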
/// CalcNodeSethiUllmanNumber - Priority is the Sethi-Ullman number.
/// The smaller the number, the higher the priority.
template<class SF>
unsigned TDRegReductionPriorityQueue<SF>::
CalcNodeSethiUllmanNumber(const SUnit *SU) {
  unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    SethiUllmanNumber = 0xffff;
  else if (SU->NumSuccsLeft == 0)
    // If SU does not have a use, i.e. it doesn't produce a value that would
    // be consumed (e.g. store), then it terminates a chain of computation.
    // Give it a small Sethi-Ullman number so it will be scheduled right
    // before its predecessors and does not lengthen their live ranges.
    SethiUllmanNumber = 0;
  else if (SU->NumPredsLeft == 0 &&
           (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
    SethiUllmanNumber = 0xffff;
  else {
    unsigned Extra = 0;
    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl) continue;  // ignore chain preds
      SUnit *PredSU = I->Dep;
      unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
      if (PredSethiUllman > SethiUllmanNumber) {
        SethiUllmanNumber = PredSethiUllman;
        Extra = 0;
      } else if (PredSethiUllman == SethiUllmanNumber)
        ++Extra;
    }

    SethiUllmanNumber += Extra;
  }

  return SethiUllmanNumber;
}

/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
template<class SF>
void TDRegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                                    SelectionDAG *DAG,
                                                    MachineBasicBlock *BB) {
  const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo();
  const TargetRegisterInfo *TRI = DAG->getTarget().getRegisterInfo();

  BURegReductionPriorityQueue<bu_ls_rr_sort> *priorityQueue =
    new BURegReductionPriorityQueue<bu_ls_rr_sort>(TII, TRI);

  ScheduleDAGRRList *scheduleDAG =
    new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true, priorityQueue);
  priorityQueue->setScheduleDAG(scheduleDAG);
  return scheduleDAG;
}

llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
                                                    SelectionDAG *DAG,
                                                    MachineBasicBlock *BB) {
  return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false,
                               new TDRegReductionPriorityQueue<td_ls_rr_sort>());
}
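// The bottom-up factory above wires up a circular reference in two phases:
// the priority queue is built first, the scheduler is constructed around it
// and takes ownership, and only then does setScheduleDAG hand the queue its
// back-pointer. A minimal standalone sketch of that pattern follows; the
// Queue and Scheduler types are hypothetical stand-ins, not the real
// classes.
#if 0
struct Scheduler;  // forward declaration breaks the cycle at compile time

struct Queue {
  Scheduler *SD;
  Queue() : SD(0) {}
  void setScheduler(Scheduler *S) { SD = S; }  // late back-pointer injection
};

struct Scheduler {
  Queue *Q;  // owned; freed in the destructor
  explicit Scheduler(Queue *q) : Q(q) {}
  ~Scheduler() { delete Q; }
};

Scheduler *createScheduler() {
  Queue *Q = new Queue();
  Scheduler *S = new Scheduler(Q);  // S now owns Q
  Q->setScheduler(S);               // complete the cycle once both exist
  return S;
}
#endif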