ScheduleDAGRRList.cpp revision 599a6a88ce1925a6349ac7af9a9638aad1d832cc
//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms.  The basic approach uses a priority
// queue of available nodes to schedule.  One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <climits>
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       "Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation.  This supports both top-down and bottom-up scheduling.
///
class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false
  /// if it is top-down.
  bool isBottomUp;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live".  These nodes must be scheduled before any other nodes that
  /// modify the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

public:
  ScheduleDAGRRList(MachineFunction &mf,
                    bool isbottomup,
                    SchedulingPriorityQueue *availqueue)
    : ScheduleDAGSDNodes(mf), isBottomUp(isbottomup),
      AvailableQueue(availqueue), Topo(SUnits) {
  }

  ~ScheduleDAGRRList() {
    delete AvailableQueue;
  }

  void Schedule();
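
  // The scheduler mutates the graph while it runs (backtracking, node
  // cloning, artificial edges), so it keeps a topological ordering of the
  // SUnits up to date.  That makes the reachability and cycle queries below
  // cheap compared to re-running a DFS each time an edge is about to be
  // added; ListScheduleBottomUp relies on this, e.g.:
  //
  //   if (!WillCreateCycle(TrySU, OldSU))
  //     AddPred(TrySU, SDep(OldSU, SDep::Order, /*Latency=*/1, /*Reg=*/0,
  //                         /*isNormalMemory=*/false, /*isMustAlias=*/false,
  //                         /*isArtificial=*/true));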

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU
  /// will create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - Adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
  void ReleaseSucc(SUnit *SU, const SDep *SuccEdge);
  void ReleaseSuccessors(SUnit *SU);
  void CapturePred(SDep *PredEdge);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  void ScheduleNodeTopDown(SUnit*, unsigned);
  void UnscheduleNodeBottomUp(SUnit*);
  void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// ForceUnitLatencies - Return true, since register-pressure-reducing
  /// scheduling doesn't need actual latency information.
  bool ForceUnitLatencies() const { return true; }
};
}  // end anonymous namespace


/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build the scheduling graph.
  BuildSchedGraph();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  AvailableQueue->releaseState();
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//
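
// The bottom-up scheduler works from uses toward definitions: when a node is
// scheduled, each predecessor's NumSuccsLeft count is decremented, and a
// predecessor whose count reaches zero becomes available.  A small
// illustrative DAG (not from any particular input):
//
//      A         A's value is used by B and C, so NumSuccsLeft(A) == 2.
//     / \        Scheduling B releases one count; A only becomes available
//    B   C       once C has been scheduled as well.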

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor.  Add it to
/// the AvailableQueue if the count reaches zero.  Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  --PredSU->NumSuccsLeft;

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft < 0) {
    cerr << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled.  Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;
    AvailableQueue->push(PredSU);
  }
}

void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
  // Bottom up: release predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register.  Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (!LiveRegDefs[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegDefs[I->getReg()] = I->getSUnit();
        LiveRegCycles[I->getReg()] = CurCycle;
      }
    }
  }
}

/// ScheduleNodeBottomUp - Add the node to the schedule.  Decrement the pending
/// count of its predecessors.  If a predecessor pending count is zero, add it
/// to the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
  SU->setHeightToAtLeast(CurCycle);
  Sequence.push_back(SU);

  ReleasePredecessors(SU, CurCycle);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (LiveRegCycles[I->getReg()] == I->getSUnit()->getHeight()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        assert(LiveRegDefs[I->getReg()] == SU &&
               "Physical register dependency violated?");
        --NumLiveRegs;
        LiveRegDefs[I->getReg()] = NULL;
        LiveRegCycles[I->getReg()] = 0;
      }
    }
  }

  SU->isScheduled = true;
  AvailableQueue->ScheduledNode(SU);
}

/// CapturePred - This does the opposite of ReleasePred.  Since SU is being
/// unscheduled, increase the succ left count of its predecessors.  Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  ++PredSU->NumSuccsLeft;
}
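
// Unscheduling must undo ScheduleNodeBottomUp exactly: CapturePred restores
// each predecessor's NumSuccsLeft count, and the live-register bookkeeping in
// UnscheduleNodeBottomUp below reinstates any physical register def that the
// unscheduled node had retired.  BacktrackBottomUp pops nodes off Sequence
// and applies this, one node at a time, in reverse schedule order.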

/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update it
/// and its predecessors' state to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DOUT << "*** Unscheduling [" << SU->getHeight() << "]: ";
  DEBUG(SU->dump(this));

  AvailableQueue->UnscheduledNode(SU);

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU->getHeight() == LiveRegCycles[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegCycles[I->getReg()] = 0;
    }
  }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (!LiveRegDefs[I->getReg()]) {
        LiveRegDefs[I->getReg()] = SU;
        ++NumLiveRegs;
      }
      if (I->getSUnit()->getHeight() < LiveRegCycles[I->getReg()])
        LiveRegCycles[I->getReg()] = I->getSUnit()->getHeight();
    }
  }

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  AvailableQueue->push(SU);
}

/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
/// BtCycle in order to schedule a specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
                                          unsigned &CurCycle) {
  SUnit *OldSU = NULL;
  while (CurCycle > BtCycle) {
    OldSU = Sequence.back();
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    UnscheduleNodeBottomUp(OldSU);
    --CurCycle;
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  ++NumBacktracks;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  if (N->getFlaggedNode())
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT == MVT::Flag)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    MVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Flag)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));
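
    // At this point the folded operation has been split back into a separate
    // load feeding a register-to-register instruction.  Illustrative sketch
    // (assuming an X86-like target; not taken from this code): a load-folded
    //   t0 = ADD32rm r1, [mem]
    // becomes
    //   t1 = MOV32rm [mem]      // LoadNode / LoadSU
    //   t0 = ADD32rr r1, t1     // N / NewSU
    // so the scheduler can place the two halves independently.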

    // LoadNode may already exist.  This can happen when there is another
    // load from the same location that produces the same type of value
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);
      ComputeLatency(LoadSU);
    }

    NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (TID.isCommutable())
      NewSU->isCommutable = true;
    ComputeLatency(NewSU);

    SDep ChainPred;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPred = *I;
      else if (I->getSUnit()->getNode() &&
               I->getSUnit()->getNode()->isOperandOf(LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    if (ChainPred.getSUnit()) {
      RemovePred(SU, ChainPred);
      if (isNewLoad)
        AddPred(LoadSU, ChainPred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }
    if (isNewLoad)
      AddPred(NewSU, SDep(LoadSU, SDep::Order, LoadSU->Latency));

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);
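
  // Only successors that are already scheduled need to be rewired: they
  // consumed the value where it was previously produced, so they must now
  // read it from the clone.  Unscheduled successors keep their edges to the
  // original SU, which stays available to be scheduled again later without
  // the physical register conflict.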

  // Only copy scheduled successors.  Cut them from the old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                               SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors.  Cut them from the old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = TID.getNumDefs();
  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}
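
// While a physical register is "live" (recorded in LiveRegDefs), nothing that
// defines the same register or an alias of it may be scheduled in between.
// As a concrete illustration (assuming an X86-like target): once a
// flag-consuming branch has been scheduled bottom-up, the compare that
// defines EFLAGS is live, and any other EFLAGS-defining node must be delayed
// until the compare itself has been scheduled.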

/// CheckForLiveRegDef - Return true and update the live register vector if the
/// specified register def of the specified SUnit clobbers any "live" registers.
static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  bool Added = false;
  if (LiveRegDefs[Reg] && LiveRegDefs[Reg] != SU) {
    if (RegAdded.insert(Reg)) {
      LRegs.push_back(Reg);
      Added = true;
    }
  }
  for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias)
    if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != SU) {
      if (RegAdded.insert(*Alias)) {
        LRegs.push_back(*Alias);
        Added = true;
      }
    }
  return Added;
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register
/// dependencies.  If the specified node is the last one that's available to
/// schedule, do whatever is necessary (i.e. backtracking or cloning) to make
/// it possible.
bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
                                                 SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep())
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getFlaggedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
        --NumOps;  // Ignore the flag operand.

      for (unsigned i = 2; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = Flags >> 3;

        ++i;  // Skip the ID value.
        if ((Flags & 7) == 2 || (Flags & 7) == 6) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }
  return !LRegs.empty();
}


/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  unsigned CurCycle = 0;

  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU, CurCycle);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority.  If it is not ready put it back.  Schedule the node.
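  //
  // If every candidate popped here is blocked by a live physical register
  // dependency, the loop body escalates through three remedies: backtrack
  // the schedule to a point before the interfering def, duplicate the
  // defining node (CopyAndMoveSuccessors), or, as a last resort, insert
  // cross-register-class copies (InsertCopiesAndMoveSuccs).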
  SmallVector<SUnit*, 4> NotReady;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    bool Delayed = false;
    LRegsMap.clear();
    SUnit *CurSU = AvailableQueue->pop();
    while (CurSU) {
      SmallVector<unsigned, 4> LRegs;
      if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
        break;
      Delayed = true;
      LRegsMap.insert(std::make_pair(CurSU, LRegs));

      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue->pop();
    }

    // All candidates are delayed due to live physical reg dependencies.
    // Try backtracking, code duplication, or inserting cross class copies
    // to resolve it.
    if (Delayed && !CurSU) {
      for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
        SUnit *TrySU = NotReady[i];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

        // Try unscheduling up to the point where it's safe to schedule
        // this node.
        unsigned LiveCycle = CurCycle;
        for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
          unsigned Reg = LRegs[j];
          unsigned LCycle = LiveRegCycles[Reg];
          LiveCycle = std::min(LiveCycle, LCycle);
        }
        SUnit *OldSU = Sequence[LiveCycle];
        if (!WillCreateCycle(TrySU, OldSU)) {
          BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
          // Force the current node to be scheduled before the node that
          // requires the physical reg dep.
          if (OldSU->isAvailable) {
            OldSU->isAvailable = false;
            AvailableQueue->remove(OldSU);
          }
          AddPred(TrySU, SDep(OldSU, SDep::Order, /*Latency=*/1,
                              /*Reg=*/0, /*isNormalMemory=*/false,
                              /*isMustAlias=*/false, /*isArtificial=*/true));
          // If one or more successors has been unscheduled, then the current
          // node is no longer available.  Schedule a successor that's now
          // available instead.
          if (!TrySU->isAvailable)
            CurSU = AvailableQueue->pop();
          else {
            CurSU = TrySU;
            TrySU->isPending = false;
            NotReady.erase(NotReady.begin()+i);
          }
          break;
        }
      }

      if (!CurSU) {
        // Can't backtrack.  If it's too expensive to copy the value, then try
        // to duplicate the nodes that produce these "too expensive to copy"
        // values to break the dependency.  In case even that doesn't work,
        // insert cross class copies.
        // If it's not too expensive, i.e. cost != -1, issue copies.
        SUnit *TrySU = NotReady[0];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
        assert(LRegs.size() == 1 && "Can't handle this yet!");
        unsigned Reg = LRegs[0];
        SUnit *LRDef = LiveRegDefs[Reg];
        MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
        const TargetRegisterClass *RC =
          TRI->getPhysicalRegisterRegClass(Reg, VT);
        const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

        // If cross copy register class is null, then it must be possible to
        // copy the value directly.  Do not try to duplicate the def.
        SUnit *NewDef = 0;
        if (DestRC)
          NewDef = CopyAndMoveSuccessors(LRDef);
        else
          DestRC = RC;
        if (!NewDef) {
          // Issue copies; these can be expensive cross-register-class copies.
          SmallVector<SUnit*, 2> Copies;
          InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
          DOUT << "Adding an edge from SU #" << TrySU->NodeNum
               << " to SU #" << Copies.front()->NodeNum << "\n";
          AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
                              /*Reg=*/0, /*isNormalMemory=*/false,
                              /*isMustAlias=*/false,
                              /*isArtificial=*/true));
          NewDef = Copies.back();
        }

        DOUT << "Adding an edge from SU #" << NewDef->NodeNum
             << " to SU #" << TrySU->NodeNum << "\n";
        LiveRegDefs[Reg] = NewDef;
        AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
                             /*Reg=*/0, /*isNormalMemory=*/false,
                             /*isMustAlias=*/false,
                             /*isArtificial=*/true));
        TrySU->isAvailable = false;
        CurSU = NewDef;
      }

      assert(CurSU && "Unable to resolve live physical register dependencies!");
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // May no longer be available due to backtracking.
      if (NotReady[i]->isAvailable)
        AvailableQueue->push(NotReady[i]);
    }
    NotReady.clear();

    if (CurSU)
      ScheduleNodeBottomUp(CurSU, CurCycle);
    ++CurCycle;
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifySchedule(isBottomUp);
#endif
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor.  Add it to
/// the AvailableQueue if the count reaches zero.  Also update its cycle bound.
void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, const SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();
  --SuccSU->NumPredsLeft;

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0) {
    cerr << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled.  Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) {
    SuccSU->isAvailable = true;
    AvailableQueue->push(SuccSU);
  }
}

void ScheduleDAGRRList::ReleaseSuccessors(SUnit *SU) {
  // Top down: release successors.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    assert(!I->isAssignedRegDep() &&
           "The list-tdrr scheduler doesn't yet support physreg dependencies!");

    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule.  Decrement the pending
/// count of its successors.  If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);
  Sequence.push_back(SU);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue->ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue->push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While Available queue is not empty, grab the node with the highest
  // priority.  If it is not ready put it back.  Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    SUnit *CurSU = AvailableQueue->pop();

    if (CurSU)
      ScheduleNodeTopDown(CurSU, CurCycle);
    ++CurCycle;
  }

#ifndef NDEBUG
  VerifySchedule(isBottomUp);
#endif
}


//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Implementation
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi-Ullman numbers
// to reduce register pressure.
//
namespace {
  template<class SF>
  class RegReductionPriorityQueue;

  /// Sorting functions for the Available queue.
  struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
    bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
    bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };

  struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
    td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
    td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };
}  // end anonymous namespace

/// CalcNodeSethiUllmanNumber - Compute the Sethi-Ullman number.
/// A smaller number means a higher priority.
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}
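
// A short worked example of the computation above: a node whose non-chain
// predecessors have Sethi-Ullman numbers {2, 2, 1} takes the maximum (2) plus
// one for the tie, giving 3, while a node with no non-chain predecessors
// (a leaf) gets the minimum value of 1.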

namespace {
  template<class SF>
  class VISIBILITY_HIDDEN RegReductionPriorityQueue
   : public SchedulingPriorityQueue {
    PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue;
    unsigned currentQueueId;

  protected:
    // SUnits - The SUnits for the current graph.
    std::vector<SUnit> *SUnits;

    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    ScheduleDAGRRList *scheduleDAG;

    // SethiUllmanNumbers - The Sethi-Ullman number for each node.
    std::vector<unsigned> SethiUllmanNumbers;

  public:
    RegReductionPriorityQueue(const TargetInstrInfo *tii,
                              const TargetRegisterInfo *tri)
      : Queue(SF(this)), currentQueueId(0),
        TII(tii), TRI(tri), scheduleDAG(NULL) {}

    void initNodes(std::vector<SUnit> &sunits) {
      SUnits = &sunits;
      // Add pseudo dependency edges for two-address nodes.
      AddPseudoTwoAddrDeps();
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    void addNode(const SUnit *SU) {
      unsigned SUSize = SethiUllmanNumbers.size();
      if (SUnits->size() > SUSize)
        SethiUllmanNumbers.resize(SUSize*2, 0);
      CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
    }

    void updateNode(const SUnit *SU) {
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }

    unsigned getNodePriority(const SUnit *SU) const {
      assert(SU->NodeNum < SethiUllmanNumbers.size());
      unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
      if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
        // CopyToReg should be close to its uses to facilitate coalescing and
        // avoid spilling.
        return 0;
      if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
          Opc == TargetInstrInfo::INSERT_SUBREG)
        // EXTRACT_SUBREG / INSERT_SUBREG should be close to their uses to
        // facilitate coalescing.
        return 0;
      if (SU->NumSuccs == 0 && SU->NumPreds != 0)
        // If SU does not have a register use, i.e. it doesn't produce a value
        // that would be consumed (e.g. a store), then it terminates a chain of
        // computation.  Give it a large Sethi-Ullman number so it will be
        // scheduled right before its predecessors, so that it doesn't lengthen
        // their live ranges.
        return 0xffff;
      if (SU->NumPreds == 0 && SU->NumSuccs != 0)
        // If SU does not have a register def, schedule it close to its uses
        // because it does not lengthen any live ranges.
        return 0;
      return SethiUllmanNumbers[SU->NodeNum];
    }

    unsigned size() const { return Queue.size(); }

    bool empty() const { return Queue.empty(); }

    void push(SUnit *U) {
      assert(!U->NodeQueueId && "Node in the queue already");
      U->NodeQueueId = ++currentQueueId;
      Queue.push(U);
    }

    void push_all(const std::vector<SUnit *> &Nodes) {
      for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
        push(Nodes[i]);
    }

    SUnit *pop() {
      if (empty()) return NULL;
      SUnit *V = Queue.top();
      Queue.pop();
      V->NodeQueueId = 0;
      return V;
    }

    void remove(SUnit *SU) {
      assert(!Queue.empty() && "Queue is empty!");
      assert(SU->NodeQueueId != 0 && "Not in queue!");
      Queue.erase_one(SU);
      SU->NodeQueueId = 0;
    }

    void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
      scheduleDAG = scheduleDag;
    }

  protected:
    bool canClobber(const SUnit *SU, const SUnit *Op);
    void AddPseudoTwoAddrDeps();
    void CalculateSethiUllmanNumbers();
  };

  typedef RegReductionPriorityQueue<bu_ls_rr_sort>
    BURegReductionPriorityQueue;

  typedef RegReductionPriorityQueue<td_ls_rr_sort>
    TDRegReductionPriorityQueue;
}

/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxHeight = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain succs
    unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be considered
    // to be at the same position.
    if (I->getSUnit()->getNode() &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
      Height = closestSucc(I->getSUnit())+1;
    if (Height > MaxHeight)
      MaxHeight = Height;
  }
  return MaxHeight;
}

/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers, i.e. the number of data dependencies.
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    Scratches++;
  }
  return Scratches;
}

// Bottom up
bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  if (LPriority != RPriority)
    return LPriority > RPriority;

  // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
  // e.g.
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // and the following instructions are both ready.
  // t2 = op c3
  // t4 = op c4
  //
  // Then schedule t2 = op first.
  // i.e.
  // t4 = op c4
  // t2 = op c3
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // This creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist)
    return LDist < RDist;

  // How many registers become live when the node is scheduled.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch)
    return LScratch > RScratch;

  if (left->getHeight() != right->getHeight())
    return left->getHeight() > right->getHeight();

  if (left->getDepth() != right->getDepth())
    return left->getDepth() < right->getDepth();

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}

template<class SF>
bool
RegReductionPriorityQueue<SF>::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->getNode()->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(i).getNode();
        if (DU->getNodeId() != -1 &&
            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
          return true;
      }
    }
  }
  return false;
}
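
// Why the tied-operand check matters (illustrative, assuming an X86-like
// two-address target): an instruction such as t1 = ADD32rr t0, x overwrites
// its tied operand t0, so every other reader of t0 must come earlier in
// program order.  AddPseudoTwoAddrDeps below adds artificial edges to enforce
// that ordering; canClobber detects the tied def&use relationship it needs.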

/// hasCopyToRegUse - Return true if SU has a value successor that is a
/// CopyToReg node.
static bool hasCopyToRegUse(const SUnit *SU) {
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *SuccSU = I->getSUnit();
    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg)
      return true;
  }
  return false;
}

/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->getNode();
  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
  assert(ImpDefs && "Caller should check hasPhysRegDefs");
  const unsigned *SUImpDefs =
    TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
  if (!SUImpDefs)
    return false;
  for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT == MVT::Flag || VT == MVT::Other)
      continue;
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned Reg = ImpDefs[i - NumDefs];
    for (;*SUImpDefs; ++SUImpDefs) {
      unsigned SUReg = *SUImpDefs;
      if (TRI->regsOverlap(Reg, SUReg))
        return true;
    }
  }
  return false;
}

/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule).  If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
template<class SF>
void RegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->getNode();
    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getFlaggedNode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
        continue;
      SDNode *DU = SU->getNode()->getOperand(j).getNode();
      if (DU->getNodeId() == -1)
        continue;
      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
      if (!DUSU) continue;
      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
           E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();
        if (SuccSU == SU)
          continue;
        // Be conservative.  Ignore if nodes aren't at roughly the same
        // depth and height.
        if (SuccSU->getHeight() < SU->getHeight() &&
            (SU->getHeight() - SuccSU->getHeight()) > 1)
          continue;
        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
          continue;
        // Don't constrain nodes with physical register defs if the
        // predecessor can clobber them.
        if (SuccSU->hasPhysRegDefs) {
          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
            continue;
        }
        // Don't constrain extract_subreg / insert_subreg; these may be
        // coalesced away.  We want them close to their uses.
        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
        if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
            SuccOpc == TargetInstrInfo::INSERT_SUBREG)
          continue;
        if ((!canClobber(SuccSU, DUSU) ||
             (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
             (!SU->isCommutable && SuccSU->isCommutable)) &&
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DOUT << "Adding a pseudo-two-addr edge from SU # " << SU->NodeNum
               << " to SU #" << SuccSU->NodeNum << "\n";
          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
                                        /*Reg=*/0, /*isNormalMemory=*/false,
                                        /*isMustAlias=*/false,
                                        /*isArtificial=*/true));
        }
      }
    }
  }
}

/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
template<class SF>
void RegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}

/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
/// predecessors of the successors of the SUnit SU.  Stop when the provided
/// limit is exceeded.
static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
                                                    unsigned Limit) {
  unsigned Sum = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    const SUnit *SuccSU = I->getSUnit();
    for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
         EE = SuccSU->Preds.end(); II != EE; ++II) {
      SUnit *PredSU = II->getSUnit();
      if (!PredSU->isScheduled)
        if (++Sum > Limit)
          return Sum;
    }
  }
  return Sum;
}


// Top down
bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  bool LIsTarget = left->getNode() && left->getNode()->isMachineOpcode();
  bool RIsTarget = right->getNode() && right->getNode()->isMachineOpcode();
  bool LIsFloater = LIsTarget && left->NumPreds == 0;
  bool RIsFloater = RIsTarget && right->NumPreds == 0;
  unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
  unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ?
                    2 : 0;

  if (left->NumSuccs == 0 && right->NumSuccs != 0)
    return false;
  else if (left->NumSuccs != 0 && right->NumSuccs == 0)
    return true;

  if (LIsFloater)
    LBonus -= 2;
  if (RIsFloater)
    RBonus -= 2;
  if (left->NumSuccs == 1)
    LBonus += 2;
  if (right->NumSuccs == 1)
    RBonus += 2;

  if (LPriority+LBonus != RPriority+RBonus)
    return LPriority+LBonus < RPriority+RBonus;

  if (left->getDepth() != right->getDepth())
    return left->getDepth() < right->getDepth();

  if (left->NumSuccsLeft != right->NumSuccsLeft)
    return left->NumSuccsLeft > right->NumSuccsLeft;

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS, bool) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  BURegReductionPriorityQueue *PQ = new BURegReductionPriorityQueue(TII, TRI);

  ScheduleDAGRRList *SD =
    new ScheduleDAGRRList(*IS->MF, true, PQ);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS, bool) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  TDRegReductionPriorityQueue *PQ = new TDRegReductionPriorityQueue(TII, TRI);

  ScheduleDAGRRList *SD =
    new ScheduleDAGRRList(*IS->MF, false, PQ);
  PQ->setScheduleDAG(SD);
  return SD;
}