ScheduleDAGRRList.cpp revision 26b4f62e52845638a6e353b58ea72326a0aa7b06
//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <climits>
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));

// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
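//
// Illustrative usage (hypothetical command line; the option spellings are the
// cl::opt names defined below, and cl::Hidden options are only listed under
// -help-hidden):
//   llc -pre-RA-sched=list-ilp -max-sched-reorder=4 foo.bc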
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
  "disable-sched-physreg-join", cl::Hidden, cl::init(false),
  cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));

#ifndef NDEBUG
namespace {
  // For sched=list-ilp, count the number of times each factor comes into play.
  enum { FactPressureDiff, FactRegUses, FactStall, FactHeight, FactDepth,
         FactStatic, FactOther, NumFactors };
}
static const char *FactorName[NumFactors] =
{"PressureDiff", "RegUses", "Stall", "Height", "Depth", "Static", "Other"};
static int FactorCount[NumFactors];
#endif //!NDEBUG

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// NeedLatency - True if the scheduler will make use of latency information.
  ///
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count of instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other nodes that modify
  /// the registers can be scheduled.
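  /// NumLiveRegs counts how many of these entries are currently live;
  /// LiveRegGens records, for each live register, the scheduled user that
  /// keeps the value alive.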
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits) {

    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule();

  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - Adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
      AvailableQueue->isReady(SU);
  }

  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
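    // Clone may have appended a new SUnit; a NodeNum at or beyond the old
    // size is a node that Topo has not seen, so the ordering must be rebuilt.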
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool ForceUnitLatencies() const {
    return !NeedLatency;
  }
};
} // end anonymous namespace

/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost) {
  EVT VT = RegDefPos.GetValue();

  // Special handling for untyped values. These values can only come from
  // the expansion of custom DAG-to-DAG patterns.
  if (VT == MVT::untyped) {
    const SDNode *Node = RegDefPos.GetNode();
    unsigned Opcode = Node->getMachineOpcode();

    if (Opcode == TargetOpcode::REG_SEQUENCE) {
      unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
      const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Idx = RegDefPos.GetIdx();
    const MCInstrDesc &Desc = TII->get(Opcode);
    const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI);
    RegClass = RC->getID();
    // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
    // better way to determine it.
    Cost = 1;
  } else {
    RegClass = TLI->getRepRegClassFor(VT)->getID();
    Cost = TLI->getRepRegClassCostFor(VT);
  }
}

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");
#ifndef NDEBUG
  for (int i = 0; i < NumFactors; ++i) {
    FactorCount[i] = 0;
  }
#endif //!NDEBUG

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;
  // Allocate slots for each physical register, plus one for a special register
  // to track the virtual resource of a calling sequence.
  LiveRegDefs.resize(TRI->getNumRegs() + 1, NULL);
  LiveRegGens.resize(TRI->getNumRegs() + 1, NULL);

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();

#ifndef NDEBUG
  for (int i = 0; i < NumFactors; ++i) {
    DEBUG(dbgs() << FactorName[i] << "\t" << FactorCount[i] << "\n");
  }
#endif // !NDEBUG
  AvailableQueue->releaseState();
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
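/// For example, an edge of latency 2 from an SU at height 5 raises the
/// predecessor's height to at least 7, the earliest cycle at which it can
/// issue without a stall.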
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!ForceUnitLatencies()) {
    // Updating predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue, avoid
    // adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}

/// IsChainDependent - Test if Outer is reachable from Inner through
/// chain dependencies.
static bool IsChainDependent(SDNode *Outer, SDNode *Inner) {
  SDNode *N = Outer;
  for (;;) {
    if (N == Inner)
      return true;
    if (N->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        if (IsChainDependent(N->getOperand(i).getNode(), Inner))
          return true;
      return false;
    }
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return false;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return false;
  }
}

/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
/// the corresponding (lowered) CALLSEQ_BEGIN node.
///
/// NestLevel and MaxNest are used in recursion to indicate the current level
/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
/// level seen so far.
///
/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
static SDNode *
FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
                 const TargetInstrInfo *TII) {
  for (;;) {
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      SDNode *Best = 0;
      unsigned BestMaxNest = MaxNest;
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        unsigned MyNestLevel = NestLevel;
        unsigned MyMaxNest = MaxNest;
        if (SDNode *New = FindCallSeqStart(N->getOperand(i).getNode(),
                                           MyNestLevel, MyMaxNest, TII))
          if (!Best || (MyMaxNest > BestMaxNest)) {
            Best = New;
            BestMaxNest = MyMaxNest;
          }
      }
      assert(Best);
      MaxNest = BestMaxNest;
      return Best;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
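    // Walking up the chain (bottom-up), the call frame destroy opcode
    // (lowered CALLSEQ_END) opens a nesting level and the call frame setup
    // opcode (lowered CALLSEQ_BEGIN) closes one; the match is the setup node
    // at which NestLevel returns to zero.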
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
        MaxNest = std::max(MaxNest, NestLevel);
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        --NestLevel;
        if (NestLevel == 0)
          return N;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return 0;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return 0;
  }
}

/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }

  // If we're scheduling a lowered CALLSEQ_END, find the corresponding
  // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
  // these nodes, to prevent other calls from being interscheduled with them.
  unsigned CallResource = TRI->getNumRegs();
  if (!LiveRegDefs[CallResource])
    for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        unsigned NestLevel = 0;
        unsigned MaxNest = 0;
        SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);

        SUnit *Def = &SUnits[N->getNodeId()];
        ++NumLiveRegs;
        LiveRegDefs[CallResource] = Def;
        LiveRegGens[CallResource] = SU;
        break;
      }
}

/// Check to see if any of the pending instructions are ready to issue. If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
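  // Removal below is by swap-with-back, so index i is re-examined after each
  // erase and e shrinks along with the queue.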
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle = PendingQueue[i]->getHeight();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}

/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      HazardRec->RecedeCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}

/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency through such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = SU->getHeight();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this instruction.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, -Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}

/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
  case ISD::EH_LABEL:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);
}

static void resetVRegCycle(SUnit *SU);

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight()
          << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node, its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->ScheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }
  // Release the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  if (LiveRegDefs[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
      }
    }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
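  // For example, with -sched-avg-ipc=2 and the hazard recognizer disabled,
  // the cycle advances only after every second machine-opcode issue.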
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule, update it and
/// its predecessors' states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  // Reclaim the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isMachineOpcode() &&
        SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
      ++NumLiveRegs;
      LiveRegDefs[CallResource] = SU;
      LiveRegGens[CallResource] = NULL;
    }
  }

  // Release the special call resource dependence, if this is the end
  // of a call.
  if (LiveRegGens[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
      }
    }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node. Count the register as newly
      // live before overwriting the entry; checking after the assignment
      // could never fire.
      if (!LiveRegDefs[I->getReg()])
        ++NumLiveRegs;
      LiveRegDefs[I->getReg()] = SU;
      if (LiveRegGens[I->getReg()] == NULL ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else {
    AvailableQueue->push(SU);
  }
  AvailableQueue->UnscheduledNode(SU);
}

/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}

/// BacktrackBottomUp - Unschedule previously scheduled nodes in reverse order
/// back through BtSU so that the specific node SU can be scheduled.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}

static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  if (SU->getNode()->getGluedNode())
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location and producing the same type of value
    // but it has different alignment or volatility.
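    // If it does exist, reuse its SUnit and skip the initialization done for
    // a newly created load below.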
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      ComputeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    ComputeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency));

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
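  // (Artificial edges are skipped below: they are scheduler-inserted ordering
  // constraints, not real dependencies that the clone must inherit.)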
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                                 const TargetRegisterClass *DestRC,
                                                 const TargetRegisterClass *SrcRC,
                                                 SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
    else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      SDep D(CopyFromSU, SDep::Order, /*Latency=*/0,
             /*Reg=*/0, /*isNormalMemory=*/false,
             /*isMustAlias=*/false, /*isArtificial=*/true);
      AddPred(SuccSU, D);
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
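/// Explicit defs occupy the first result values of the node, so the type of
/// implicit def Reg is found at result index
/// NumExplicitDefs + (Reg's position in the implicit-def list).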
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = MCID.getNumDefs();
  for (const unsigned *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// CheckForLiveRegDef - Update the interference list if the specified register
/// def of the specified SUnit clobbers any "live" registers.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (const unsigned *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {

    // Check if Reg is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI)) {
      LRegs.push_back(*AliasI);
    }
  }
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specified node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    // If we're in the middle of scheduling a call, don't begin scheduling
    // another call. Also, don't allow any physical registers to be live across
    // the call.
    if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
      // Add one here so that we include the special calling-sequence resource.
      for (unsigned i = 0, e = TRI->getNumRegs() + 1; i != e; ++i)
        if (LiveRegDefs[i]) {
          SDNode *Gen = LiveRegGens[i]->getNode();
          while (SDNode *Glued = Gen->getGluedNode())
            Gen = Glued;
          if (!IsChainDependent(Gen, Node) && RegAdded.insert(i))
            LRegs.push_back(i);
        }
      continue;
    }
    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = MCID.ImplicitDefs; *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}

/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SmallVector<SUnit*, 4> Interferences;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;

  SUnit *CurSU = AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    LRegsMap.insert(std::make_pair(CurSU, LRegs));

    CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
    Interferences.push_back(CurSU);
    CurSU = AvailableQueue->pop();
  }
  if (CurSU) {
    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
      Interferences[i]->isPending = false;
      assert(Interferences[i]->isAvailable && "must still be available");
      AvailableQueue->push(Interferences[i]);
    }
    return CurSU;
  }

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to resolve it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = NULL;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      AddPred(TrySU, SDep(BtSU, SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false, /*isArtificial=*/true));

      // If one or more successors has been unscheduled, then the current
      // node is no longer available. Schedule a successor that's now
      // available instead.
      if (!TrySU->isAvailable) {
        CurSU = AvailableQueue->pop();
      }
      else {
        CurSU = TrySU;
        TrySU->isPending = false;
        Interferences.erase(Interferences.begin()+i);
      }
      break;
    }
  }

  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try to
    // duplicate the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
    // insert cross class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If the cross copy register class is the same as RC, then it must be
    // possible to copy the value directly. Do not try to duplicate the def.
    // If the cross copy register class is not the same as RC, then it's
    // possible to copy the value but it requires cross register class copies
    // and it is expensive.
    // If the cross copy register class is null, then it's not possible to
    // copy the value at all.
    SUnit *NewDef = 0;
    if (DestRC != RC) {
      NewDef = CopyAndMoveSuccessors(LRDef);
      if (!DestRC && !NewDef)
        report_fatal_error("Can't handle live physical register dependency!");
    }
    if (!NewDef) {
      // Issue copies, these can be expensive cross register class copies.
      SmallVector<SUnit*, 2> Copies;
      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
      DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
            << " to SU #" << Copies.front()->NodeNum << "\n");
      AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false,
                          /*isArtificial=*/true));
      NewDef = Copies.back();
    }

    DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
          << " to SU #" << TrySU->NodeNum << "\n");
    LiveRegDefs[Reg] = NewDef;
    AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
                         /*Reg=*/0, /*isNormalMemory=*/false,
                         /*isMustAlias=*/false,
                         /*isArtificial=*/true));
    TrySU->isAvailable = false;
    CurSU = NewDef;
  }

  assert(CurSU && "Unable to resolve live physical register dependencies!");

  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    Interferences[i]->isPending = false;
    // May no longer be available due to backtracking.
    if (Interferences[i]->isAvailable) {
      AvailableQueue->push(Interferences[i]);
    }
  }
  return CurSU;
}

/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    DEBUG(dbgs() << "\nExamining Available:\n";
          AvailableQueue->dump(this));

    // Pick the best node to schedule taking all constraints into
    // consideration.
    SUnit *SU = PickNodeToScheduleBottomUp();

    AdvancePastStalls(SU);

    ScheduleNodeBottomUp(SU);

    while (AvailableQueue->empty() && !PendingQueue.empty()) {
      // Advance the cycle to free resources. Skip ahead to the next ready SU.
      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
    }
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/true);
#endif
}

//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Definition
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
// to reduce register pressure.
//
namespace {
class RegReductionPQBase;

struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
};

#ifndef NDEBUG
template<class SF>
struct reverse_sort : public queue_sort {
  SF &SortFunc;
  reverse_sort(SF &sf) : SortFunc(sf) {}
  reverse_sort(const reverse_sort &RHS) : SortFunc(RHS.SortFunc) {}

  bool operator()(SUnit* left, SUnit* right) const {
    // reverse left/right rather than simply !SortFunc(left, right)
    // to expose different paths in the comparison logic.
    return SortFunc(right, left);
  }
};
#endif // NDEBUG

/// bu_ls_rr_sort - Priority function for bottom up register pressure
// reduction scheduler.
struct bu_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
  bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// src_ls_rr_sort - Priority function for source order scheduler.
struct src_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  src_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  src_ls_rr_sort(const src_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
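// Balances scheduling for latency against scheduling for register pressure,
// per the sched=list-hybrid registration above.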
struct hybrid_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  hybrid_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
// scheduler.
struct ilp_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  ilp_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

class RegReductionPQBase : public SchedulingPriorityQueue {
protected:
  std::vector<SUnit*> Queue;
  unsigned CurQueueId;
  bool TracksRegPressure;

  // SUnits - The SUnits for the current graph.
  std::vector<SUnit> *SUnits;

  MachineFunction &MF;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const TargetLowering *TLI;
  ScheduleDAGRRList *scheduleDAG;

  // SethiUllmanNumbers - The SethiUllman number for each node.
  std::vector<unsigned> SethiUllmanNumbers;

  /// RegPressure - Tracking current reg pressure per register class.
  ///
  std::vector<unsigned> RegPressure;

  /// RegLimit - Tracking the number of allocatable registers per register
  /// class.
  std::vector<unsigned> RegLimit;

public:
  RegReductionPQBase(MachineFunction &mf,
                     bool hasReadyFilter,
                     bool tracksrp,
                     const TargetInstrInfo *tii,
                     const TargetRegisterInfo *tri,
                     const TargetLowering *tli)
    : SchedulingPriorityQueue(hasReadyFilter),
      CurQueueId(0), TracksRegPressure(tracksrp),
      MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
    if (TracksRegPressure) {
      unsigned NumRC = TRI->getNumRegClasses();
      RegLimit.resize(NumRC);
      RegPressure.resize(NumRC);
      std::fill(RegLimit.begin(), RegLimit.end(), 0);
      std::fill(RegPressure.begin(), RegPressure.end(), 0);
      for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
             E = TRI->regclass_end(); I != E; ++I)
        RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
    }
  }

  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
    scheduleDAG = scheduleDag;
  }

  ScheduleHazardRecognizer* getHazardRec() {
    return scheduleDAG->getHazardRec();
  }

  void initNodes(std::vector<SUnit> &sunits);

  void addNode(const SUnit *SU);

  void updateNode(const SUnit *SU);

  void releaseState() {
    SUnits = 0;
    SethiUllmanNumbers.clear();
    std::fill(RegPressure.begin(), RegPressure.end(), 0);
  }

  unsigned getNodePriority(const SUnit *SU) const;

  unsigned getNodeOrdering(const SUnit *SU) const {
    if (!SU->getNode()) return 0;

    return scheduleDAG->DAG->GetOrdering(SU->getNode());
  }

  bool empty() const { return Queue.empty(); }

  void push(SUnit *U) {
    assert(!U->NodeQueueId && "Node in the queue already");
    U->NodeQueueId = ++CurQueueId;
    Queue.push_back(U);
  }

  void remove(SUnit *SU) {
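    // The queue is unordered: find SU, swap it to the back, and pop, giving
    // O(1) removal after the O(n) find.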
    assert(!Queue.empty() && "Queue is empty!");
    assert(SU->NodeQueueId != 0 && "Not in queue!");
    std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
                                                 SU);
    if (I != prior(Queue.end()))
      std::swap(*I, Queue.back());
    Queue.pop_back();
    SU->NodeQueueId = 0;
  }

  bool tracksRegPressure() const { return TracksRegPressure; }

  void dumpRegPressure() const;

  bool HighRegPressure(const SUnit *SU) const;

  bool MayReduceRegPressure(SUnit *SU) const;

  int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;

  void ScheduledNode(SUnit *SU);

  void UnscheduledNode(SUnit *SU);

protected:
  bool canClobber(const SUnit *SU, const SUnit *Op);
  void AddPseudoTwoAddrDeps();
  void PrescheduleNodesWithMultipleUses();
  void CalculateSethiUllmanNumbers();
};

template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
  std::vector<SUnit *>::iterator Best = Q.begin();
  for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
         E = Q.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
  SUnit *V = *Best;
  if (Best != prior(Q.end()))
    std::swap(*Best, Q.back());
  Q.pop_back();
  return V;
}

template<class SF>
SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
#ifndef NDEBUG
  if (DAG->StressSched) {
    reverse_sort<SF> RPicker(Picker);
    return popFromQueueImpl(Q, RPicker);
  }
#endif
  (void)DAG;
  return popFromQueueImpl(Q, Picker);
}

template<class SF>
class RegReductionPriorityQueue : public RegReductionPQBase {
  SF Picker;

public:
  RegReductionPriorityQueue(MachineFunction &mf,
                            bool tracksrp,
                            const TargetInstrInfo *tii,
                            const TargetRegisterInfo *tri,
                            const TargetLowering *tli)
    : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, tii, tri, tli),
      Picker(this) {}

  bool isBottomUp() const { return SF::IsBottomUp; }

  bool isReady(SUnit *U) const {
    return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
  }

  SUnit *pop() {
    if (Queue.empty()) return NULL;

    SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
    V->NodeQueueId = 0;
    return V;
  }

  void dump(ScheduleDAG *DAG) const {
    // Emulate pop() without clobbering NodeQueueIds.
    std::vector<SUnit*> DumpQueue = Queue;
    SF DumpPicker = Picker;
    while (!DumpQueue.empty()) {
      SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
      dbgs() << "Height " << SU->getHeight() << ": ";
      SU->dump(DAG);
    }
  }
};

typedef RegReductionPriorityQueue<bu_ls_rr_sort>
BURegReductionPriorityQueue;

typedef RegReductionPriorityQueue<src_ls_rr_sort>
SrcRegReductionPriorityQueue;

typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
HybridBURRPriorityQueue;

typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
ILPBURRPriorityQueue;
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Static Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//

// Check for special nodes that bypass scheduling heuristics.
// Currently this pushes TokenFactor nodes down, but may be used for other
// pseudo-ops as well.
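// For example, TokenFactor nodes are marked isScheduleLow, so in the
// comparison below they lose against any node that is not also marked
// isScheduleLow and therefore sink toward the bottom of the schedule,
// regardless of the remaining heuristics.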
//
// Return -1 to schedule right above left, 1 for left above right.
// Return 0 if no bias exists.
static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
  bool LSchedLow = left->isScheduleLow;
  bool RSchedLow = right->isScheduleLow;
  if (LSchedLow != RSchedLow)
    return LSchedLow < RSchedLow ? 1 : -1;
  return 0;
}

/// CalcNodeSethiUllmanNumber - Compute the Sethi-Ullman number.
/// Smaller number is the higher priority.
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}

/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
void RegReductionPQBase::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}

void RegReductionPQBase::addNode(const SUnit *SU) {
  unsigned SUSize = SethiUllmanNumbers.size();
  if (SUnits->size() > SUSize)
    SethiUllmanNumbers.resize(SUSize*2, 0);
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}

void RegReductionPQBase::updateNode(const SUnit *SU) {
  SethiUllmanNumbers[SU->NodeNum] = 0;
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}

// Lower priority means schedule further down. For bottom-up scheduling, lower
// priority SUs are scheduled before higher priority SUs.
unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
  assert(SU->NodeNum < SethiUllmanNumbers.size());
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return 0;
  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return 0;
  if (SU->NumSuccs == 0 && SU->NumPreds != 0)
    // If SU does not have a register use, i.e. it doesn't produce a value
    // that would be consumed (e.g. store), then it terminates a chain of
    // computation. Give it a large SethiUllman number so it will be
    // scheduled right before its predecessors, so that it doesn't lengthen
    // their live ranges.
    return 0xffff;
  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
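    // (Priority 0 is the strongest possible preference: in BURRSort the node
    // with the smaller Sethi-Ullman number wins the comparison, so a
    // zero-priority node is picked as soon as it becomes available, placing
    // it as close to its uses as the dependencies allow.)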
    return 0;
#if 1
  return SethiUllmanNumbers[SU->NodeNum];
#else
  unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
  if (SU->isCallOp) {
    // FIXME: This assumes all of the defs are used as call operands.
    int NP = (int)Priority - SU->getNode()->getNumValues();
    return (NP > 0) ? NP : 0;
  }
  return Priority;
#endif
}

//===----------------------------------------------------------------------===//
// Register Pressure Tracking
//===----------------------------------------------------------------------===//

void RegReductionPQBase::dumpRegPressure() const {
  for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
         E = TRI->regclass_end(); I != E; ++I) {
    const TargetRegisterClass *RC = *I;
    unsigned Id = RC->getID();
    unsigned RP = RegPressure[Id];
    if (!RP) continue;
    DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
          << '\n');
  }
}

bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
  if (!TLI)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      continue;
    }
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      unsigned RCId, Cost;
      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);

      if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
        return true;
    }
  }
  return false;
}

bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
  const SDNode *N = SU->getNode();

  if (!N->isMachineOpcode() || !SU->NumSuccs)
    return false;

  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  for (unsigned i = 0; i != NumDefs; ++i) {
    EVT VT = N->getValueType(i);
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] >= RegLimit[RCId])
      return true;
  }
  return false;
}

// Compute the register pressure contribution of this instruction by counting
// up for uses that are not live and down for defs. Only count register classes
// that are already under high pressure. As a side effect, compute the number
// of uses of registers that are already live.
//
// FIXME: This encompasses the logic in HighRegPressure and
// MayReduceRegPressure so could probably be factored.
int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
  LiveUses = 0;
  int PDiff = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
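    // A predecessor with no register defs left is already fully live; using
    // it does not add pressure, so it is counted in LiveUses below instead
    // of contributing to PDiff.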
    if (PredSU->NumRegDefsLeft == 0) {
      if (PredSU->getNode()->isMachineOpcode())
        ++LiveUses;
      continue;
    }
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      EVT VT = RegDefPos.GetValue();
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] >= RegLimit[RCId])
        ++PDiff;
    }
  }
  const SDNode *N = SU->getNode();

  if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
    return PDiff;

  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  for (unsigned i = 0; i != NumDefs; ++i) {
    EVT VT = N->getValueType(i);
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] >= RegLimit[RCId])
      --PDiff;
  }
  return PDiff;
}

void RegReductionPQBase::ScheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  if (!SU->getNode())
    return;

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      continue;
    }
    // FIXME: The ScheduleDAG currently loses information about which of a
    // node's values is consumed by each dependence. Consequently, if the node
    // defines multiple register classes, we don't know which to pressurize
    // here. Instead the following loop consumes the register defs in an
    // arbitrary order. At least it handles the common case of clustered loads
    // to the same class. For precise liveness, each SDep needs to indicate the
    // result number. But that tightly couples the ScheduleDAG with the
    // SelectionDAG, making updates tricky. A simpler hack would be to attach a
    // value type or register class to SDep.
    //
    // The most important aspect of register tracking is balancing the increase
    // here with the reduction further below. Note that this SU may use
    // multiple defs in PredSU. They can't be determined here, but we've
    // already compensated by reducing NumRegDefsLeft in PredSU during
    // ScheduleDAGSDNodes::AddSchedEdges.
    --PredSU->NumRegDefsLeft;
    unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
      if (SkipRegDefs)
        continue;

      unsigned RCId, Cost;
      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
      RegPressure[RCId] += Cost;
      break;
    }
  }

  // We should have this assert, but there may be dead SDNodes that never
  // materialize as SUnits, so they don't appear to generate liveness.
  //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
  int SkipRegDefs = (int)SU->NumRegDefsLeft;
  for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
       RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
    if (SkipRegDefs > 0)
      continue;
    unsigned RCId, Cost;
    GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
    if (RegPressure[RCId] < Cost) {
      // Register pressure tracking is imprecise. This can happen.
      // But we try hard not to let it happen because it likely results in
      // poor scheduling.
      DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") has too many regdefs\n");
      RegPressure[RCId] = 0;
    }
    else {
      RegPressure[RCId] -= Cost;
    }
  }
  dumpRegPressure();
}

void RegReductionPQBase::UnscheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  const SDNode *N = SU->getNode();
  if (!N) return;

  if (!N->isMachineOpcode()) {
    if (N->getOpcode() != ISD::CopyToReg)
      return;
  } else {
    unsigned Opc = N->getMachineOpcode();
    if (Opc == TargetOpcode::EXTRACT_SUBREG ||
        Opc == TargetOpcode::INSERT_SUBREG ||
        Opc == TargetOpcode::SUBREG_TO_REG ||
        Opc == TargetOpcode::REG_SEQUENCE ||
        Opc == TargetOpcode::IMPLICIT_DEF)
      return;
  }

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
    // counts data deps.
    if (PredSU->NumSuccsLeft != PredSU->Succs.size())
      continue;
    const SDNode *PN = PredSU->getNode();
    if (!PN->isMachineOpcode()) {
      if (PN->getOpcode() == ISD::CopyFromReg) {
        EVT VT = PN->getValueType(0);
        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      }
      continue;
    }
    unsigned POpc = PN->getMachineOpcode();
    if (POpc == TargetOpcode::IMPLICIT_DEF)
      continue;
    if (POpc == TargetOpcode::EXTRACT_SUBREG ||
        POpc == TargetOpcode::INSERT_SUBREG ||
        POpc == TargetOpcode::SUBREG_TO_REG) {
      EVT VT = PN->getValueType(0);
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      continue;
    }
    unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
    for (unsigned i = 0; i != NumDefs; ++i) {
      EVT VT = PN->getValueType(i);
      if (!PN->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
        // Register pressure tracking is imprecise. This can happen.
        RegPressure[RCId] = 0;
      else
        RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
    }
  }

  // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
  // may transfer data dependencies to CopyToReg.
  if (SU->NumSuccs && N->isMachineOpcode()) {
    unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
    }
  }

  dumpRegPressure();
}

//===----------------------------------------------------------------------===//
// Dynamic Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//

/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
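/// For a bottom-up schedule, "closest" means greatest height: e.g. with data
/// successors at heights 3 and 7 this returns 7. Chains of CopyToReg
/// successors are looked through recursively, so a stack of copies counts as
/// a single position (see the recursion below).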
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxHeight = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain succs
    unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be considered
    // to be at the same position.
    if (I->getSUnit()->getNode() &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
      Height = closestSucc(I->getSUnit())+1;
    if (Height > MaxHeight)
      MaxHeight = Height;
  }
  return MaxHeight;
}

/// calcMaxScratches - Returns a cost estimate of the worst case requirement
/// for scratch registers, i.e. the number of data dependencies.
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    Scratches++;
  }
  return Scratches;
}

/// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
/// CopyFromReg from a virtual register.
static bool hasOnlyLiveInOpers(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *PredSU = I->getSUnit();
    if (PredSU->getNode() &&
        PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
      unsigned Reg =
        cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}

/// hasOnlyLiveOutUses - Return true if SU has only value successors that are
/// CopyToReg to a virtual register. This SU def is probably a liveout and
/// it has no other use. It should be scheduled closer to the terminator.
static bool hasOnlyLiveOutUses(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *SuccSU = I->getSUnit();
    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
      unsigned Reg =
        cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}

// Set isVRegCycle for a node with only live in opers and live out uses. Also
// set isVRegCycle for its CopyFromReg operands.
//
// This is only relevant for single-block loops, in which case the VRegCycle
// node is likely an induction variable in which the operand and target virtual
// registers should be coalesced (e.g. pre/post increment values). Setting the
// isVRegCycle flag helps the scheduler prioritize other uses of the same
// CopyFromReg so that this node becomes the virtual register "kill". This
// avoids interference between the values live in and out of the block and
// eliminates a copy inside the loop.
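//
// Schematically (illustrative only), for a loop body that increments an
// induction variable held in %vreg1:
//
//   t0 = CopyFromReg %vreg1     // live in; marked isVRegCycle
//   t1 = add t0, 1              // the VRegCycle node itself
//        CopyToReg %vreg1, t1   // live out
//
// Scheduling the other readers of t0 first lets t0 die at the add, so the
// register allocator can coalesce %vreg1's incoming and outgoing values.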
static void initVRegCycle(SUnit *SU) {
  if (DisableSchedVRegCycle)
    return;

  if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
    return;

  DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");

  SU->isVRegCycle = true;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    I->getSUnit()->isVRegCycle = true;
  }
}

// After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
// CopyFromReg operands. We should no longer penalize other uses of this VReg.
static void resetVRegCycle(SUnit *SU) {
  if (!SU->isVRegCycle)
    return;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    if (PredSU->isVRegCycle) {
      assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
             "VRegCycle def must be CopyFromReg");
      I->getSUnit()->isVRegCycle = false;
    }
  }
}

// Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle.
// This means a node that defines the VRegCycle has not been scheduled yet.
static bool hasVRegCycleUse(const SUnit *SU) {
  // If this SU also defines the VReg, don't hoist it as a "use".
  if (SU->isVRegCycle)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    if (I->getSUnit()->isVRegCycle &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
      DEBUG(dbgs() << "  VReg cycle use: SU (" << SU->NodeNum << ")\n");
      return true;
    }
  }
  return false;
}

// Check for either a dependence (latency) or resource (hazard) stall.
//
// Note: The ScheduleHazardRecognizer interface requires a non-const SU.
static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
  if ((int)SPQ->getCurCycle() < Height) return true;
  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return true;
  return false;
}

// Return -1 if left has higher priority, 1 if right has higher priority.
// Return 0 if latency-based priority is equivalent.
static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
                            RegReductionPQBase *SPQ) {
  // Scheduling an instruction that uses a VReg whose postincrement has not yet
  // been scheduled will induce a copy. Model this as an extra cycle of latency.
  int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
  int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
  int LHeight = (int)left->getHeight() + LPenalty;
  int RHeight = (int)right->getHeight() + RPenalty;

  bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
    BUHasStall(left, LHeight, SPQ);
  bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
    BUHasStall(right, RHeight, SPQ);

  // If scheduling one of the nodes will cause a pipeline stall, delay it.
  // If both will cause a stall, sort them according to their height.
  if (LStall) {
    if (!RStall) {
      DEBUG(++FactorCount[FactStall]);
      return 1;
    }
    if (LHeight != RHeight) {
      DEBUG(++FactorCount[FactStall]);
      return LHeight > RHeight ? 1 : -1;
    }
  } else if (RStall) {
    DEBUG(++FactorCount[FactStall]);
    return -1;
  }

  // If either node is scheduling for latency, sort them by height/depth
  // and latency.
  if (!checkPref || (left->SchedulingPref == Sched::ILP ||
                     right->SchedulingPref == Sched::ILP)) {
    if (DisableSchedCycles) {
      if (LHeight != RHeight) {
        DEBUG(++FactorCount[FactHeight]);
        return LHeight > RHeight ? 1 : -1;
      }
    }
    else {
      // If neither instruction stalls (!LStall && !RStall) then its height is
      // already covered, so only its depth matters. We also reach this if
      // both stall but have the same height.
      int LDepth = left->getDepth() - LPenalty;
      int RDepth = right->getDepth() - RPenalty;
      if (LDepth != RDepth) {
        DEBUG(++FactorCount[FactDepth]);
        DEBUG(dbgs() << "  Comparing latency of SU (" << left->NodeNum
              << ") depth " << LDepth << " vs SU (" << right->NodeNum
              << ") depth " << RDepth << "\n");
        return LDepth < RDepth ? 1 : -1;
      }
    }
    if (left->Latency != right->Latency) {
      DEBUG(++FactorCount[FactOther]);
      return left->Latency > right->Latency ? 1 : -1;
    }
  }
  return 0;
}

static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
  // Schedule physical register definitions close to their use. This is
  // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
  // long as shortening physreg live ranges is generally good, we can defer
  // creating a subtarget hook.
  if (!DisableSchedPhysRegJoin) {
    bool LHasPhysReg = left->hasPhysRegDefs;
    bool RHasPhysReg = right->hasPhysRegDefs;
    if (LHasPhysReg != RHasPhysReg) {
      DEBUG(++FactorCount[FactRegUses]);
  #ifndef NDEBUG
      const char *PhysRegMsg[] = {" has no physreg", " defines a physreg"};
  #endif
      DEBUG(dbgs() << "  SU (" << left->NodeNum << ") "
            << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
            << PhysRegMsg[RHasPhysReg] << "\n");
      return LHasPhysReg < RHasPhysReg;
    }
  }

  // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);

  // Be really careful about hoisting call operands above previous calls.
  // Only allow it if it would reduce register pressure.
  if (left->isCall && right->isCallOp) {
    unsigned RNumVals = right->getNode()->getNumValues();
    RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
  }
  if (right->isCall && left->isCallOp) {
    unsigned LNumVals = left->getNode()->getNumValues();
    LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
  }

  if (LPriority != RPriority) {
    DEBUG(++FactorCount[FactStatic]);
    return LPriority > RPriority;
  }

  // If one or both of the nodes are calls and their Sethi-Ullman numbers are
  // the same, then keep source order.
  if (left->isCall || right->isCall) {
    unsigned LOrder = SPQ->getNodeOrdering(left);
    unsigned ROrder = SPQ->getNodeOrdering(right);

    // Prefer an ordering where the lower non-zero order number means a higher
    // preference.
    if ((LOrder || ROrder) && LOrder != ROrder)
      return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
  }

  // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
  // e.g.
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // and the following instructions are both ready.
  // t2 = op c3
  // t4 = op c4
  //
  // Then schedule t2 = op first.
  // i.e.
  // t4 = op c4
  // t2 = op c3
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // This creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist) {
    DEBUG(++FactorCount[FactOther]);
    return LDist < RDist;
  }

  // How many registers become live when the node is scheduled.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch) {
    DEBUG(++FactorCount[FactOther]);
    return LScratch > RScratch;
  }

  // Comparing latency against a call makes little sense unless the node
  // is register pressure-neutral.
  if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
    return (left->NodeQueueId > right->NodeQueueId);

  // Do not compare latencies when one or both of the nodes are calls.
  if (!DisableSchedCycles &&
      !(left->isCall || right->isCall)) {
    int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  else {
    if (left->getHeight() != right->getHeight()) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }

    if (left->getDepth() != right->getDepth()) {
      DEBUG(++FactorCount[FactDepth]);
      return left->getDepth() < right->getDepth();
    }
  }

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  DEBUG(++FactorCount[FactOther]);
  return (left->NodeQueueId > right->NodeQueueId);
}

// Bottom up
bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  return BURRSort(left, right, SPQ);
}

// Source order, otherwise bottom up.
bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  unsigned LOrder = SPQ->getNodeOrdering(left);
  unsigned ROrder = SPQ->getNodeOrdering(right);

  // Prefer an ordering where the lower non-zero order number means a higher
  // preference.
  if ((LOrder || ROrder) && LOrder != ROrder)
    return LOrder != 0 && (LOrder < ROrder || ROrder == 0);

  return BURRSort(left, right, SPQ);
}

// If the time between now and when the instruction will be ready can cover
// the spill code, then avoid adding it to the ready queue. This gives long
// stalls highest priority and allows hoisting across calls. It should also
// speed up processing the available queue.
bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  static const unsigned ReadyDelay = 3;

  if (SPQ->MayReduceRegPressure(SU)) return true;

  if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}

// Return true if right should be scheduled with higher priority than left.
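// (All of these comparators share that convention: popFromQueueImpl keeps a
// running best element and replaces it whenever Picker(best, candidate)
// returns true, so returning true hands the win to the second argument.)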
bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  bool LHigh = SPQ->HighRegPressure(left);
  bool RHigh = SPQ->HighRegPressure(right);
  // Avoid causing spills. If register pressure is high, schedule for
  // register pressure reduction.
  if (LHigh && !RHigh) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "  pressure SU(" << left->NodeNum << ") > SU("
          << right->NodeNum << ")\n");
    return true;
  }
  else if (!LHigh && RHigh) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "  pressure SU(" << right->NodeNum << ") > SU("
          << left->NodeNum << ")\n");
    return false;
  }
  if (!LHigh && !RHigh) {
    int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  return BURRSort(left, right, SPQ);
}

// Schedule as many instructions in each cycle as possible. So don't make an
// instruction available unless it is ready in the current cycle.
bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  if (SU->getHeight() > CurCycle) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}

static bool canEnableCoalescing(SUnit *SU) {
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return true;

  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return true;

  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return true;

  return false;
}

// list-ilp is currently an experimental scheduler that allows various
// heuristics to be enabled prior to the normal register reduction logic.
bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  unsigned LLiveUses = 0, RLiveUses = 0;
  int LPDiff = 0, RPDiff = 0;
  if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
    LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
    RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
  }
  if (!DisableSchedRegPressure && LPDiff != RPDiff) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
          << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
    return LPDiff > RPDiff;
  }

  if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
    bool LReduce = canEnableCoalescing(left);
    bool RReduce = canEnableCoalescing(right);
    DEBUG(if (LReduce != RReduce) ++FactorCount[FactPressureDiff]);
    if (LReduce && !RReduce) return false;
    if (RReduce && !LReduce) return true;
  }

  if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
    DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
          << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
    DEBUG(++FactorCount[FactRegUses]);
    return LLiveUses < RLiveUses;
  }

  if (!DisableSchedStalls) {
    bool LStall = BUHasStall(left, left->getHeight(), SPQ);
    bool RStall = BUHasStall(right, right->getHeight(), SPQ);
    if (LStall != RStall) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }
  }

  if (!DisableSchedCriticalPath) {
    int spread = (int)left->getDepth() - (int)right->getDepth();
    if (std::abs(spread) > MaxReorderWindow) {
      DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
            << left->getDepth() << " != SU(" << right->NodeNum << "): "
            << right->getDepth() << "\n");
      DEBUG(++FactorCount[FactDepth]);
      return left->getDepth() < right->getDepth();
    }
  }

  if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
    int spread = (int)left->getHeight() - (int)right->getHeight();
    if (std::abs(spread) > MaxReorderWindow) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }
  }

  return BURRSort(left, right, SPQ);
}

void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
  SUnits = &sunits;
  // Add pseudo dependency edges for two-address nodes.
  AddPseudoTwoAddrDeps();
  // Reroute edges to nodes with multiple uses.
  if (!TracksRegPressure)
    PrescheduleNodesWithMultipleUses();
  // Calculate node priorities.
  CalculateSethiUllmanNumbers();

  // For single-block loops, mark nodes that look like canonical IV increments.
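  // (A single-block loop is detected below by the block appearing among its
  // own successors; initVRegCycle is only meaningful in that case, as its
  // comment explains.)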
  if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
    for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
      initVRegCycle(&sunits[i]);
    }
  }
}

//===----------------------------------------------------------------------===//
// Preschedule for Register Pressure
//===----------------------------------------------------------------------===//

bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->getNode()->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    unsigned NumRes = MCID.getNumDefs();
    unsigned NumOps = MCID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(i).getNode();
        if (DU->getNodeId() != -1 &&
            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
          return true;
      }
    }
  }
  return false;
}

/// canClobberReachingPhysRegUse - True if SU would clobber one of its
/// successor's explicit physregs whose definition can reach DepSU.
/// i.e. DepSU should not be scheduled above SU.
static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
                                         ScheduleDAGRRList *scheduleDAG,
                                         const TargetInstrInfo *TII,
                                         const TargetRegisterInfo *TRI) {
  const unsigned *ImpDefs
    = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
  if (!ImpDefs)
    return false;

  for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
       SI != SE; ++SI) {
    SUnit *SuccSU = SI->getSUnit();
    for (SUnit::const_pred_iterator PI = SuccSU->Preds.begin(),
           PE = SuccSU->Preds.end(); PI != PE; ++PI) {
      if (!PI->isAssignedRegDep())
        continue;

      for (const unsigned *ImpDef = ImpDefs; *ImpDef; ++ImpDef) {
        // Return true if SU clobbers this physical register use and the
        // definition of the register reaches from DepSU. IsReachable queries
        // a topological forward sort of the DAG (following the successors).
        if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
            scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
          return true;
      }
    }
  }
  return false;
}

/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
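/// For example (hypothetical target), if SuccSU implicitly defines a flags
/// register such as EFLAGS and SU, or a node glued to it, implicitly defines
/// the same register, scheduling SU between SuccSU and the consumer of the
/// flags would clobber the live value.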
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->getNode();
  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
  assert(ImpDefs && "Caller should check hasPhysRegDefs");
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (!SUNode->isMachineOpcode())
      continue;
    const unsigned *SUImpDefs =
      TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
    if (!SUImpDefs)
      return false;
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned Reg = ImpDefs[i - NumDefs];
      for (; *SUImpDefs; ++SUImpDefs) {
        unsigned SUReg = *SUImpDefs;
        if (TRI->regsOverlap(Reg, SUReg))
          return true;
      }
    }
  }
  return false;
}

/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
/// are not handled well by the general register pressure reduction
/// heuristics. When presented with code like this:
///
///       N
///     / |
///    /  |
///   U  store
///       |
///      ...
///
/// the heuristics tend to push the store up, but since the
/// operand of the store has another use (U), this would increase
/// the length of that other use (the U->N edge).
///
/// This function transforms code like the above to route U's
/// dependence through the store when possible, like this:
///
///       N
///      ||
///      ||
///     store
///       |
///       U
///       |
///      ...
///
/// This results in the store being scheduled immediately
/// after N, which shortens the U->N live range, reducing
/// register pressure.
///
void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
  // Visit all the nodes in topological order, working top-down.
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    // For now, only look at nodes with no data successors, such as stores.
    // These are especially important, due to the heuristics in
    // getNodePriority for nodes with no data successors.
    if (SU->NumSuccs != 0)
      continue;
    // For now, only look at nodes with exactly one data predecessor.
    if (SU->NumPreds != 1)
      continue;
    // Avoid prescheduling copies to virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyToReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Locate the single data predecessor.
    SUnit *PredSU = 0;
    for (SUnit::const_pred_iterator II = SU->Preds.begin(),
           EE = SU->Preds.end(); II != EE; ++II)
      if (!II->isCtrl()) {
        PredSU = II->getSUnit();
        break;
      }
    assert(PredSU);

    // Don't rewrite edges that carry physregs, because that requires
    // additional support infrastructure.
    if (PredSU->hasPhysRegDefs)
      continue;
    // Short-circuit the case where SU is PredSU's only data successor.
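    // (If SU is the only data successor there is no other use whose live
    // range the rerouting could shorten, so the transformation would be
    // pure overhead.)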
    if (PredSU->NumSuccs == 1)
      continue;
    // Avoid prescheduling to copies from virtual registers, which don't
    // behave like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyFromReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Perform checks on the successors of PredSU.
    for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
           EE = PredSU->Succs.end(); II != EE; ++II) {
      SUnit *PredSuccSU = II->getSUnit();
      if (PredSuccSU == SU) continue;
      // If PredSU has another successor with no data successors, for
      // now don't attempt to choose either over the other.
      if (PredSuccSU->NumSuccs == 0)
        goto outer_loop_continue;
      // Don't break physical register dependencies.
      if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
        if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
          goto outer_loop_continue;
      // Don't introduce graph cycles.
      if (scheduleDAG->IsReachable(SU, PredSuccSU))
        goto outer_loop_continue;
    }

    // Ok, the transformation is safe and the heuristics suggest it is
    // profitable. Update the graph.
    DEBUG(dbgs() << "    Prescheduling SU #" << SU->NodeNum
                 << " next to PredSU #" << PredSU->NodeNum
                 << " to guide scheduling in the presence of multiple uses\n");
    for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
      SDep Edge = PredSU->Succs[i];
      assert(!Edge.isAssignedRegDep());
      SUnit *SuccSU = Edge.getSUnit();
      if (SuccSU != SU) {
        Edge.setSUnit(PredSU);
        scheduleDAG->RemovePred(SuccSU, Edge);
        scheduleDAG->AddPred(SU, Edge);
        Edge.setSUnit(SU);
        scheduleDAG->AddPred(SuccSU, Edge);
        --i;
      }
    }
  outer_loop_continue:;
  }
}

/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
void RegReductionPQBase::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->getNode();
    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
      continue;

    bool isLiveOut = hasOnlyLiveOutUses(SU);
    unsigned Opc = Node->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    unsigned NumRes = MCID.getNumDefs();
    unsigned NumOps = MCID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
        continue;
      SDNode *DU = SU->getNode()->getOperand(j).getNode();
      if (DU->getNodeId() == -1)
        continue;
      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
      if (!DUSU) continue;
      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
             E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();
        if (SuccSU == SU)
          continue;
        // Be conservative. Ignore if nodes aren't at roughly the same
        // depth and height.
        if (SuccSU->getHeight() < SU->getHeight() &&
            (SU->getHeight() - SuccSU->getHeight()) > 1)
          continue;
        // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
        // constrains whatever is using the copy, instead of the copy
        // itself. In the case that the copy is coalesced, this
        // preserves the intent of the pseudo two-address heuristics.
        while (SuccSU->Succs.size() == 1 &&
               SuccSU->getNode()->isMachineOpcode() &&
               SuccSU->getNode()->getMachineOpcode() ==
                 TargetOpcode::COPY_TO_REGCLASS)
          SuccSU = SuccSU->Succs.front().getSUnit();
        // Don't constrain non-instruction nodes.
        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
          continue;
        // Don't constrain nodes with physical register defs if the
        // predecessor can clobber them.
        if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
            continue;
        }
        // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
        // these may be coalesced away. We want them close to their uses.
        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
        if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
            SuccOpc == TargetOpcode::INSERT_SUBREG ||
            SuccOpc == TargetOpcode::SUBREG_TO_REG)
          continue;
        if (!canClobberReachingPhysRegUse(SuccSU, SU, scheduleDAG, TII, TRI) &&
            (!canClobber(SuccSU, DUSU) ||
             (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
             (!SU->isCommutable && SuccSU->isCommutable)) &&
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DEBUG(dbgs() << "    Adding a pseudo-two-addr edge from SU #"
                << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
                                        /*Reg=*/0, /*isNormalMemory=*/false,
                                        /*isMustAlias=*/false,
                                        /*isArtificial=*/true));
        }
      }
    }
  }
}

//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                 CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  BURegReductionPriorityQueue *PQ =
    new BURegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  SrcRegReductionPriorityQueue *PQ =
    new SrcRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  HybridBURRPriorityQueue *PQ =
    new HybridBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);

  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
                                CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  ILPBURRPriorityQueue *PQ =
    new ILPBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}
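
// Note on the wiring in the factories above: each priority queue needs a
// pointer back to the ScheduleDAGRRList that owns it (for getHazardRec() and
// the IsReachable/AddPred topology queries), but the queue must exist before
// the DAG because the ScheduleDAGRRList constructor takes the queue as an
// argument. Hence the construct-then-setScheduleDAG two-step in each factory.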