ScheduleDAGRRList.cpp revision e32981048244ecfa67d0bdc211af1bac2020a555
//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <climits>
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds, "Number of nodes unfolded");
STATISTIC(NumDups, "Number of duplicated nodes");
STATISTIC(NumPRCopies, "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));

// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
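// These and the flags below are ordinary cl::opt options, so with a build of
// this revision they would be exercised through llc, e.g. (illustrative
// invocation, not part of this file):
//   llc -pre-RA-sched=list-ilp -disable-sched-reg-pressure test.ll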
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
  "disable-sched-physreg-join", cl::Hidden, cl::init(false),
  cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));
static cl::opt<bool> Disable2AddrHack(
  "disable-2addr-hack", cl::Hidden, cl::init(true),
  cl::desc("Disable scheduler's two-address hack"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));

namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// NeedLatency - True if the scheduler will make use of latency information.
  ///
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other nodes that modify
  /// the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

  // Hack to keep track of the inverse of FindCallSeqStart without more crazy
  // DAG crawling.
  DenseMap<SUnit*, SUnit*> CallSeqEndForStart;

public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits) {

    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule();

  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
      AvailableQueue->isReady(SU);
  }

  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = newSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
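    // (newSUnit and Clone append to SUnits, so a NodeNum at or past the old
    // size means the cached topological order does not yet cover this node.)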
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool forceUnitLatencies() const {
    return !NeedLatency;
  }
};
} // end anonymous namespace

/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::Untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost,
                          const MachineFunction &MF) {
  EVT VT = RegDefPos.GetValue();

  // Special handling for untyped values. These values can only come from
  // the expansion of custom DAG-to-DAG patterns.
  if (VT == MVT::Untyped) {
    const SDNode *Node = RegDefPos.GetNode();
    unsigned Opcode = Node->getMachineOpcode();

    if (Opcode == TargetOpcode::REG_SEQUENCE) {
      unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
      const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Idx = RegDefPos.GetIdx();
    const MCInstrDesc Desc = TII->get(Opcode);
    const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI, MF);
    RegClass = RC->getID();
    // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
    // better way to determine it.
    Cost = 1;
  } else {
    RegClass = TLI->getRepRegClassFor(VT)->getID();
    Cost = TLI->getRepRegClassCostFor(VT);
  }
}

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;
  // Allocate slots for each physical register, plus one for a special register
  // to track the virtual resource of a calling sequence.
  LiveRegDefs.resize(TRI->getNumRegs() + 1, NULL);
  LiveRegGens.resize(TRI->getNumRegs() + 1, NULL);
  CallSeqEndForStart.clear();

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();

  AvailableQueue->releaseState();

  DEBUG({
    dbgs() << "*** Final schedule ***\n";
    dumpSchedule();
    dbgs() << '\n';
  });
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
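/// In cycle-accurate mode, a newly released node that does not pass the
/// queue's ready filter is parked on the PendingQueue instead.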
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!forceUnitLatencies()) {
    // Update the predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue;
    // avoid adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}

/// IsChainDependent - Test if Outer is reachable from Inner through
/// chain dependencies.
static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
                             unsigned NestLevel,
                             const TargetInstrInfo *TII) {
  SDNode *N = Outer;
  for (;;) {
    if (N == Inner)
      return true;
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        if (IsChainDependent(N->getOperand(i).getNode(), Inner, NestLevel, TII))
          return true;
      return false;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        if (NestLevel == 0)
          return false;
        --NestLevel;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return false;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return false;
  }
}

/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
/// the corresponding (lowered) CALLSEQ_BEGIN node.
///
/// NestLevel and MaxNest are used in recursion to indicate the current level
/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
/// level seen so far.
///
/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
static SDNode *
FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
                 const TargetInstrInfo *TII) {
  for (;;) {
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      SDNode *Best = 0;
      unsigned BestMaxNest = MaxNest;
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        unsigned MyNestLevel = NestLevel;
        unsigned MyMaxNest = MaxNest;
        if (SDNode *New = FindCallSeqStart(N->getOperand(i).getNode(),
                                           MyNestLevel, MyMaxNest, TII))
          if (!Best || (MyMaxNest > BestMaxNest)) {
            Best = New;
            BestMaxNest = MyMaxNest;
          }
      }
      assert(Best);
      MaxNest = BestMaxNest;
      return Best;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
        MaxNest = std::max(MaxNest, NestLevel);
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NestLevel != 0);
        --NestLevel;
        if (NestLevel == 0)
          return N;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return 0;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return 0;
  }
}

/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }

  // If we're scheduling a lowered CALLSEQ_END, find the corresponding
  // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
  // these nodes, to prevent other calls from being interscheduled with them.
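  // (LiveRegDefs and LiveRegGens were sized to getNumRegs() + 1 in Schedule();
  // the extra slot models the entire call sequence as one pseudo register.)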
  unsigned CallResource = TRI->getNumRegs();
  if (!LiveRegDefs[CallResource])
    for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        unsigned NestLevel = 0;
        unsigned MaxNest = 0;
        SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);

        SUnit *Def = &SUnits[N->getNodeId()];
        CallSeqEndForStart[Def] = SU;

        ++NumLiveRegs;
        LiveRegDefs[CallResource] = Def;
        LiveRegGens[CallResource] = SU;
        break;
      }
}

/// Check to see if any of the pending instructions are ready to issue. If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle = PendingQueue[i]->getHeight();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}

/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      HazardRec->RecedeCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}

/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency through such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = SU->getHeight();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this instruction.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, -Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}

/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
  case ISD::EH_LABEL:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);
}

static void resetVRegCycle(SUnit *SU);

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight()
          << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node, its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->scheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }
  // Release the special call resource dependence, if this is the beginning
  // of a call.
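  // (This undoes the bookkeeping done in ReleasePredecessors when the
  // matching CALLSEQ_END was scheduled.)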
  unsigned CallResource = TRI->getNumRegs();
  if (LiveRegDefs[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
      }
    }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule, updating its
/// own and its predecessors' states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]){
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  // Reclaim the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isMachineOpcode() &&
        SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
      ++NumLiveRegs;
      LiveRegDefs[CallResource] = SU;
      LiveRegGens[CallResource] = CallSeqEndForStart[SU];
    }
  }

  // Release the special call resource dependence, if this is the end
  // of a call.
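  // (SU being the recorded LiveRegGens entry means SU is the CALLSEQ_END of a
  // live call sequence; unscheduling it frees the pseudo-register slot.)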
  if (LiveRegGens[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
      }
    }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (!LiveRegDefs[I->getReg()])
        ++NumLiveRegs;
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      LiveRegDefs[I->getReg()] = SU;
      if (LiveRegGens[I->getReg()] == NULL ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else {
    AvailableQueue->push(SU);
  }
  AvailableQueue->unscheduledNode(SU);
}

/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}

/// BacktrackBottomUp - Backtrack scheduling to the cycle in which BtSU was
/// scheduled, in order to make it possible to schedule a specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}

static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
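/// Returns NULL if the node cannot safely be duplicated, e.g. if it produces
/// or consumes glue.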
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  if (SU->getNode()->getGluedNode())
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    // Unfolding an x86 DEC64m operation results in store, dec, load, which
    // can't be handled here, so quit.
    if (NewNodes.size() == 3)
      return NULL;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location and producing the same type of value
    // but it has different alignment or volatileness.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      computeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    computeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
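    // (Chain and load-operand edges are rerouted to LoadSU when the load is
    // new; value edges are rerouted to NewSU, which now produces the result.)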
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency));

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                                 const TargetRegisterClass *DestRC,
                                                 const TargetRegisterClass *SrcRC,
                                                 SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
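  // (Edge removal is deferred through DelDeps below so the iteration over
  // SU->Succs is not invalidated while new edges are added.)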
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
    else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      SDep D(CopyFromSU, SDep::Order, /*Latency=*/0,
             /*Reg=*/0, /*isNormalMemory=*/false,
             /*isMustAlias=*/false, /*isArtificial=*/true);
      AddPred(SuccSU, D);
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = MCID.getNumDefs();
  for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// CheckForLiveRegDef - Update the live-register vectors if the specified
/// register def of the specified SUnit clobbers any "live" registers.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (const uint16_t *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {

    // Check if Ref is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI)) {
      LRegs.push_back(*AliasI);
    }
  }
}

/// CheckForLiveRegDefMasked - Check for any live physregs that are clobbered
/// by RegMask, and add them to LRegs.
static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
                                     std::vector<SUnit*> &LiveRegDefs,
                                     SmallSet<unsigned, 4> &RegAdded,
                                     SmallVector<unsigned, 4> &LRegs) {
  // Look at all live registers. Skip Reg0 and the special CallResource.
  for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
    if (!LiveRegDefs[i]) continue;
    if (LiveRegDefs[i] == SU) continue;
    if (!MachineOperand::clobbersPhysReg(RegMask, i)) continue;
    if (RegAdded.insert(i))
      LRegs.push_back(i);
  }
}

/// getNodeRegMask - Returns the register mask attached to an SDNode, if any.
static const uint32_t *getNodeRegMask(const SDNode *N) {
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    if (const RegisterMaskSDNode *Op =
        dyn_cast<RegisterMaskSDNode>(N->getOperand(i).getNode()))
      return Op->getRegMask();
  return NULL;
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    // If we're in the middle of scheduling a call, don't begin scheduling
    // another call. Also, don't allow any physical registers to be live across
    // the call.
    if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
      // Check the special calling-sequence resource.
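      // (If another call sequence is live and this CALLSEQ_END is not nested
      // inside it, record CallResource as an interference so the two calls
      // are not interleaved.)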
      unsigned CallResource = TRI->getNumRegs();
      if (LiveRegDefs[CallResource]) {
        SDNode *Gen = LiveRegGens[CallResource]->getNode();
        while (SDNode *Glued = Gen->getGluedNode())
          Gen = Glued;
        if (!IsChainDependent(Gen, Node, 0, TII) && RegAdded.insert(CallResource))
          LRegs.push_back(CallResource);
      }
    }
    if (const uint32_t *RegMask = getNodeRegMask(Node))
      CheckForLiveRegDefMasked(SU, RegMask, LiveRegDefs, RegAdded, LRegs);

    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const uint16_t *Reg = MCID.getImplicitDefs(); *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}

/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SmallVector<SUnit*, 4> Interferences;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;

  SUnit *CurSU = AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    LRegsMap.insert(std::make_pair(CurSU, LRegs));

    CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
    Interferences.push_back(CurSU);
    CurSU = AvailableQueue->pop();
  }
  if (CurSU) {
    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
      Interferences[i]->isPending = false;
      assert(Interferences[i]->isAvailable && "must still be available");
      AvailableQueue->push(Interferences[i]);
    }
    return CurSU;
  }

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to resolve it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = NULL;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      AddPred(TrySU, SDep(BtSU, SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false, /*isArtificial=*/true));

      // If one or more successors has been unscheduled, then the current
      // node is no longer available. Schedule a successor that's now
      // available instead.
      if (!TrySU->isAvailable) {
        CurSU = AvailableQueue->pop();
      }
      else {
        CurSU = TrySU;
        TrySU->isPending = false;
        Interferences.erase(Interferences.begin()+i);
      }
      break;
    }
  }

  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try to
    // duplicate the nodes that produce these "too expensive to copy" values
    // to break the dependency. In case even that doesn't work, insert cross
    // class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If the cross copy register class is the same as RC, then it must be
    // possible to copy the value directly. Do not try to duplicate the def.
    // If the cross copy register class is not the same as RC, then it's
    // possible to copy the value but it requires cross register class copies
    // and it is expensive.
    // If the cross copy register class is null, then it's not possible to copy
    // the value at all.
    SUnit *NewDef = 0;
    if (DestRC != RC) {
      NewDef = CopyAndMoveSuccessors(LRDef);
      if (!DestRC && !NewDef)
        report_fatal_error("Can't handle live physical register dependency!");
    }
    if (!NewDef) {
      // Issue copies, these can be expensive cross register class copies.
      SmallVector<SUnit*, 2> Copies;
      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
      DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
            << " to SU #" << Copies.front()->NodeNum << "\n");
      AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false,
                          /*isArtificial=*/true));
      NewDef = Copies.back();
    }

    DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
          << " to SU #" << TrySU->NodeNum << "\n");
    LiveRegDefs[Reg] = NewDef;
    AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
                         /*Reg=*/0, /*isNormalMemory=*/false,
                         /*isMustAlias=*/false,
                         /*isArtificial=*/true));
    TrySU->isAvailable = false;
    CurSU = NewDef;
  }

  assert(CurSU && "Unable to resolve live physical register dependencies!");

  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    Interferences[i]->isPending = false;
    // May no longer be available due to backtracking.
    if (Interferences[i]->isAvailable) {
      AvailableQueue->push(Interferences[i]);
    }
  }
  return CurSU;
}

/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back. Schedule the node.
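  // (Nodes are appended to Sequence in the reverse of program order; the
  // vector is reversed once the loop below finishes.)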
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    DEBUG(dbgs() << "\nExamining Available:\n";
          AvailableQueue->dump(this));

    // Pick the best node to schedule taking all constraints into
    // consideration.
    SUnit *SU = PickNodeToScheduleBottomUp();

    AdvancePastStalls(SU);

    ScheduleNodeBottomUp(SU);

    while (AvailableQueue->empty() && !PendingQueue.empty()) {
      // Advance the cycle to free resources. Skip ahead to the next ready SU.
      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
    }
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifyScheduledSequence(/*isBottomUp=*/true);
#endif
}

//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Definition
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
// to reduce register pressure.
//
namespace {
class RegReductionPQBase;

struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
};

#ifndef NDEBUG
template<class SF>
struct reverse_sort : public queue_sort {
  SF &SortFunc;
  reverse_sort(SF &sf) : SortFunc(sf) {}
  reverse_sort(const reverse_sort &RHS) : SortFunc(RHS.SortFunc) {}

  bool operator()(SUnit* left, SUnit* right) const {
    // reverse left/right rather than simply !SortFunc(left, right)
    // to expose different paths in the comparison logic.
    return SortFunc(right, left);
  }
};
#endif // NDEBUG

// bu_ls_rr_sort - Priority function for bottom up register pressure
// reduction scheduler.
struct bu_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
  bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// src_ls_rr_sort - Priority function for source order scheduler.
struct src_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  src_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  src_ls_rr_sort(const src_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
struct hybrid_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  hybrid_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
// scheduler.
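// (Note on comparator direction: popFromQueueImpl, defined further below,
// treats a true result from operator()(left, right) as "right has higher
// priority than left".)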
struct ilp_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  ilp_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

class RegReductionPQBase : public SchedulingPriorityQueue {
protected:
  std::vector<SUnit*> Queue;
  unsigned CurQueueId;
  bool TracksRegPressure;
  bool SrcOrder;

  // SUnits - The SUnits for the current graph.
  std::vector<SUnit> *SUnits;

  MachineFunction &MF;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const TargetLowering *TLI;
  ScheduleDAGRRList *scheduleDAG;

  // SethiUllmanNumbers - The SethiUllman number for each node.
  std::vector<unsigned> SethiUllmanNumbers;

  /// RegPressure - Tracking current reg pressure per register class.
  ///
  std::vector<unsigned> RegPressure;

  /// RegLimit - Tracking the number of allocatable registers per register
  /// class.
  std::vector<unsigned> RegLimit;

public:
  RegReductionPQBase(MachineFunction &mf,
                     bool hasReadyFilter,
                     bool tracksrp,
                     bool srcorder,
                     const TargetInstrInfo *tii,
                     const TargetRegisterInfo *tri,
                     const TargetLowering *tli)
    : SchedulingPriorityQueue(hasReadyFilter),
      CurQueueId(0), TracksRegPressure(tracksrp), SrcOrder(srcorder),
      MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
    if (TracksRegPressure) {
      unsigned NumRC = TRI->getNumRegClasses();
      RegLimit.resize(NumRC);
      RegPressure.resize(NumRC);
      std::fill(RegLimit.begin(), RegLimit.end(), 0);
      std::fill(RegPressure.begin(), RegPressure.end(), 0);
      for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
             E = TRI->regclass_end(); I != E; ++I)
        RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
    }
  }

  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
    scheduleDAG = scheduleDag;
  }

  ScheduleHazardRecognizer* getHazardRec() {
    return scheduleDAG->getHazardRec();
  }

  void initNodes(std::vector<SUnit> &sunits);

  void addNode(const SUnit *SU);

  void updateNode(const SUnit *SU);

  void releaseState() {
    SUnits = 0;
    SethiUllmanNumbers.clear();
    std::fill(RegPressure.begin(), RegPressure.end(), 0);
  }

  unsigned getNodePriority(const SUnit *SU) const;

  unsigned getNodeOrdering(const SUnit *SU) const {
    if (!SU->getNode()) return 0;

    return scheduleDAG->DAG->GetOrdering(SU->getNode());
  }

  bool empty() const { return Queue.empty(); }

  void push(SUnit *U) {
    assert(!U->NodeQueueId && "Node in the queue already");
    U->NodeQueueId = ++CurQueueId;
    Queue.push_back(U);
  }

  void remove(SUnit *SU) {
    assert(!Queue.empty() && "Queue is empty!");
    assert(SU->NodeQueueId != 0 && "Not in queue!");
    std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
                                                 SU);
    if (I != prior(Queue.end()))
      std::swap(*I, Queue.back());
    Queue.pop_back();
    SU->NodeQueueId = 0;
  }

  bool tracksRegPressure() const { return TracksRegPressure; }

  void dumpRegPressure() const;

  bool HighRegPressure(const SUnit *SU) const;

  bool MayReduceRegPressure(SUnit *SU) const;

  int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;

  void scheduledNode(SUnit *SU);

  void unscheduledNode(SUnit *SU);

protected:
  bool canClobber(const SUnit *SU, const SUnit *Op);
  void AddPseudoTwoAddrDeps();
  void PrescheduleNodesWithMultipleUses();
  void CalculateSethiUllmanNumbers();
};

template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
  std::vector<SUnit *>::iterator Best = Q.begin();
  for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
         E = Q.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
  SUnit *V = *Best;
  if (Best != prior(Q.end()))
    std::swap(*Best, Q.back());
  Q.pop_back();
  return V;
}

template<class SF>
SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
#ifndef NDEBUG
  if (DAG->StressSched) {
    reverse_sort<SF> RPicker(Picker);
    return popFromQueueImpl(Q, RPicker);
  }
#endif
  (void)DAG;
  return popFromQueueImpl(Q, Picker);
}

template<class SF>
class RegReductionPriorityQueue : public RegReductionPQBase {
  SF Picker;

public:
  RegReductionPriorityQueue(MachineFunction &mf,
                            bool tracksrp,
                            bool srcorder,
                            const TargetInstrInfo *tii,
                            const TargetRegisterInfo *tri,
                            const TargetLowering *tli)
    : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder,
                         tii, tri, tli),
      Picker(this) {}

  bool isBottomUp() const { return SF::IsBottomUp; }

  bool isReady(SUnit *U) const {
    return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
  }

  SUnit *pop() {
    if (Queue.empty()) return NULL;

    SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
    V->NodeQueueId = 0;
    return V;
  }

  void dump(ScheduleDAG *DAG) const {
    // Emulate pop() without clobbering NodeQueueIds.
    std::vector<SUnit*> DumpQueue = Queue;
    SF DumpPicker = Picker;
    while (!DumpQueue.empty()) {
      SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
      dbgs() << "Height " << SU->getHeight() << ": ";
      SU->dump(DAG);
    }
  }
};

typedef RegReductionPriorityQueue<bu_ls_rr_sort>
BURegReductionPriorityQueue;

typedef RegReductionPriorityQueue<src_ls_rr_sort>
SrcRegReductionPriorityQueue;

typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
HybridBURRPriorityQueue;

typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
ILPBURRPriorityQueue;
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Static Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//

// Check for special nodes that bypass scheduling heuristics.
// Currently this pushes TokenFactor nodes down, but may be used for other
// pseudo-ops as well.
//
// Return -1 to schedule right above left, 1 for left above right.
// Return 0 if no bias exists.
static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
  bool LSchedLow = left->isScheduleLow;
  bool RSchedLow = right->isScheduleLow;
  if (LSchedLow != RSchedLow)
    return LSchedLow < RSchedLow ? 1 : -1;
  return 0;
}

/// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
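/// The computation below is a variant of the classic Sethi-Ullman labeling.
/// For illustration (not from the original comments): a node whose operands
/// have distinct numbers inherits the maximum of those numbers, while a node
/// with k operands tied at the maximum M receives M + k - 1, modeling the
/// extra values that must stay live while the remaining subtrees are
/// evaluated. Leaves (and any node that would otherwise get 0) receive 1.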
/// Smaller number is the higher priority.
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}

/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
void RegReductionPQBase::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}

void RegReductionPQBase::addNode(const SUnit *SU) {
  unsigned SUSize = SethiUllmanNumbers.size();
  if (SUnits->size() > SUSize)
    SethiUllmanNumbers.resize(SUSize*2, 0);
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}

void RegReductionPQBase::updateNode(const SUnit *SU) {
  SethiUllmanNumbers[SU->NodeNum] = 0;
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}

// Lower priority means schedule further down. For bottom-up scheduling, lower
// priority SUs are scheduled before higher priority SUs.
unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
  assert(SU->NodeNum < SethiUllmanNumbers.size());
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return 0;
  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return 0;
  if (SU->NumSuccs == 0 && SU->NumPreds != 0)
    // If SU does not have a register use, i.e. it doesn't produce a value
    // that would be consumed (e.g. store), then it terminates a chain of
    // computation. Give it a large SethiUllman number so that it will be
    // scheduled right before its predecessors and will not lengthen their
    // live ranges.
    return 0xffff;
  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return 0;
#if 1
  return SethiUllmanNumbers[SU->NodeNum];
#else
  unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
  if (SU->isCallOp) {
    // FIXME: This assumes all of the defs are used as call operands.
    int NP = (int)Priority - SU->getNode()->getNumValues();
    return (NP > 0) ?
           NP : 0;
  }
  return Priority;
#endif
}

//===----------------------------------------------------------------------===//
// Register Pressure Tracking
//===----------------------------------------------------------------------===//

void RegReductionPQBase::dumpRegPressure() const {
  for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
         E = TRI->regclass_end(); I != E; ++I) {
    const TargetRegisterClass *RC = *I;
    unsigned Id = RC->getID();
    unsigned RP = RegPressure[Id];
    if (!RP) continue;
    DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
          << '\n');
  }
}

bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
  if (!TLI)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      continue;
    }
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      unsigned RCId, Cost;
      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);

      if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
        return true;
    }
  }
  return false;
}

bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
  const SDNode *N = SU->getNode();

  if (!N->isMachineOpcode() || !SU->NumSuccs)
    return false;

  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  for (unsigned i = 0; i != NumDefs; ++i) {
    EVT VT = N->getValueType(i);
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] >= RegLimit[RCId])
      return true;
  }
  return false;
}

// Compute the register pressure contribution of this instruction by counting
// up for uses that are not live and down for defs. Only count register
// classes that are already under high pressure. As a side effect, compute the
// number of uses of registers that are already live.
//
// FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
// so could probably be factored.
int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
  LiveUses = 0;
  int PDiff = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
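    // Such a predecessor only contributes uses of already-live values; the
    // branch below counts it toward LiveUses rather than PDiff.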
    if (PredSU->NumRegDefsLeft == 0) {
      if (PredSU->getNode()->isMachineOpcode())
        ++LiveUses;
      continue;
    }
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      EVT VT = RegDefPos.GetValue();
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] >= RegLimit[RCId])
        ++PDiff;
    }
  }
  const SDNode *N = SU->getNode();

  if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
    return PDiff;

  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  for (unsigned i = 0; i != NumDefs; ++i) {
    EVT VT = N->getValueType(i);
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] >= RegLimit[RCId])
      --PDiff;
  }
  return PDiff;
}

void RegReductionPQBase::scheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  if (!SU->getNode())
    return;

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      continue;
    }
    // FIXME: The ScheduleDAG currently loses information about which of a
    // node's values is consumed by each dependence. Consequently, if the node
    // defines multiple register classes, we don't know which to pressurize
    // here. Instead the following loop consumes the register defs in an
    // arbitrary order. At least it handles the common case of clustered loads
    // to the same class. For precise liveness, each SDep needs to indicate the
    // result number. But that tightly couples the ScheduleDAG with the
    // SelectionDAG making updates tricky. A simpler hack would be to attach a
    // value type or register class to SDep.
    //
    // The most important aspect of register tracking is balancing the increase
    // here with the reduction further below. Note that this SU may use multiple
    // defs in PredSU. They can't be determined here, but we've already
    // compensated by reducing NumRegDefsLeft in PredSU during
    // ScheduleDAGSDNodes::AddSchedEdges.
    --PredSU->NumRegDefsLeft;
    unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
      if (SkipRegDefs)
        continue;

      unsigned RCId, Cost;
      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
      RegPressure[RCId] += Cost;
      break;
    }
  }

  // We should have this assert, but there may be dead SDNodes that never
  // materialize as SUnits, so they don't appear to generate liveness.
  //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
  int SkipRegDefs = (int)SU->NumRegDefsLeft;
  for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
       RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
    if (SkipRegDefs > 0)
      continue;
    unsigned RCId, Cost;
    GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
    if (RegPressure[RCId] < Cost) {
      // Register pressure tracking is imprecise. This can happen.
      // But we try hard not to let it happen because it likely results in
      // poor scheduling.
      DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") has too many regdefs\n");
      RegPressure[RCId] = 0;
    }
    else {
      RegPressure[RCId] -= Cost;
    }
  }
  dumpRegPressure();
}

void RegReductionPQBase::unscheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  const SDNode *N = SU->getNode();
  if (!N) return;

  if (!N->isMachineOpcode()) {
    if (N->getOpcode() != ISD::CopyToReg)
      return;
  } else {
    unsigned Opc = N->getMachineOpcode();
    if (Opc == TargetOpcode::EXTRACT_SUBREG ||
        Opc == TargetOpcode::INSERT_SUBREG ||
        Opc == TargetOpcode::SUBREG_TO_REG ||
        Opc == TargetOpcode::REG_SEQUENCE ||
        Opc == TargetOpcode::IMPLICIT_DEF)
      return;
  }

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
    // counts data deps.
    if (PredSU->NumSuccsLeft != PredSU->Succs.size())
      continue;
    const SDNode *PN = PredSU->getNode();
    if (!PN->isMachineOpcode()) {
      if (PN->getOpcode() == ISD::CopyFromReg) {
        EVT VT = PN->getValueType(0);
        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      }
      continue;
    }
    unsigned POpc = PN->getMachineOpcode();
    if (POpc == TargetOpcode::IMPLICIT_DEF)
      continue;
    if (POpc == TargetOpcode::EXTRACT_SUBREG ||
        POpc == TargetOpcode::INSERT_SUBREG ||
        POpc == TargetOpcode::SUBREG_TO_REG) {
      EVT VT = PN->getValueType(0);
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      continue;
    }
    unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
    for (unsigned i = 0; i != NumDefs; ++i) {
      EVT VT = PN->getValueType(i);
      if (!PN->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
        // Register pressure tracking is imprecise. This can happen.
        RegPressure[RCId] = 0;
      else
        RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
    }
  }

  // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
  // may transfer data dependencies to CopyToReg.
  if (SU->NumSuccs && N->isMachineOpcode()) {
    unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
    }
  }

  dumpRegPressure();
}

//===----------------------------------------------------------------------===//
// Dynamic Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//

/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
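/// (A stack of CopyToReg successors is collapsed to a single position by the
/// recursion below, so copies do not artificially inflate the distance.)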
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxHeight = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain succs
    unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be considered
    // to be at the same position.
    if (I->getSUnit()->getNode() &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
      Height = closestSucc(I->getSUnit())+1;
    if (Height > MaxHeight)
      MaxHeight = Height;
  }
  return MaxHeight;
}

/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers, i.e. the number of data dependencies.
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    Scratches++;
  }
  return Scratches;
}

/// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
/// CopyFromReg from a virtual register.
static bool hasOnlyLiveInOpers(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *PredSU = I->getSUnit();
    if (PredSU->getNode() &&
        PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
      unsigned Reg =
        cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}

/// hasOnlyLiveOutUses - Return true if SU has only value successors that are
/// CopyToReg to a virtual register. This SU def is probably a liveout and
/// it has no other use. It should be scheduled closer to the terminator.
static bool hasOnlyLiveOutUses(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *SuccSU = I->getSUnit();
    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
      unsigned Reg =
        cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}

// Set isVRegCycle for a node with only live in opers and live out uses. Also
// set isVRegCycle for its CopyFromReg operands.
//
// This is only relevant for single-block loops, in which case the VRegCycle
// node is likely an induction variable in which the operand and target virtual
// registers should be coalesced (e.g. pre/post increment values). Setting the
// isVRegCycle flag helps the scheduler prioritize other uses of the same
// CopyFromReg so that this node becomes the virtual register "kill". This
// avoids interference between the values live in and out of the block and
// eliminates a copy inside the loop.
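//
// For illustration (hypothetical loop body): if %iv is live in through a
// CopyFromReg, updated by "t = add %iv, 1", and t is live out through a
// CopyToReg, then the add is the VRegCycle node. Scheduling the other uses
// of %iv first lets the add become the kill of %iv, so the live-in and
// live-out virtual registers can be coalesced.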
static void initVRegCycle(SUnit *SU) {
  if (DisableSchedVRegCycle)
    return;

  if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
    return;

  DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");

  SU->isVRegCycle = true;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    I->getSUnit()->isVRegCycle = true;
  }
}

// After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
// CopyFromReg operands. We should no longer penalize other uses of this VReg.
static void resetVRegCycle(SUnit *SU) {
  if (!SU->isVRegCycle)
    return;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    if (PredSU->isVRegCycle) {
      assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
             "VRegCycle def must be CopyFromReg");
      I->getSUnit()->isVRegCycle = 0;
    }
  }
}

// Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
// means a node that defines the VRegCycle has not been scheduled yet.
static bool hasVRegCycleUse(const SUnit *SU) {
  // If this SU also defines the VReg, don't hoist it as a "use".
  if (SU->isVRegCycle)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    if (I->getSUnit()->isVRegCycle &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
      DEBUG(dbgs() << "  VReg cycle use: SU (" << SU->NodeNum << ")\n");
      return true;
    }
  }
  return false;
}

// Check for either a dependence (latency) or resource (hazard) stall.
//
// Note: The ScheduleHazardRecognizer interface requires a non-const SU.
static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
  if ((int)SPQ->getCurCycle() < Height) return true;
  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return true;
  return false;
}

// Return -1 if left has higher priority, 1 if right has higher priority.
// Return 0 if latency-based priority is equivalent.
static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
                            RegReductionPQBase *SPQ) {
  // Scheduling an instruction that uses a VReg whose postincrement has not yet
  // been scheduled will induce a copy. Model this as an extra cycle of latency.
  int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
  int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
  int LHeight = (int)left->getHeight() + LPenalty;
  int RHeight = (int)right->getHeight() + RPenalty;

  bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
    BUHasStall(left, LHeight, SPQ);
  bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
    BUHasStall(right, RHeight, SPQ);

  // If scheduling one of the nodes will cause a pipeline stall, delay it.
  // If scheduling both of them will cause pipeline stalls, sort them
  // according to their height.
  if (LStall) {
    if (!RStall)
      return 1;
    if (LHeight != RHeight)
      return LHeight > RHeight ?
        1 : -1;
  } else if (RStall)
    return -1;

  // If either node is scheduling for latency, sort them by height/depth
  // and latency.
  if (!checkPref || (left->SchedulingPref == Sched::ILP ||
                     right->SchedulingPref == Sched::ILP)) {
    if (DisableSchedCycles) {
      if (LHeight != RHeight)
        return LHeight > RHeight ? 1 : -1;
    }
    else {
      // If neither instruction stalls (!LStall && !RStall) then
      // its height is already covered so only its depth matters. We also reach
      // this if both stall but have the same height.
      int LDepth = left->getDepth() - LPenalty;
      int RDepth = right->getDepth() - RPenalty;
      if (LDepth != RDepth) {
        DEBUG(dbgs() << "  Comparing latency of SU (" << left->NodeNum
              << ") depth " << LDepth << " vs SU (" << right->NodeNum
              << ") depth " << RDepth << "\n");
        return LDepth < RDepth ? 1 : -1;
      }
    }
    if (left->Latency != right->Latency)
      return left->Latency > right->Latency ? 1 : -1;
  }
  return 0;
}

static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
  // Schedule physical register definitions close to their use. This is
  // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
  // long as shortening physreg live ranges is generally good, we can defer
  // creating a subtarget hook.
  if (!DisableSchedPhysRegJoin) {
    bool LHasPhysReg = left->hasPhysRegDefs;
    bool RHasPhysReg = right->hasPhysRegDefs;
    if (LHasPhysReg != RHasPhysReg) {
      #ifndef NDEBUG
      const char *const PhysRegMsg[] = {" has no physreg"," defines a physreg"};
      #endif
      DEBUG(dbgs() << "  SU (" << left->NodeNum << ") "
            << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
            << PhysRegMsg[RHasPhysReg] << "\n");
      return LHasPhysReg < RHasPhysReg;
    }
  }

  // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);

  // Be really careful about hoisting call operands above previous calls.
  // Only allow it if it would reduce register pressure.
  if (left->isCall && right->isCallOp) {
    unsigned RNumVals = right->getNode()->getNumValues();
    RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
  }
  if (right->isCall && left->isCallOp) {
    unsigned LNumVals = left->getNode()->getNumValues();
    LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
  }

  if (LPriority != RPriority)
    return LPriority > RPriority;

  // If one or both of the nodes are calls and their Sethi-Ullman numbers are
  // the same, then keep source order.
  if (left->isCall || right->isCall) {
    unsigned LOrder = SPQ->getNodeOrdering(left);
    unsigned ROrder = SPQ->getNodeOrdering(right);

    // Prefer an ordering where the lower the non-zero order number, the higher
    // the preference.
    if ((LOrder || ROrder) && LOrder != ROrder)
      return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
  }

  // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
  // e.g.
  //   t1 = op t2, c1
  //   t3 = op t4, c2
  //
  // and the following instructions are both ready.
  //   t2 = op c3
  //   t4 = op c4
  //
  // Then schedule t2 = op first.
  // i.e.
  //   t4 = op c4
  //   t2 = op c3
  //   t1 = op t2, c1
  //   t3 = op t4, c2
  //
  // This creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist)
    return LDist < RDist;

  // How many registers become live when the node is scheduled.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch)
    return LScratch > RScratch;

  // Comparing latency against a call makes little sense unless the node
  // is register pressure-neutral.
  if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
    return (left->NodeQueueId > right->NodeQueueId);

  // Do not compare latencies when one or both of the nodes are calls.
  if (!DisableSchedCycles &&
      !(left->isCall || right->isCall)) {
    int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  else {
    if (left->getHeight() != right->getHeight())
      return left->getHeight() > right->getHeight();

    if (left->getDepth() != right->getDepth())
      return left->getDepth() < right->getDepth();
  }

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}

// Bottom up
bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  return BURRSort(left, right, SPQ);
}

// Source order, otherwise bottom up.
bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  unsigned LOrder = SPQ->getNodeOrdering(left);
  unsigned ROrder = SPQ->getNodeOrdering(right);

  // Prefer an ordering where the lower the non-zero order number, the higher
  // the preference.
  if ((LOrder || ROrder) && LOrder != ROrder)
    return LOrder != 0 && (LOrder < ROrder || ROrder == 0);

  return BURRSort(left, right, SPQ);
}

// If the time between now and when the instruction will be ready can cover
// the spill code, then avoid adding it to the ready queue. This gives long
// stalls highest priority and allows hoisting across calls. It should also
// speed up processing the available queue.
bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  static const unsigned ReadyDelay = 3;

  if (SPQ->MayReduceRegPressure(SU)) return true;

  if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}

// Return true if right should be scheduled with higher priority than left.
bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  bool LHigh = SPQ->HighRegPressure(left);
  bool RHigh = SPQ->HighRegPressure(right);
  // Avoid causing spills. If register pressure is high, schedule for
  // register pressure reduction.
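  // When both nodes are under high pressure, or both are low and the latency
  // comparison ties, control falls through to the Sethi-Ullman based
  // BURRSort below.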
  if (LHigh && !RHigh) {
    DEBUG(dbgs() << "  pressure SU(" << left->NodeNum << ") > SU("
          << right->NodeNum << ")\n");
    return true;
  }
  else if (!LHigh && RHigh) {
    DEBUG(dbgs() << "  pressure SU(" << right->NodeNum << ") > SU("
          << left->NodeNum << ")\n");
    return false;
  }
  if (!LHigh && !RHigh) {
    int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  return BURRSort(left, right, SPQ);
}

// Schedule as many instructions in each cycle as possible. So don't make an
// instruction available unless it is ready in the current cycle.
bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  if (SU->getHeight() > CurCycle) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}

static bool canEnableCoalescing(SUnit *SU) {
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return true;

  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return true;

  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return true;

  return false;
}

// list-ilp is currently an experimental scheduler that allows various
// heuristics to be enabled prior to the normal register reduction logic.
bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  unsigned LLiveUses = 0, RLiveUses = 0;
  int LPDiff = 0, RPDiff = 0;
  if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
    LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
    RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
  }
  if (!DisableSchedRegPressure && LPDiff != RPDiff) {
    DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
          << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
    return LPDiff > RPDiff;
  }

  if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
    bool LReduce = canEnableCoalescing(left);
    bool RReduce = canEnableCoalescing(right);
    if (LReduce && !RReduce) return false;
    if (RReduce && !LReduce) return true;
  }

  if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
    DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
          << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
    return LLiveUses < RLiveUses;
  }

  if (!DisableSchedStalls) {
    bool LStall = BUHasStall(left, left->getHeight(), SPQ);
    bool RStall = BUHasStall(right, right->getHeight(), SPQ);
    if (LStall != RStall)
      return left->getHeight() > right->getHeight();
  }

  if (!DisableSchedCriticalPath) {
    int spread = (int)left->getDepth() - (int)right->getDepth();
    if (std::abs(spread) > MaxReorderWindow) {
      DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
            << left->getDepth() << " != SU(" << right->NodeNum << "): "
            << right->getDepth() << "\n");
      return left->getDepth() < right->getDepth();
    }
  }

  if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
    int spread = (int)left->getHeight() - (int)right->getHeight();
    if (std::abs(spread) > MaxReorderWindow)
      return left->getHeight() > right->getHeight();
  }

  return BURRSort(left, right, SPQ);
}

void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
  SUnits = &sunits;
  // Add pseudo dependency edges for two-address nodes.
  if (!Disable2AddrHack)
    AddPseudoTwoAddrDeps();
  // Reroute edges to nodes with multiple uses.
  if (!TracksRegPressure && !SrcOrder)
    PrescheduleNodesWithMultipleUses();
  // Calculate node priorities.
  CalculateSethiUllmanNumbers();

  // For single block loops, mark nodes that look like canonical IV increments.
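  // A basic block that is its own successor is a single-block loop; see
  // initVRegCycle above for what qualifies as an IV-increment-like node.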
  if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
    for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
      initVRegCycle(&sunits[i]);
    }
  }
}

//===----------------------------------------------------------------------===//
// Preschedule for Register Pressure
//===----------------------------------------------------------------------===//

bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->getNode()->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    unsigned NumRes = MCID.getNumDefs();
    unsigned NumOps = MCID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(i).getNode();
        if (DU->getNodeId() != -1 &&
            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
          return true;
      }
    }
  }
  return false;
}

/// canClobberReachingPhysRegUse - True if SU would clobber one of its
/// successor's explicit physregs whose definition can reach DepSU.
/// i.e. DepSU should not be scheduled above SU.
static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
                                         ScheduleDAGRRList *scheduleDAG,
                                         const TargetInstrInfo *TII,
                                         const TargetRegisterInfo *TRI) {
  const uint16_t *ImpDefs
    = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
  const uint32_t *RegMask = getNodeRegMask(SU->getNode());
  if (!ImpDefs && !RegMask)
    return false;

  for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
       SI != SE; ++SI) {
    SUnit *SuccSU = SI->getSUnit();
    for (SUnit::const_pred_iterator PI = SuccSU->Preds.begin(),
           PE = SuccSU->Preds.end(); PI != PE; ++PI) {
      if (!PI->isAssignedRegDep())
        continue;

      if (RegMask && MachineOperand::clobbersPhysReg(RegMask, PI->getReg()) &&
          scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
        return true;

      if (ImpDefs)
        for (const uint16_t *ImpDef = ImpDefs; *ImpDef; ++ImpDef)
          // Return true if SU clobbers this physical register use and the
          // definition of the register reaches from DepSU. IsReachable queries
          // a topological forward sort of the DAG (following the successors).
          if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
              scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
            return true;
    }
  }
  return false;
}

/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
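/// (Hypothetical example: SuccSU is a compare that defines a status register
/// such as X86's EFLAGS with a pending reader, and SU also clobbers EFLAGS;
/// scheduling SU in between would destroy the value.)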
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->getNode();
  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  const uint16_t *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
  assert(ImpDefs && "Caller should check hasPhysRegDefs");
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (!SUNode->isMachineOpcode())
      continue;
    const uint16_t *SUImpDefs =
      TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
    const uint32_t *SURegMask = getNodeRegMask(SUNode);
    if (!SUImpDefs && !SURegMask)
      continue;
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned Reg = ImpDefs[i - NumDefs];
      if (SURegMask && MachineOperand::clobbersPhysReg(SURegMask, Reg))
        return true;
      if (!SUImpDefs)
        continue;
      for (; *SUImpDefs; ++SUImpDefs) {
        unsigned SUReg = *SUImpDefs;
        if (TRI->regsOverlap(Reg, SUReg))
          return true;
      }
    }
  }
  return false;
}

/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
/// are not handled well by the general register pressure reduction
/// heuristics. When presented with code like this:
///
///      N
///    /   |
///   /    |
///  U    store
///  |
/// ...
///
/// the heuristics tend to push the store up, but since the
/// operand of the store has another use (U), this would increase
/// the length of that other use (the U->N edge).
///
/// This function transforms code like the above to route U's
/// dependence through the store when possible, like this:
///
///      N
///      ||
///      ||
///     store
///       |
///       U
///       |
///      ...
///
/// This results in the store being scheduled immediately
/// after N, which shortens the U->N live range, reducing
/// register pressure.
///
void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
  // Visit all the nodes in topological order, working top-down.
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    // For now, only look at nodes with no data successors, such as stores.
    // These are especially important, due to the heuristics in
    // getNodePriority for nodes with no data successors.
    if (SU->NumSuccs != 0)
      continue;
    // For now, only look at nodes with exactly one data predecessor.
    if (SU->NumPreds != 1)
      continue;
    // Avoid prescheduling copies to virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyToReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Locate the single data predecessor.
    SUnit *PredSU = 0;
    for (SUnit::const_pred_iterator II = SU->Preds.begin(),
           EE = SU->Preds.end(); II != EE; ++II)
      if (!II->isCtrl()) {
        PredSU = II->getSUnit();
        break;
      }
    assert(PredSU);

    // Don't rewrite edges that carry physregs, because that requires additional
    // support infrastructure.
    if (PredSU->hasPhysRegDefs)
      continue;
    // Short-circuit the case where SU is PredSU's only data successor.
    if (PredSU->NumSuccs == 1)
      continue;
    // Avoid prescheduling to copies from virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyFromReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Perform checks on the successors of PredSU.
    for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
           EE = PredSU->Succs.end(); II != EE; ++II) {
      SUnit *PredSuccSU = II->getSUnit();
      if (PredSuccSU == SU) continue;
      // If PredSU has another successor with no data successors, for
      // now don't attempt to choose either over the other.
      if (PredSuccSU->NumSuccs == 0)
        goto outer_loop_continue;
      // Don't break physical register dependencies.
      if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
        if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
          goto outer_loop_continue;
      // Don't introduce graph cycles.
      if (scheduleDAG->IsReachable(SU, PredSuccSU))
        goto outer_loop_continue;
    }

    // Ok, the transformation is safe and the heuristics suggest it is
    // profitable. Update the graph.
    DEBUG(dbgs() << "    Prescheduling SU #" << SU->NodeNum
                 << " next to PredSU #" << PredSU->NodeNum
                 << " to guide scheduling in the presence of multiple uses\n");
    for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
      SDep Edge = PredSU->Succs[i];
      assert(!Edge.isAssignedRegDep());
      SUnit *SuccSU = Edge.getSUnit();
      if (SuccSU != SU) {
        Edge.setSUnit(PredSU);
        scheduleDAG->RemovePred(SuccSU, Edge);
        scheduleDAG->AddPred(SU, Edge);
        Edge.setSUnit(SU);
        scheduleDAG->AddPred(SuccSU, Edge);
        --i;
      }
    }
  outer_loop_continue:;
  }
}

/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
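///
/// For illustration (hypothetical two-address instructions):
///   t1 = ADD t0, 1   // t1 is tied to t0, so the ADD clobbers t0
///   t2 = SUB t0, 5   // an independent reader of t0
/// The pseudo edge makes the SUB a predecessor of the ADD, placing the SUB
/// above the ADD in the final order so that t0 need not be copied before the
/// ADD destroys it.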
void RegReductionPQBase::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->getNode();
    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
      continue;

    bool isLiveOut = hasOnlyLiveOutUses(SU);
    unsigned Opc = Node->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    unsigned NumRes = MCID.getNumDefs();
    unsigned NumOps = MCID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
        continue;
      SDNode *DU = SU->getNode()->getOperand(j).getNode();
      if (DU->getNodeId() == -1)
        continue;
      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
      if (!DUSU) continue;
      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
             E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();
        if (SuccSU == SU)
          continue;
        // Be conservative. Ignore if nodes aren't at roughly the same
        // depth and height.
        if (SuccSU->getHeight() < SU->getHeight() &&
            (SU->getHeight() - SuccSU->getHeight()) > 1)
          continue;
        // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
        // constrains whatever is using the copy, instead of the copy
        // itself. In the case that the copy is coalesced, this
        // preserves the intent of the pseudo two-address heuristics.
        while (SuccSU->Succs.size() == 1 &&
               SuccSU->getNode()->isMachineOpcode() &&
               SuccSU->getNode()->getMachineOpcode() ==
                 TargetOpcode::COPY_TO_REGCLASS)
          SuccSU = SuccSU->Succs.front().getSUnit();
        // Don't constrain non-instruction nodes.
        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
          continue;
        // Don't constrain nodes with physical register defs if the
        // predecessor can clobber them.
        if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
            continue;
        }
        // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
        // these may be coalesced away. We want them close to their uses.
        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
        if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
            SuccOpc == TargetOpcode::INSERT_SUBREG ||
            SuccOpc == TargetOpcode::SUBREG_TO_REG)
          continue;
        if (!canClobberReachingPhysRegUse(SuccSU, SU, scheduleDAG, TII, TRI) &&
            (!canClobber(SuccSU, DUSU) ||
             (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
             (!SU->isCommutable && SuccSU->isCommutable)) &&
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DEBUG(dbgs() << "    Adding a pseudo-two-addr edge from SU #"
                << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
                                        /*Reg=*/0, /*isNormalMemory=*/false,
                                        /*isMustAlias=*/false,
                                        /*isArtificial=*/true));
        }
      }
    }
  }
}

//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                 CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  BURegReductionPriorityQueue *PQ =
    new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  SrcRegReductionPriorityQueue *PQ =
    new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  HybridBURRPriorityQueue *PQ =
    new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);

  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
                                CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  ILPBURRPriorityQueue *PQ =
    new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}