//===- ResourcePriorityQueue.cpp - A DFA-oriented priority queue -*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the ResourcePriorityQueue class, which is a
// SchedulingPriorityQueue that prioritizes instructions using DFA state to
// reduce the length of the critical path through the basic block
// on VLIW platforms.
// The scheduler is basically a top-down adaptable list scheduler with DFA
// resource tracking added to the cost function.
// The DFA is queried as a state machine to model "packets/bundles" during
// scheduling. Currently packets/bundles are discarded at the end of
// scheduling, affecting only the order of instructions.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scheduler"
#include "llvm/CodeGen/ResourcePriorityQueue.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

static cl::opt<bool> DisableDFASched("disable-dfa-sched", cl::Hidden,
    cl::ZeroOrMore, cl::init(false),
    cl::desc("Disable use of DFA during scheduling"));

static cl::opt<signed> RegPressureThreshold(
    "dfa-sched-reg-pressure-threshold", cl::Hidden, cl::ZeroOrMore, cl::init(5),
    cl::desc("Track reg pressure and switch priority to in-depth"));


ResourcePriorityQueue::ResourcePriorityQueue(SelectionDAGISel *IS) :
  Picker(this),
  InstrItins(IS->getTargetLowering()->getTargetMachine().getInstrItineraryData())
{
  TII = IS->getTargetLowering()->getTargetMachine().getInstrInfo();
  TRI = IS->getTargetLowering()->getTargetMachine().getRegisterInfo();
  TLI = IS->getTargetLowering();

  const TargetMachine &tm = (*IS->MF).getTarget();
  ResourcesModel = tm.getInstrInfo()->CreateTargetScheduleState(&tm, NULL);
  // This hard requirement could be relaxed, but for now
  // do not let it proceed.
  assert(ResourcesModel && "Unimplemented CreateTargetScheduleState.");

  unsigned NumRC = TRI->getNumRegClasses();
  RegLimit.resize(NumRC);
  RegPressure.resize(NumRC);
  std::fill(RegLimit.begin(), RegLimit.end(), 0);
  std::fill(RegPressure.begin(), RegPressure.end(), 0);
  for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
       E = TRI->regclass_end(); I != E; ++I)
    RegLimit[(*I)->getID()] = TRI->getRegPressureLimit(*I, *IS->MF);

  ParallelLiveRanges = 0;
  HorizontalVerticalBalance = 0;
}

/// Count the data predecessors of SU that produce a value in
/// register class RCId.
unsigned
ResourcePriorityQueue::numberRCValPredInSU(SUnit *SU, unsigned RCId) {
  unsigned NumberDeps = 0;
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;

    SUnit *PredSU = I->getSUnit();
    const SDNode *ScegN = PredSU->getNode();

    if (!ScegN)
      continue;

    // If the value comes in via CopyFromReg, it is probably
    // live into the BB from outside.
    switch (ScegN->getOpcode()) {
    default:                 break;
    case ISD::TokenFactor:   break;
    case ISD::CopyFromReg:   NumberDeps++; break;
    case ISD::CopyToReg:     break;
    case ISD::INLINEASM:     break;
    }
    if (!ScegN->isMachineOpcode())
      continue;

    for (unsigned i = 0, e = ScegN->getNumValues(); i != e; ++i) {
      MVT VT = ScegN->getSimpleValueType(i);
      if (TLI->isTypeLegal(VT)
          && (TLI->getRegClassFor(VT)->getID() == RCId)) {
        NumberDeps++;
        break;
      }
    }
  }
  return NumberDeps;
}

/// Count the data successors of SU that consume a value in
/// register class RCId.
unsigned ResourcePriorityQueue::numberRCValSuccInSU(SUnit *SU,
                                                    unsigned RCId) {
  unsigned NumberDeps = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;

    SUnit *SuccSU = I->getSUnit();
    const SDNode *ScegN = SuccSU->getNode();
    if (!ScegN)
      continue;

    // If the value is passed to CopyToReg, it is probably
    // live outside the BB.
    switch (ScegN->getOpcode()) {
    default:                 break;
    case ISD::TokenFactor:   break;
    case ISD::CopyFromReg:   break;
    case ISD::CopyToReg:     NumberDeps++; break;
    case ISD::INLINEASM:     break;
    }
    if (!ScegN->isMachineOpcode())
      continue;

    for (unsigned i = 0, e = ScegN->getNumOperands(); i != e; ++i) {
      const SDValue &Op = ScegN->getOperand(i);
      MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
      if (TLI->isTypeLegal(VT)
          && (TLI->getRegClassFor(VT)->getID() == RCId)) {
        NumberDeps++;
        break;
      }
    }
  }
  return NumberDeps;
}

/// Count the control (non-data) successors of SU.
static unsigned numberCtrlDepsInSU(SUnit *SU) {
  unsigned NumberDeps = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    if (I->isCtrl())
      NumberDeps++;

  return NumberDeps;
}

/// Count the control (non-data) predecessors of SU.
static unsigned numberCtrlPredInSU(SUnit *SU) {
  unsigned NumberDeps = 0;
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (I->isCtrl())
      NumberDeps++;

  return NumberDeps;
}

///
/// Initialize nodes.
///
void ResourcePriorityQueue::initNodes(std::vector<SUnit> &sunits) {
  SUnits = &sunits;
  NumNodesSolelyBlocking.resize(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    initNumRegDefsLeft(SU);
    SU->NodeQueueId = 0;
  }
}

/// This heuristic is used if DFA scheduling is not desired
/// for some VLIW platform.
bool resource_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
  // The isScheduleHigh flag allows nodes with wraparound dependencies that
  // cannot easily be modeled as edges with latencies to be scheduled as
  // soon as possible in a top-down schedule.
  if (LHS->isScheduleHigh && !RHS->isScheduleHigh)
    return false;

  if (!LHS->isScheduleHigh && RHS->isScheduleHigh)
    return true;

  unsigned LHSNum = LHS->NodeNum;
  unsigned RHSNum = RHS->NodeNum;

  // The most important heuristic is scheduling the critical path.
  unsigned LHSLatency = PQ->getLatency(LHSNum);
  unsigned RHSLatency = PQ->getLatency(RHSNum);
  if (LHSLatency < RHSLatency) return true;
  if (LHSLatency > RHSLatency) return false;

  // After that, if two nodes have identical latencies, look to see if one will
  // unblock more other nodes than the other.
  unsigned LHSBlocked = PQ->getNumSolelyBlockNodes(LHSNum);
  unsigned RHSBlocked = PQ->getNumSolelyBlockNodes(RHSNum);
  if (LHSBlocked < RHSBlocked) return true;
  if (LHSBlocked > RHSBlocked) return false;

  // Finally, just to provide a stable ordering, use the node number as a
  // deciding factor.
  return LHSNum < RHSNum;
}


/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
SUnit *ResourcePriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
  SUnit *OnlyAvailablePred = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    SUnit &Pred = *I->getSUnit();
    if (!Pred.isScheduled) {
      // We found an available, but not scheduled, predecessor. If it's the
      // only one we have found, keep track of it... otherwise give up.
      if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
        return 0;
      OnlyAvailablePred = &Pred;
    }
  }
  return OnlyAvailablePred;
}

void ResourcePriorityQueue::push(SUnit *SU) {
  // Look at all of the successors of this node. Count the number of nodes
  // for which this node is the sole unscheduled predecessor.
  unsigned NumNodesBlocking = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    if (getSingleUnscheduledPred(I->getSUnit()) == SU)
      ++NumNodesBlocking;

  NumNodesSolelyBlocking[SU->NodeNum] = NumNodesBlocking;
  Queue.push_back(SU);
}

/// Check if scheduling of this SU is possible
/// in the current packet.
bool ResourcePriorityQueue::isResourceAvailable(SUnit *SU) {
  if (!SU || !SU->getNode())
    return false;

  // If this is a compound instruction,
  // it is likely to be a call. Do not delay it.
  if (SU->getNode()->getGluedNode())
    return true;

  // First see if the pipeline could receive this instruction
  // in the current cycle.
  if (SU->getNode()->isMachineOpcode())
    switch (SU->getNode()->getMachineOpcode()) {
    default:
      if (!ResourcesModel->canReserveResources(&TII->get(
          SU->getNode()->getMachineOpcode())))
        return false;
    case TargetOpcode::EXTRACT_SUBREG:
    case TargetOpcode::INSERT_SUBREG:
    case TargetOpcode::SUBREG_TO_REG:
    case TargetOpcode::REG_SEQUENCE:
    case TargetOpcode::IMPLICIT_DEF:
      break;
    }

  // Now see if there are no other dependencies to instructions already
  // in the packet.
  for (unsigned i = 0, e = Packet.size(); i != e; ++i)
    for (SUnit::const_succ_iterator I = Packet[i]->Succs.begin(),
         E = Packet[i]->Succs.end(); I != E; ++I) {
      // Since we do not add pseudos to packets, might as well
      // ignore order deps.
      if (I->isCtrl())
        continue;

      if (I->getSUnit() == SU)
        return false;
    }

  return true;
}

/// Keep track of available resources.
void ResourcePriorityQueue::reserveResources(SUnit *SU) {
  // If this SU does not fit in the packet
  // start a new one.
  if (!isResourceAvailable(SU) || SU->getNode()->getGluedNode()) {
    ResourcesModel->clearResources();
    Packet.clear();
  }

  if (SU->getNode() && SU->getNode()->isMachineOpcode()) {
    switch (SU->getNode()->getMachineOpcode()) {
    default:
      ResourcesModel->reserveResources(&TII->get(
        SU->getNode()->getMachineOpcode()));
      break;
    case TargetOpcode::EXTRACT_SUBREG:
    case TargetOpcode::INSERT_SUBREG:
    case TargetOpcode::SUBREG_TO_REG:
    case TargetOpcode::REG_SEQUENCE:
    case TargetOpcode::IMPLICIT_DEF:
      break;
    }
    Packet.push_back(SU);
  }
  // Forcefully end packet for PseudoOps.
  else {
    ResourcesModel->clearResources();
    Packet.clear();
  }

  // If the packet is now full, reset the state so that
  // in the next cycle we start fresh.
  if (Packet.size() >= InstrItins->SchedModel->IssueWidth) {
    ResourcesModel->clearResources();
    Packet.clear();
  }
}

/// Raw def/use balance for SU in register class RCId, ignoring the
/// actual register file limits.
signed ResourcePriorityQueue::rawRegPressureDelta(SUnit *SU, unsigned RCId) {
  signed RegBalance = 0;

  if (!SU || !SU->getNode() || !SU->getNode()->isMachineOpcode())
    return RegBalance;

  // Gen estimate.
  for (unsigned i = 0, e = SU->getNode()->getNumValues(); i != e; ++i) {
    MVT VT = SU->getNode()->getSimpleValueType(i);
    if (TLI->isTypeLegal(VT)
        && TLI->getRegClassFor(VT)
        && TLI->getRegClassFor(VT)->getID() == RCId)
      RegBalance += numberRCValSuccInSU(SU, RCId);
  }
  // Kill estimate.
  for (unsigned i = 0, e = SU->getNode()->getNumOperands(); i != e; ++i) {
    const SDValue &Op = SU->getNode()->getOperand(i);
    MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());
    if (isa<ConstantSDNode>(Op.getNode()))
      continue;

    if (TLI->isTypeLegal(VT) && TLI->getRegClassFor(VT)
        && TLI->getRegClassFor(VT)->getID() == RCId)
      RegBalance -= numberRCValPredInSU(SU, RCId);
  }
  return RegBalance;
}

/// Estimates the change in reg pressure from this SU.
/// It is achieved by trivial tracking of defined
/// and used vregs in dependent instructions.
/// The RawPressure flag makes this function ignore
/// existing reg file sizes and report the raw def/use
/// balance.
signed ResourcePriorityQueue::regPressureDelta(SUnit *SU, bool RawPressure) {
  signed RegBalance = 0;

  if (!SU || !SU->getNode() || !SU->getNode()->isMachineOpcode())
    return RegBalance;

  if (RawPressure) {
    for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
         E = TRI->regclass_end(); I != E; ++I) {
      const TargetRegisterClass *RC = *I;
      RegBalance += rawRegPressureDelta(SU, RC->getID());
    }
  }
  else {
    for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
         E = TRI->regclass_end(); I != E; ++I) {
      const TargetRegisterClass *RC = *I;
      if ((RegPressure[RC->getID()] +
           rawRegPressureDelta(SU, RC->getID()) > 0) &&
          (RegPressure[RC->getID()] +
           rawRegPressureDelta(SU, RC->getID()) >= RegLimit[RC->getID()]))
        RegBalance += rawRegPressureDelta(SU, RC->getID());
    }
  }

  return RegBalance;
}

// Constants used to denote relative importance of
// heuristic components for cost computation.
static const unsigned PriorityOne = 200;
static const unsigned PriorityTwo = 100;
static const unsigned PriorityThree = 50;
static const unsigned PriorityFour = 15;
static const unsigned PriorityFive = 5;
static const unsigned ScaleOne = 20;
static const unsigned ScaleTwo = 10;
static const unsigned ScaleThree = 5;
static const unsigned FactorOne = 2;

/// Returns a single number reflecting the benefit of scheduling SU
/// in the current cycle.
signed ResourcePriorityQueue::SUSchedulingCost(SUnit *SU) {
  // Initial trivial priority.
  signed ResCount = 1;

  // Do not waste time on a node that is already scheduled.
  if (SU->isScheduled)
    return ResCount;

  // Forced priority is high.
  if (SU->isScheduleHigh)
    ResCount += PriorityOne;

  // Adaptable scheduling:
  // a small, but very parallel
  // region, where reg pressure is an issue.
  if (HorizontalVerticalBalance > RegPressureThreshold) {
    // Critical path first.
    ResCount += (SU->getHeight() * ScaleTwo);
    // If resources are available for it, multiply the
    // chance of scheduling.
    if (isResourceAvailable(SU))
      ResCount <<= FactorOne;

    // Consider change to reg pressure from scheduling
    // this SU.
    ResCount -= (regPressureDelta(SU, true) * ScaleOne);
  }
  // Default heuristic, greedy and
  // critical path driven.
  else {
    // Critical path first.
    ResCount += (SU->getHeight() * ScaleTwo);
    // Now see how many instructions are blocked by this SU.
    ResCount += (NumNodesSolelyBlocking[SU->NodeNum] * ScaleTwo);
    // If resources are available for it, multiply the
    // chance of scheduling.
    if (isResourceAvailable(SU))
      ResCount <<= FactorOne;

    ResCount -= (regPressureDelta(SU) * ScaleTwo);
  }

  // These are platform-specific things.
  // They will need to go into the back end
  // and be accessed from here via a hook.
  for (SDNode *N = SU->getNode(); N; N = N->getGluedNode()) {
    if (N->isMachineOpcode()) {
      const MCInstrDesc &TID = TII->get(N->getMachineOpcode());
      if (TID.isCall())
        ResCount += (PriorityThree + (ScaleThree * N->getNumValues()));
    }
    else
      switch (N->getOpcode()) {
      default:  break;
      case ISD::TokenFactor:
      case ISD::CopyFromReg:
      case ISD::CopyToReg:
        ResCount += PriorityFive;
        break;

      case ISD::INLINEASM:
        ResCount += PriorityFour;
        break;
      }
  }
  return ResCount;
}


/// Main resource tracking point.
void ResourcePriorityQueue::scheduledNode(SUnit *SU) {
  // Use NULL entry as an event marker to reset
  // the DFA state.
  if (!SU) {
    ResourcesModel->clearResources();
    Packet.clear();
    return;
  }

  const SDNode *ScegN = SU->getNode();
  // Update reg pressure tracking.
  // First update current node.
  if (ScegN->isMachineOpcode()) {
    // Estimate generated regs.
    for (unsigned i = 0, e = ScegN->getNumValues(); i != e; ++i) {
      MVT VT = ScegN->getSimpleValueType(i);

      if (TLI->isTypeLegal(VT)) {
        const TargetRegisterClass *RC = TLI->getRegClassFor(VT);
        if (RC)
          RegPressure[RC->getID()] += numberRCValSuccInSU(SU, RC->getID());
      }
    }
    // Estimate killed regs.
    for (unsigned i = 0, e = ScegN->getNumOperands(); i != e; ++i) {
      const SDValue &Op = ScegN->getOperand(i);
      MVT VT = Op.getNode()->getSimpleValueType(Op.getResNo());

      if (TLI->isTypeLegal(VT)) {
        const TargetRegisterClass *RC = TLI->getRegClassFor(VT);
        if (RC) {
          if (RegPressure[RC->getID()] >
              (numberRCValPredInSU(SU, RC->getID())))
            RegPressure[RC->getID()] -= numberRCValPredInSU(SU, RC->getID());
          else
            RegPressure[RC->getID()] = 0;
        }
      }
    }
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl() || (I->getSUnit()->NumRegDefsLeft == 0))
        continue;
      --I->getSUnit()->NumRegDefsLeft;
    }
  }

  // Reserve resources for this SU.
  reserveResources(SU);

  // Adjust number of parallel live ranges.
  // The heuristic is simple - a node with no data successors reduces
  // the number of live ranges. All others increase it.
  unsigned NumberNonControlDeps = 0;

  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    adjustPriorityOfUnscheduledPreds(I->getSUnit());
    if (!I->isCtrl())
      NumberNonControlDeps++;
  }

  if (!NumberNonControlDeps) {
    if (ParallelLiveRanges >= SU->NumPreds)
      ParallelLiveRanges -= SU->NumPreds;
    else
      ParallelLiveRanges = 0;
  }
  else
    ParallelLiveRanges += SU->NumRegDefsLeft;

  // Track parallel live chains.
  HorizontalVerticalBalance += (SU->Succs.size() - numberCtrlDepsInSU(SU));
  HorizontalVerticalBalance -= (SU->Preds.size() - numberCtrlPredInSU(SU));
}

/// Count the register definitions SU will produce, used to initialize
/// its NumRegDefsLeft counter.
void ResourcePriorityQueue::initNumRegDefsLeft(SUnit *SU) {
  unsigned NodeNumDefs = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
    if (N->isMachineOpcode()) {
      const MCInstrDesc &TID = TII->get(N->getMachineOpcode());
      // No register need be allocated for this.
      if (N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF) {
        NodeNumDefs = 0;
        break;
      }
      NodeNumDefs = std::min(N->getNumValues(), TID.getNumDefs());
    }
    else
      switch (N->getOpcode()) {
      default:  break;
      case ISD::CopyFromReg:
        NodeNumDefs++;
        break;
      case ISD::INLINEASM:
        NodeNumDefs++;
        break;
      }

  SU->NumRegDefsLeft = NodeNumDefs;
}

/// adjustPriorityOfUnscheduledPreds - One of the predecessors of SU was just
/// scheduled. If SU is not itself available, then there is at least one
/// predecessor node that has not been scheduled yet. If SU has exactly ONE
/// unscheduled predecessor, we want to increase its priority: it getting
/// scheduled will make this node available, so it is better than some other
/// node of the same priority that will not make a node available.
void ResourcePriorityQueue::adjustPriorityOfUnscheduledPreds(SUnit *SU) {
  if (SU->isAvailable) return;  // All preds scheduled.

  SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
  if (OnlyAvailablePred == 0 || !OnlyAvailablePred->isAvailable)
    return;

  // Okay, we found a single predecessor that is available, but not scheduled.
  // Since it is available, it must be in the priority queue. First remove it.
  remove(OnlyAvailablePred);

  // Reinsert the node into the priority queue, which recomputes its
  // NumNodesSolelyBlocking value.
  push(OnlyAvailablePred);
}


/// Main access point - returns the next instruction
/// to be placed in the scheduling sequence.
SUnit *ResourcePriorityQueue::pop() {
  if (empty())
    return 0;

  std::vector<SUnit *>::iterator Best = Queue.begin();
  if (!DisableDFASched) {
    signed BestCost = SUSchedulingCost(*Best);
    for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
         E = Queue.end(); I != E; ++I) {

      if (SUSchedulingCost(*I) > BestCost) {
        BestCost = SUSchedulingCost(*I);
        Best = I;
      }
    }
  }
  // Use default TD scheduling mechanism.
  else {
    for (std::vector<SUnit *>::iterator I = llvm::next(Queue.begin()),
         E = Queue.end(); I != E; ++I)
      if (Picker(*Best, *I))
        Best = I;
  }

  SUnit *V = *Best;
  if (Best != prior(Queue.end()))
    std::swap(*Best, Queue.back());

  Queue.pop_back();

  return V;
}


void ResourcePriorityQueue::remove(SUnit *SU) {
  assert(!Queue.empty() && "Queue is empty!");
  std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(), SU);
  if (I != prior(Queue.end()))
    std::swap(*I, Queue.back());

  Queue.pop_back();
}


#ifdef NDEBUG
void ResourcePriorityQueue::dump(ScheduleDAG *DAG) const {}
#else
void ResourcePriorityQueue::dump(ScheduleDAG *DAG) const {
  ResourcePriorityQueue q = *this;
  while (!q.empty()) {
    SUnit *su = q.pop();
    dbgs() << "Height " << su->getHeight() << ": ";
    su->dump(DAG);
  }
}
#endif