ScheduleDAGInstrs.cpp revision e264f62ca09a8f65c87a46d562a4d0f9ec5d457e
//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), LoopRegs(MLI, MDT) {
  MFI = mf.getFrameInfo();
}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObject.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(isa<IntegerType>(V->getType()) && "Unexpected operand type!");
  } while (1);
}
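// Illustrative example (editorial, not part of the original source): given
// IR such as
//   %pi = ptrtoint i8* %p to i64
//   %ai = add i64 %pi, 8
//   %q  = inttoptr i64 %ai to i8*
// getUnderlyingObject below strips the inttoptr and hands %ai to
// getUnderlyingObjectFromInt, which walks the add back through its
// pointer-derived operand to the ptrtoint and returns %p, at which point
// the pointer-walking loop resumes.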
/// getUnderlyingObject - This is a wrapper around Value::getUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = V->getUnderlyingObject();
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!isa<PointerType>(O->getType()))
      break;
    V = O;
  } while (1);
  return V;
}

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
                                                const MachineFrameInfo *MFI,
                                                bool &MayAlias) {
  MayAlias = true;
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return 0;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (PSV->isAliased(MFI))
      return 0;

    MayAlias = PSV->mayAlias(MFI);
    return V;
  }

  if (isIdentifiedObject(V))
    return V;

  return 0;
}

void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}
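// Overview (editorial summary of the function below): BuildSchedGraph walks
// the region bottom-up, creating one SUnit per MachineInstr and three
// classes of edges: register dependencies (data, anti, output) via the
// Defs/Uses lists, memory-order dependencies via the Alias/NonAlias def and
// use maps, and conservative barrier/alias chains for calls and other
// side-effecting instructions. For example, scanning upward over
//   I0: %r1 = ADD ...
//   I1:      = USE %r1
//   I2: %r1 = DEF ...
// I2 is recorded in Defs[r1] first; visiting I1 records an anti dependence
// of I2 on I1 (latency 0) and adds I1 to Uses[r1]; visiting I0 then records
// a data dependence of I1 on I0 and an output dependence of I2 on I0
// (latency 1).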
void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list
  // from bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      //       there's no cost for reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(*Alias)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/*Alias));
        }
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU != SU) {
            unsigned LDataLatency = DataLatency;
            // Optionally add in a special extra latency for nodes that
            // feed addresses.
            // TODO: Do this for register aliases too.
            // TODO: Perhaps we should get rid of SpecialAddressLatency
            //       and just move this into adjustSchedDependency for
            //       the targets that care about it.
            if (SpecialAddressLatency != 0 && !UnitLatencies) {
              MachineInstr *UseMI = UseSU->getInstr();
              const TargetInstrDesc &UseTID = UseMI->getDesc();
              int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
              assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
              if ((UseTID.mayLoad() || UseTID.mayStore()) &&
                  (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                  UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
                LDataLatency += SpecialAddressLatency;
            }
            // Adjust the dependence latency using operand def/use
            // information (if any), and then allow the target to
            // perform its own adjustments.
            SDep dep(SU, SDep::Data, LDataLatency, Reg);
            if (!UnitLatencies) {
              ComputeOperandLatency(SU, UseSU, dep);
              ST.adjustSchedDependency(SU, UseSU, dep);
            }
            UseSU->addPred(dep);
          }
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU != SU) {
              SDep dep(SU, SDep::Data, DataLatency, *Alias);
              if (!UnitLatencies) {
                ComputeOperandLatency(SU, UseSU, dep);
                ST.adjustSchedDependency(SU, UseSU, dep);
              }
              UseSU->addPred(dep);
            }
          }
        }

        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                //       isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }
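    // Editorial summary of the chain scheme below: memory operations with a
    // known underlying object get precise edges through the AliasMemDefs/
    // AliasMemUses (or NonAlias*) maps; loads with no known object collect
    // in PendingLoads; calls and other unanalyzable accesses become the
    // BarrierChain/AliasChain, which later-visited accesses must order
    // against. For instance, a store to a spill slot whose
    // PseudoSourceValue cannot alias IR values lands in NonAliasMemDefs,
    // while a store through an arbitrary pointer falls into the
    // conservative new_alias_chain path.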
    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass).
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
#define STORE_LOAD_LATENCY 1
    unsigned TrueMemOrderLatency = 0;
    if (TID.isCall() || TID.hasUnmodeledSideEffects() ||
        (MI->hasVolatileMemoryRef() &&
         (!TID.mayLoad() || !MI->isInvariantLoad(AA)))) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (std::map<const Value *, SUnit *>::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      BarrierChain = SU;

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain)
        AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
           E = AliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (TID.mayStore()) {
      bool MayAlias = true;
      TrueMemOrderLatency = STORE_LOAD_LATENCY;
      if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        std::map<const Value *, SUnit *>::iterator I =
          ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        std::map<const Value *, SUnit *>::iterator IE =
          ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          if (MayAlias)
            AliasMemDefs[V] = SU;
          else
            NonAliasMemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        std::map<const Value *, std::vector<SUnit *> >::iterator JE =
          ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
                                       /*Reg=*/0, /*isNormalMemory=*/true));
          J->second.clear();
        }
        if (MayAlias) {
          // Add dependencies from all the PendingLoads, i.e. loads
          // with no underlying object.
          for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
            PendingLoads[k]->addPred(SDep(SU, SDep::Order,
                                          TrueMemOrderLatency));
          // Add dependence on alias chain, if needed.
          if (AliasChain)
            AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
        // Add dependence on barrier chain, if needed.
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } else {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }
    } else if (TID.mayLoad()) {
      bool MayAlias = true;
      TrueMemOrderLatency = 0;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        if (const Value *V =
            getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
          // A load from a specific PseudoSourceValue. Add precise
          // dependencies.
          std::map<const Value *, SUnit *>::iterator I =
            ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          std::map<const Value *, SUnit *>::iterator IE =
            ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                    /*isNormalMemory=*/true));
          if (MayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        } else {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (std::map<const Value *, SUnit *>::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));

          PendingLoads.push_back(SU);
          MayAlias = true;
        }

        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
    }
  }

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}

void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}
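// Editorial note: the two latency helpers below are the only places this
// file consults the target's InstrItineraryData. ComputeLatency picks a
// per-node latency from the instruction's scheduling class; when the target
// supplies no itineraries, the code falls back on a simple heuristic that
// adds 2 cycles for loads.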
void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node.
  SU->Latency =
    InstrItins.getStageLatency(SU->getInstr()->getDesc().getSchedClass());

  // Simplistic target-independent heuristic: assume that loads take
  // extra time.
  if (InstrItins.isEmpty())
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
}
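// Worked example (editorial, with hypothetical itinerary values): if the
// defining instruction produces the register in cycle 3 (DefCycle = 3) and
// the using instruction reads it in its own cycle 1 (UseCycle = 1), the
// code below sets the dependence latency to the maximum over matching use
// operands of DefCycle - UseCycle + 1 = 3 - 1 + 1 = 3 cycles.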
void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
  if (InstrItins.isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    int DefCycle =
      InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(), DefIdx);
    if (DefCycle >= 0) {
      MachineInstr *UseMI = Use->getInstr();
      const unsigned UseClass = UseMI->getDesc().getSchedClass();

      // For all uses of the register, calculate the maximum latency.
      int Latency = -1;
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = InstrItins.getOperandCycle(UseClass, i);
        if (UseCycle >= 0)
          Latency = std::max(Latency, DefCycle - UseCycle + 1);
      }

      // If we found a latency, then replace the existing dependence latency.
      if (Latency >= 0)
        dep.setLatency(Latency);
    }
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}
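// Illustration (editorial): if the scheduler produced
//   Sequence = { I2, NULL, I0, I1 }
// for a block originally containing I0, I1, I2, EmitSchedule below rewrites
// the block as I2, <noop>, I0, I1, lowering the null entry via EmitNoop().
// Begin is then reset to I2, the new first scheduled instruction.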
// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::
EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; ++i) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  return BB;
}