ScheduleDAGInstrs.cpp revision 87ea294b0d72ef5f29c6d3ea9c9c5faa8be7abc4
//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), LoopRegs(MLI, MDT) {
  MFI = mf.getFrameInfo();
  DbgValueVec.clear();
}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObject.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}

/// getUnderlyingObject - This is a wrapper around Value::getUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = V->getUnderlyingObject();
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!O->getType()->isPointerTy())
      break;
    V = O;
  } while (1);
  return V;
}
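
// Illustrative example (added commentary, not from the original revision):
// together, the two helpers above are meant to look through an IR sequence
// such as
//
//   %i = ptrtoint i8* %buf to i64
//   %j = add i64 %i, 16
//   %p = inttoptr i64 %j to i8*
//
// and report %buf as the underlying object of %p, provided %buf is an
// identifiable object (e.g. an alloca or a global variable).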

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
                                                const MachineFrameInfo *MFI,
                                                bool &MayAlias) {
  MayAlias = true;
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return 0;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (PSV->isAliased(MFI))
      return 0;

    MayAlias = PSV->mayAlias(MFI);
    return V;
  }

  if (isIdentifiedObject(V))
    return V;

  return 0;
}
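
// Illustrative examples (added commentary): a reload from a register-
// allocator spill slot carries a PseudoSourceValue for which isAliased(MFI)
// is typically false, so it can be given precise dependencies; a load whose
// address traces back to an alloca is an identified object and is likewise
// returned; a store through an arbitrary function-argument pointer yields
// neither, so this helper returns null and the caller falls back to
// conservative chaining.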

void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}

void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;

  // Keep track of dangling debug references to registers.
  std::vector<std::pair<MachineInstr*, unsigned> >
    DanglingDebugValue(TRI->getNumRegs(),
                       std::make_pair(static_cast<MachineInstr*>(0), 0));

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValueVec.clear();

  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    // DBG_VALUEs do not get SUnits built for them, so just remember these
    // for later reinsertion.
    if (MI->isDebugValue()) {
      if (MI->getNumOperands()==3 && MI->getOperand(0).isReg() &&
          MI->getOperand(0).getReg())
        DanglingDebugValue[MI->getOperand(0).getReg()] =
          std::make_pair(MI, DbgValueVec.size());
      DbgValueVec.push_back(MI);
      continue;
    }
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");

      if (MO.isDef() && DanglingDebugValue[Reg].first!=0) {
        SU->DbgInstrList.push_back(DanglingDebugValue[Reg].first);
        DbgValueVec[DanglingDebugValue[Reg].second] = 0;
        DanglingDebugValue[Reg] = std::make_pair((MachineInstr*)0, 0);
      }

      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      //       there's no cost for reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(*Alias)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }
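
      // Illustrative example (added commentary): in the sequence
      //
      //   (1) r1 = ...        ; def of r1
      //   (2)    ... = r1     ; use of r1
      //   (3) r1 = ...        ; redefinition of r1
      //
      // the bottom-up walk above gives (3) an anti dependence on (2) with
      // latency 0 and an output dependence on (1) with latency 1, keeping
      // both writes to r1 ordered without treating them as data edges.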

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU == SU)
            continue;
          unsigned LDataLatency = DataLatency;
          // Optionally add in a special extra latency for nodes that
          // feed addresses.
          // TODO: Do this for register aliases too.
          // TODO: Perhaps we should get rid of SpecialAddressLatency and
          //       just move this into adjustSchedDependency for the
          //       targets that care about it.
          if (SpecialAddressLatency != 0 && !UnitLatencies) {
            MachineInstr *UseMI = UseSU->getInstr();
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
            assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
            if ((UseTID.mayLoad() || UseTID.mayStore()) &&
                (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
              LDataLatency += SpecialAddressLatency;
          }
          // Adjust the dependence latency using operand def/use
          // information (if any), and then allow the target to
          // perform its own adjustments.
          const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
          if (!UnitLatencies) {
            ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
            ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
          }
          UseSU->addPred(dep);
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU == SU)
              continue;
            const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
            if (!UnitLatencies) {
              ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
              ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
            }
            UseSU->addPred(dep);
          }
        }

        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                //       isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }
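
    // Illustrative sketch (added commentary): the backscheduling above is
    // aimed at loops such as
    //
    //   loop:
    //     r2 = load [r1]     ; address use of r1 at the top of the loop
    //     ...
    //     r1 = add r1, 8     ; def whose value wraps around to the next trip
    //     branch loop
    //
    // where the def of r1 only reaches its consumer on the next iteration;
    // an artificial edge to ExitSU (or the isScheduleHigh mark) encourages
    // the scheduler to issue it early.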

    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass).
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
#define STORE_LOAD_LATENCY 1
    unsigned TrueMemOrderLatency = 0;
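
    // Overview (added commentary): three tiers of chaining follow. Barrier
    // instructions (calls, unmodeled side effects, volatile references)
    // order against all tracked memory operations; memory operations with
    // an identifiable underlying object get precise per-Value edges through
    // the {Alias,NonAlias}Mem{Defs,Uses} maps; everything else is serialized
    // through AliasChain and PendingLoads. For example, two stores whose
    // underlying objects are distinct allocas typically get no direct edge
    // between them, while a store through an unknown pointer is ordered
    // after every pending load.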

    if (TID.isCall() || TID.hasUnmodeledSideEffects() ||
        (MI->hasVolatileMemoryRef() &&
         (!TID.mayLoad() || !MI->isInvariantLoad(AA)))) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (std::map<const Value *, SUnit *>::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      BarrierChain = SU;

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain)
        AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
             E = AliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (TID.mayStore()) {
      bool MayAlias = true;
      TrueMemOrderLatency = STORE_LOAD_LATENCY;
      if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        std::map<const Value *, SUnit *>::iterator I =
          ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        std::map<const Value *, SUnit *>::iterator IE =
          ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          if (MayAlias)
            AliasMemDefs[V] = SU;
          else
            NonAliasMemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        std::map<const Value *, std::vector<SUnit *> >::iterator JE =
          ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
                                       /*Reg=*/0, /*isNormalMemory=*/true));
          J->second.clear();
        }
        if (MayAlias) {
          // Add dependencies from all the PendingLoads, i.e. loads
          // with no underlying object.
          for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
            PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
          // Add dependence on alias chain, if needed.
          if (AliasChain)
            AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
        // Add dependence on barrier chain, if needed.
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } else {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }
    } else if (TID.mayLoad()) {
      bool MayAlias = true;
      TrueMemOrderLatency = 0;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        if (const Value *V =
            getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
          // A load from a specific PseudoSourceValue. Add precise
          // dependencies.
          std::map<const Value *, SUnit *>::iterator I =
            ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          std::map<const Value *, SUnit *>::iterator IE =
            ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                    /*isNormalMemory=*/true));
          if (MayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        } else {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (std::map<const Value *, SUnit *>::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));

          PendingLoads.push_back(SU);
          MayAlias = true;
        }

        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
    }
  }

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}

void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}

void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node.
  SU->Latency =
    InstrItins.getStageLatency(SU->getInstr()->getDesc().getSchedClass());

  // Simplistic target-independent heuristic: assume that loads take
  // extra time.
  if (InstrItins.isEmpty())
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
}
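
// Worked example (added commentary): on a target without itineraries,
// getStageLatency falls back to a simple default (commonly 1), so a load
// ends up with an SUnit latency of about 3 after the +2 heuristic above;
// that value then becomes the DataLatency used for the register data edges
// built in BuildSchedGraph.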

void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
  if (InstrItins.isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    int DefCycle = InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(),
                                              DefIdx);
    if (DefCycle >= 0) {
      MachineInstr *UseMI = Use->getInstr();
      const unsigned UseClass = UseMI->getDesc().getSchedClass();

      // For all uses of the register, calculate the maximum latency.
      int Latency = -1;
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = InstrItins.getOperandCycle(UseClass, i);
        if (UseCycle >= 0)
          Latency = std::max(Latency, DefCycle - UseCycle + 1);
      }

      // If we found a latency, then replace the existing dependence latency.
      if (Latency >= 0)
        dep.setLatency(Latency);
    }
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // First reinsert any remaining debug_values; these are either constants,
  // or refer to live-in registers. The beginning of the block is the right
  // place for the latter. The former might reasonably be placed elsewhere
  // using some kind of ordering algorithm, but right now it doesn't matter.
  for (int i = DbgValueVec.size()-1; i >= 0; --i)
    if (DbgValueVec[i])
      BB->insert(InsertPos, DbgValueVec[i]);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
    for (unsigned i = 0, e = SU->DbgInstrList.size() ; i < e ; ++i)
      BB->insert(InsertPos, SU->DbgInstrList[i]);
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!DbgValueVec.empty()) {
    for (int i = DbgValueVec.size()-1; i >= 0; --i)
      if (DbgValueVec[i] != 0) {
        // The highest-indexed surviving DBG_VALUE was reinserted first, so
        // it is now the first instruction of the re-emitted region.
        Begin = DbgValueVec[i];
        break;
      }
  } else if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  DbgValueVec.clear();
  return BB;
}
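
// Usage sketch (added commentary, not part of the original file): a
// scheduling pass built on this class typically drives one region at a
// time, roughly as follows, assuming a concrete subclass "MySched" that
// implements Schedule() and calls BuildSchedGraph() from it:
//
//   MySched Scheduler(MF, MLI, MDT);
//   Scheduler.StartBlock(MBB);
//   Scheduler.Run(MBB, MBB->begin(), MBB->end(), MBB->size());
//   MachineBasicBlock *ResultBB = Scheduler.EmitSchedule();
//   Scheduler.FinishBlock();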