ScheduleDAGInstrs.cpp revision 7a2bdde0a0eebcd2125055e0eacaca040f0b766c
//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
    InstrItins(mf.getTarget().getInstrItineraryData()),
    Defs(TRI->getNumRegs()), Uses(TRI->getNumRegs()), LoopRegs(MLI, MDT) {
  DbgValueVec.clear();
}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}
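// Illustrative example (added commentary, not from the original source):
// given IR such as
//
//   %pi = ptrtoint i8* %buf to i64
//   %ai = add i64 %pi, 32
//   %p2 = inttoptr i64 %ai to i8*
//
// a walk starting at %ai steps through the add (whose second operand is a
// ConstantInt) to %pi, hits the ptrtoint, and returns %buf, so the caller
// can resume the ordinary pointer-based walk from there.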
/// getUnderlyingObject - This is a wrapper around GetUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call GetUnderlyingObject to let it do what it does.
  do {
    V = GetUnderlyingObject(V);
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!O->getType()->isPointerTy())
      break;
    V = O;
  } while (1);
  return V;
}

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
                                                const MachineFrameInfo *MFI,
                                                bool &MayAlias) {
  MayAlias = true;
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return 0;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (PSV->isAliased(MFI))
      return 0;

    MayAlias = PSV->mayAlias(MFI);
    return V;
  }

  if (isIdentifiedObject(V))
    return V;

  return 0;
}

void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}
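// Added commentary (not from the original source): a sketch of how the
// loop-latch state gathered above is consumed. For a two-block loop
//
//   header:  ... = use %r0       ; %r0 is live in to the header
//   latch:   %r0 = ...           ; this def feeds the next iteration
//            bne header
//
// LoopLiveInRegs records the header's live-in registers, and LoopRegs
// records which latch defs feed uses back at the top of the loop, so that
// BuildSchedGraph can backschedule such defs (see the "wrap back around"
// code below).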
/// AddSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the liveout registers
/// are used by instructions in the fallthrough block.
void ScheduleDAGInstrs::AddSchedBarrierDeps() {
  MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->getDesc().isCall() || ExitMI->getDesc().isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      Uses[Reg].push_back(&ExitSU);
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    SmallSet<unsigned, 8> Seen;
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        if (Seen.insert(Reg))
          Uses[Reg].push_back(&ExitSU);
      }
  }
}

void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;

  // Keep track of dangling debug references to registers.
  std::vector<std::pair<MachineInstr*, unsigned> >
    DanglingDebugValue(TRI->getNumRegs(),
                       std::make_pair(static_cast<MachineInstr*>(0), 0));

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValueVec.clear();

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  AddSchedBarrierDeps();

  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    // DBG_VALUE instructions do not have SUnits built, so just remember
    // these for later reinsertion.
    if (MI->isDebugValue()) {
      if (MI->getNumOperands()==3 && MI->getOperand(0).isReg() &&
          MI->getOperand(0).getReg())
        DanglingDebugValue[MI->getOperand(0).getReg()] =
          std::make_pair(MI, DbgValueVec.size());
      DbgValueVec.push_back(MI);
      continue;
    }
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);
    SU->isCall = TID.isCall();
    SU->isCommutable = TID.isCommutable();

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
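    // Added commentary (not from the original source): the operand loop
    // below emits three kinds of register edges. For a sequence like
    //
    //   I1: %r1 = add %r2, %r3    ; def of %r1
    //   I2: %r4 = sub %r1, %r2    ; use of %r1   -> Data edge I1 -> I2
    //   I3: %r2 = ...             ; redef of %r2 -> Anti edge I2 -> I3
    //   I4: %r1 = ...             ; redef of %r1 -> Output edge I1 -> I4
    //
    // Data edges carry the def's latency; Anti and Output edges get the
    // AOLatency values chosen below.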
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");

      if (MO.isDef() && DanglingDebugValue[Reg].first!=0) {
        SU->DbgInstrList.push_back(DanglingDebugValue[Reg].first);
        DbgValueVec[DanglingDebugValue[Reg].second] = 0;
        DanglingDebugValue[Reg] = std::make_pair((MachineInstr*)0, 0);
      }

      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      // there's no cost for reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU == &ExitSU)
          continue;
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU == &ExitSU)
            continue;
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(*Alias)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU == SU)
            continue;
          unsigned LDataLatency = DataLatency;
          // Optionally add in a special extra latency for nodes that
          // feed addresses.
          // TODO: Do this for register aliases too.
          // TODO: Perhaps we should get rid of SpecialAddressLatency and
          // just move this into adjustSchedDependency for the targets that
          // care about it.
          if (SpecialAddressLatency != 0 && !UnitLatencies &&
              UseSU != &ExitSU) {
            MachineInstr *UseMI = UseSU->getInstr();
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
            assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
            if (RegUseIndex >= 0 &&
                (UseTID.mayLoad() || UseTID.mayStore()) &&
                (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
              LDataLatency += SpecialAddressLatency;
          }
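          // Added note (not from the original source): SpecialAddressLatency
          // models targets where a value feeding an address operand is
          // needed early; inflating the edge latency here encourages the
          // scheduler to separate the address computation from the
          // load/store that consumes it.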
          // Adjust the dependence latency using operand def/use
          // information (if any), and then allow the target to
          // perform its own adjustments.
          const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
          if (!UnitLatencies) {
            ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
            ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
          }
          UseSU->addPred(dep);
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU == SU)
              continue;
            const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
            if (!UnitLatencies) {
              ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
              ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
            }
            UseSU->addPred(dep);
          }
        }

        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                // isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }

    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass).
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
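    // Added summary (not from the original source): the chain-building code
    // below keeps three levels of memory-ordering state, from most to least
    // precise:
    //   - {Alias,NonAlias}MemDefs/MemUses: accesses with a known underlying
    //     object, keyed by Value, so only same-object accesses are chained;
    //   - AliasChain: the most recent access that may alias anything;
    //   - BarrierChain: the most recent call or side-effecting instruction,
    //     which everything must be ordered against.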
#define STORE_LOAD_LATENCY 1
    unsigned TrueMemOrderLatency = 0;
    if (TID.isCall() || MI->hasUnmodeledSideEffects() ||
        (MI->hasVolatileMemoryRef() &&
         (!TID.mayLoad() || !MI->isInvariantLoad(AA)))) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (std::map<const Value *, SUnit *>::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      BarrierChain = SU;

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain)
        AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
             E = AliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (TID.mayStore()) {
      bool MayAlias = true;
      TrueMemOrderLatency = STORE_LOAD_LATENCY;
      if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        std::map<const Value *, SUnit *>::iterator I =
          ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        std::map<const Value *, SUnit *>::iterator IE =
          ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          if (MayAlias)
            AliasMemDefs[V] = SU;
          else
            NonAliasMemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        std::map<const Value *, std::vector<SUnit *> >::iterator JE =
          ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
                                       /*Reg=*/0, /*isNormalMemory=*/true));
          J->second.clear();
        }
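        // Added note (not from the original source): since the walk is
        // bottom-up, the map entries consulted above are *later* accesses to
        // the same object, and they have just been made dependent on this
        // store; the chains below order it against later accesses whose
        // underlying object is unknown.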
        if (MayAlias) {
          // Add dependencies from all the PendingLoads, i.e. loads
          // with no underlying object.
          for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
            PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
          // Add dependence on alias chain, if needed.
          if (AliasChain)
            AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
        // Add dependence on barrier chain, if needed.
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } else {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }

      if (!ExitSU.isPred(SU))
        // Push stores up a bit to avoid them getting in between cmp
        // and branches.
        ExitSU.addPred(SDep(SU, SDep::Order, 0,
                            /*Reg=*/0, /*isNormalMemory=*/false,
                            /*isMustAlias=*/false,
                            /*isArtificial=*/true));
    } else if (TID.mayLoad()) {
      bool MayAlias = true;
      TrueMemOrderLatency = 0;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        if (const Value *V =
            getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
          // A load from a specific PseudoSourceValue. Add precise
          // dependencies.
          std::map<const Value *, SUnit *>::iterator I =
            ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          std::map<const Value *, SUnit *>::iterator IE =
            ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                    /*isNormalMemory=*/true));
          if (MayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        } else {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (std::map<const Value *, SUnit *>::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));

          PendingLoads.push_back(SU);
          MayAlias = true;
        }

        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
    }
  }

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}

void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}

void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  // Compute the latency for the node.
  if (!InstrItins || InstrItins->isEmpty()) {
    SU->Latency = 1;

    // Simplistic target-independent heuristic: assume that loads take
    // extra time.
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
  } else {
    SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());
  }
}
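// Added note (not from the original source): without itinerary data the
// fallback above is deliberately crude: every instruction costs 1 cycle and
// loads cost 3 (1 + 2), just enough to bias list schedulers toward hoisting
// loads. When itineraries exist, ComputeOperandLatency below refines the
// latency of individual data edges per def/use operand pair.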
void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  if (!InstrItins || InstrItins->isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    const MachineOperand &MO = DefMI->getOperand(DefIdx);
    if (MO.isReg() && MO.isImplicit() &&
        DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
      // This is an implicit def, getOperandLatency() won't return the correct
      // latency. e.g.
      //   %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
      //   %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
      // What we want is to compute the latency between the def of %D6/%D7 and
      // the use of %Q3 instead.
      DefIdx = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
    }
    MachineInstr *UseMI = Use->getInstr();
    // For all uses of the register, calculate the maximum latency.
    int Latency = -1;
    if (UseMI) {
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx,
                                              UseMI, i);
        Latency = std::max(Latency, UseCycle);
      }
    } else {
      // If UseMI is null, then it must be a scheduling barrier.
      if (!InstrItins || InstrItins->isEmpty())
        return;
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      Latency = InstrItins->getOperandCycle(DefClass, DefIdx);
    }

    // If we found a latency, then replace the existing dependence latency.
    if (Latency >= 0)
      dep.setLatency(Latency);
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // First reinsert any remaining debug_values; these are either constants,
  // or refer to live-in registers. The beginning of the block is the right
  // place for the latter. The former might reasonably be placed elsewhere
  // using some kind of ordering algorithm, but right now it doesn't matter.
  for (int i = DbgValueVec.size()-1; i>=0; --i)
    if (DbgValueVec[i])
      BB->insert(InsertPos, DbgValueVec[i]);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
    for (unsigned i = 0, e = SU->DbgInstrList.size() ; i < e ; ++i)
      BB->insert(InsertPos, SU->DbgInstrList[i]);
  }
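  // Added note (not from the original source): at this point the region has
  // been rebuilt as the leftover dbg_values followed by the scheduled
  // sequence, ending at InsertPos, so Begin must be recomputed to point at
  // whichever instruction now comes first.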
  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!DbgValueVec.empty()) {
    for (int i = DbgValueVec.size()-1; i>=0; --i)
      if (DbgValueVec[i]!=0) {
        // The first remaining dbg_value was reinserted first, so it is now
        // the first instruction of the region.
        Begin = DbgValueVec[i];
        break;
      }
  } else if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  DbgValueVec.clear();
  return BB;
}
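// Usage sketch (added commentary, not from the original source; `Scheduler`
// is a hypothetical subclass of ScheduleDAGInstrs, such as the post-RA
// scheduler's DAG). A pass typically drives this class once per block:
//
//   Scheduler.StartBlock(MBB);
//   Scheduler.Run(MBB, MBB->begin(), MBB->end(), MBB->size());
//   // ScheduleDAG::Run invokes the subclass's Schedule(), which typically
//   // calls BuildSchedGraph(AA) and fills in Sequence.
//   Scheduler.EmitSchedule();
//   Scheduler.FinishBlock();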