ScheduleDAGInstrs.cpp revision de5fa932b9bc0eebe803c9549586bf512eeb12f9
//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
    InstrItins(mf.getTarget().getInstrItineraryData()),
    Defs(TRI->getNumRegs()), Uses(TRI->getNumRegs()), LoopRegs(MLI, MDT) {
  DbgValueVec.clear();
}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObject.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}
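
// Illustrative example (editorial note, not in the original source): given
// IR such as
//   %i = ptrtoint i8* %obj to i64
//   %j = add i64 %i, 16
//   %p = inttoptr i64 %j to i8*
// getUnderlyingObject below peels the inttoptr and hands %j to
// getUnderlyingObjectFromInt, which walks the add back to the ptrtoint and
// returns %obj, an identifiable underlying object.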

/// getUnderlyingObject - This is a wrapper around Value::getUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = V->getUnderlyingObject();
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!O->getType()->isPointerTy())
      break;
    V = O;
  } while (1);
  return V;
}

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
                                                const MachineFrameInfo *MFI,
                                                bool &MayAlias) {
  MayAlias = true;
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return 0;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (PSV->isAliased(MFI))
      return 0;

    MayAlias = PSV->mayAlias(MFI);
    return V;
  }

  if (isIdentifiedObject(V))
    return V;

  return 0;
}

void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}

/// AddSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the liveout registers
/// are used by instructions in the fallthrough block.
void ScheduleDAGInstrs::AddSchedBarrierDeps() {
  MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->getDesc().isCall() || ExitMI->getDesc().isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      Uses[Reg].push_back(&ExitSU);
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    SmallSet<unsigned, 8> Seen;
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        if (Seen.insert(Reg))
          Uses[Reg].push_back(&ExitSU);
      }
  }
}
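
// Editorial overview (not in the original source): BuildSchedGraph walks the
// region bottom-up and records three families of edges on each SUnit:
//   - register dependencies: data (def -> use), anti (use -> redef), and
//     output (def -> redef), tracked per physical register in Defs[]/Uses[];
//   - memory dependencies: order edges chained through AliasMemDefs/Uses,
//     NonAliasMemDefs/Uses, BarrierChain, AliasChain, and PendingLoads;
//   - artificial edges: loop-carried backscheduling hints and the
//     store-before-branch edge to ExitSU.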

void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;

  // Keep track of dangling debug references to registers.
  std::vector<std::pair<MachineInstr*, unsigned> >
    DanglingDebugValue(TRI->getNumRegs(),
                       std::make_pair(static_cast<MachineInstr*>(0), 0));

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValueVec.clear();

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  AddSchedBarrierDeps();

  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    // DBG_VALUEs do not have SUnits built, so just remember these for later
    // reinsertion.
    if (MI->isDebugValue()) {
      if (MI->getNumOperands()==3 && MI->getOperand(0).isReg() &&
          MI->getOperand(0).getReg())
        DanglingDebugValue[MI->getOperand(0).getReg()] =
          std::make_pair(MI, DbgValueVec.size());
      DbgValueVec.push_back(MI);
      continue;
    }
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);
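
    // Editorial example (not in the original source): for a sequence such as
    //   I1: %EAX = MOV32ri 0
    //   I2: %EBX = ADD32rr %EBX<kill>, %EAX
    //   I3: %EAX = MOV32ri 1
    // the bottom-up walk below records, considering only EAX:
    //   I3 -> anti edge on I2's use of EAX (latency 0) and an output edge
    //         on I1's def of EAX (latency 1);
    //   I2 -> data edge on I1's def of EAX, with I1's computed latency.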

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");

      if (MO.isDef() && DanglingDebugValue[Reg].first!=0) {
        SU->DbgInstrList.push_back(DanglingDebugValue[Reg].first);
        DbgValueVec[DanglingDebugValue[Reg].second] = 0;
        DanglingDebugValue[Reg] = std::make_pair((MachineInstr*)0, 0);
      }

      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      // there's no cost for reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU == &ExitSU)
          continue;
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU == &ExitSU)
            continue;
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(*Alias)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU == SU)
            continue;
          unsigned LDataLatency = DataLatency;
          // Optionally add in a special extra latency for nodes that
          // feed addresses.
          // TODO: Do this for register aliases too.
          // TODO: Perhaps we should get rid of
          // SpecialAddressLatency and just move this into
          // adjustSchedDependency for the targets that care about it.
          if (SpecialAddressLatency != 0 && !UnitLatencies &&
              UseSU != &ExitSU) {
            MachineInstr *UseMI = UseSU->getInstr();
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
            assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
            if (RegUseIndex >= 0 &&
                (UseTID.mayLoad() || UseTID.mayStore()) &&
                (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
              LDataLatency += SpecialAddressLatency;
          }
          // Adjust the dependence latency using operand def/use
          // information (if any), and then allow the target to
          // perform its own adjustments.
          const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
          if (!UnitLatencies) {
            ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
            ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
          }
          UseSU->addPred(dep);
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU == SU)
              continue;
            const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
            if (!UnitLatencies) {
              ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
              ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
            }
            UseSU->addPred(dep);
          }
        }
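
        // Editorial note (not in the original source): LoopRegs.Deps below
        // was populated by LoopRegs.VisitLoop() when StartBlock() saw that
        // this block is a loop latch. If this def feeds a use at the top of
        // the next iteration, the code below either adds an artificial edge
        // to ExitSU so the def is scheduled early enough to hide its latency
        // across the back edge, or marks the SUnit isScheduleHigh.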

        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                // isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }
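
    // Editorial example (not in the original source): for the sequence
    //   S: store to %a        (identifiable object, MayAlias)
    //   C: call @foo          (has unmodeled side effects)
    //   L: load from %a
    // the bottom-up walk visits L, then C, then S. L is recorded in
    // AliasMemUses[%a]; C takes over as BarrierChain/AliasChain and L gains
    // an order edge on C; S then feeds order edges into the chains on C,
    // yielding the required S -> C -> L memory ordering.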

    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass).
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
#define STORE_LOAD_LATENCY 1
    unsigned TrueMemOrderLatency = 0;
    if (TID.isCall() || TID.hasUnmodeledSideEffects() ||
        (MI->hasVolatileMemoryRef() &&
         (!TID.mayLoad() || !MI->isInvariantLoad(AA)))) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (std::map<const Value *, SUnit *>::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      BarrierChain = SU;

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain)
        AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
             E = AliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (TID.mayStore()) {
      bool MayAlias = true;
      TrueMemOrderLatency = STORE_LOAD_LATENCY;
      if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        std::map<const Value *, SUnit *>::iterator I =
          ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        std::map<const Value *, SUnit *>::iterator IE =
          ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          if (MayAlias)
            AliasMemDefs[V] = SU;
          else
            NonAliasMemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        std::map<const Value *, std::vector<SUnit *> >::iterator JE =
          ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
                                       /*Reg=*/0, /*isNormalMemory=*/true));
          J->second.clear();
        }
        if (MayAlias) {
          // Add dependencies from all the PendingLoads, i.e. loads
          // with no underlying object.
          for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
            PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
          // Add dependence on alias chain, if needed.
          if (AliasChain)
            AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
        // Add dependence on barrier chain, if needed.
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } else {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }

      if (!ExitSU.isPred(SU))
        // Push stores up a bit to avoid them getting in between cmp
        // and branches.
        ExitSU.addPred(SDep(SU, SDep::Order, 0,
                            /*Reg=*/0, /*isNormalMemory=*/false,
                            /*isMustAlias=*/false,
                            /*isArtificial=*/true));
    } else if (TID.mayLoad()) {
      bool MayAlias = true;
      TrueMemOrderLatency = 0;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        if (const Value *V =
            getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
          // A load from a specific PseudoSourceValue. Add precise
          // dependencies.
          std::map<const Value *, SUnit *>::iterator I =
            ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          std::map<const Value *, SUnit *>::iterator IE =
            ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                    /*isNormalMemory=*/true));
          if (MayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        } else {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (std::map<const Value *, SUnit *>::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));

          PendingLoads.push_back(SU);
          MayAlias = true;
        }

        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
    }
  }

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}
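
// Editorial note (not in the original source): the latency hooks below feed
// the graph construction above. ComputeLatency() seeds SU->Latency for a
// whole node from the itinerary's stage latency (or a simple heuristic when
// no itinerary is available); ComputeOperandLatency() then refines the
// latency of individual data edges using per-operand cycle information.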

void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}

void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  // Compute the latency for the node.
  if (!InstrItins || InstrItins->isEmpty()) {
    SU->Latency = 1;

    // Simplistic target-independent heuristic: assume that loads take
    // extra time.
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
  } else
    SU->Latency =
      InstrItins->getStageLatency(SU->getInstr()->getDesc().getSchedClass());
}

void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  if (!InstrItins || InstrItins->isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    const MachineOperand &MO = DefMI->getOperand(DefIdx);
    if (MO.isReg() && MO.isImplicit() &&
        DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
      // This is an implicit def, getOperandLatency() won't return the correct
      // latency. e.g.
      //   %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
      //   %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
      // What we want is to compute latency between def of %D6/%D7 and use of
      // %Q3 instead.
      DefIdx = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
    }
    MachineInstr *UseMI = Use->getInstr();
    // For all uses of the register, calculate the maximum latency.
    int Latency = -1;
    if (UseMI) {
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx,
                                              UseMI, i);
        Latency = std::max(Latency, UseCycle);
      }
    } else {
      // If UseMI is null, it must be a scheduling barrier.
      if (!InstrItins || InstrItins->isEmpty())
        return;
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      Latency = InstrItins->getOperandCycle(DefClass, DefIdx);
    }

    // If we found a latency, then replace the existing dependence latency.
    if (Latency >= 0)
      dep.setLatency(Latency);
  }
}
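
// Editorial example (not in the original source): if UseMI reads Reg in more
// than one operand (e.g. %R0 = ADD32rr %R1, %R1), the loop above queries
// getOperandLatency() once per use operand and keeps the maximum, so the
// resulting edge latency is long enough for every use of the value.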

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // First reinsert any remaining debug_values; these are either constants,
  // or refer to live-in registers. The beginning of the block is the right
  // place for the latter. The former might reasonably be placed elsewhere
  // using some kind of ordering algorithm, but right now it doesn't matter.
  for (int i = DbgValueVec.size()-1; i>=0; --i)
    if (DbgValueVec[i])
      BB->insert(InsertPos, DbgValueVec[i]);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
    for (unsigned i = 0, e = SU->DbgInstrList.size(); i < e; ++i)
      BB->insert(InsertPos, SU->DbgInstrList[i]);
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!DbgValueVec.empty()) {
    // The topmost reinserted instruction is the nonnull DBG_VALUE with the
    // highest index, since the reinsertion loop above walked backwards.
    for (int i = DbgValueVec.size()-1; i>=0; --i)
      if (DbgValueVec[i]!=0) {
        Begin = DbgValueVec[i];
        break;
      }
  } else if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  DbgValueVec.clear();
  return BB;
}
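
// Editorial usage sketch (not in the original source; modeled loosely on the
// post-RA scheduler, with details and parameters elided): a client subclasses
// ScheduleDAGInstrs, implements Schedule(), and drives each region roughly as:
//
//   Scheduler.StartBlock(MBB);
//   Scheduler.Run(MBB, RegionBegin, RegionEnd, EndCount);
//   // ScheduleDAG::Run invokes Schedule(), which typically calls
//   // BuildSchedGraph(AA) and then list-schedules the SUnits.
//   Scheduler.EmitSchedule();
//   Scheduler.FinishBlock();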