ScheduleDAGInstrs.cpp revision 0a4c09e724bd3ab7c9a1d3a1615894e7bf209179
//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), LoopRegs(MLI, MDT) {}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(isa<IntegerType>(V->getType()) && "Unexpected operand type!");
  } while (1);
}

/// getUnderlyingObject - This is a wrapper around Value::getUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = V->getUnderlyingObject();
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!isa<PointerType>(O->getType()))
      break;
    V = O;
  } while (1);
  return V;
}
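
// Illustrative example added to this copy (not in the original source):
// given IR such as
//   %i = ptrtoint i8* %p to i64
//   %j = add i64 %i, 8
//   %q = inttoptr i64 %j to i8*
// getUnderlyingObject(%q) looks through the inttoptr, hands the integer
// expression to getUnderlyingObjectFromInt, which walks the add back
// through the ptrtoint to %p, and the search then resumes from %p.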

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI) {
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return 0;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (!isa<PseudoSourceValue>(V) && !isIdentifiedObject(V))
    return 0;

  return V;
}

void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}
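
// Overview note added to this copy (not in the original source):
// BuildSchedGraph walks the region bottom-up and records three kinds of
// edges for each instruction: register dependencies (data, anti, output)
// via the Defs and Uses lists, precise memory dependencies via
// MemDefs/MemUses when a memoperand identifies the accessed object, and
// conservative chain dependencies through Chain/PendingLoads otherwise.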

void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed. If
  // ChainMMO is null, this is assumed to have arbitrary side-effects. If
  // ChainMMO is non-null, then Chain makes only a single memory reference.
  SUnit *Chain = 0;
  MachineMemOperand *ChainMMO = 0;

  // Memory references to specific known memory locations are tracked so that
  // they can be given more precise dependencies.
  std::map<const Value *, SUnit *> MemDefs;
  std::map<const Value *, std::vector<SUnit *> > MemUses;

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      //       there's no cost for reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(Reg)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU != SU) {
            unsigned LDataLatency = DataLatency;
            // Optionally add in a special extra latency for nodes that
            // feed addresses.
            // TODO: Do this for register aliases too.
            // TODO: Perhaps we should get rid of SpecialAddressLatency and
            //       just move this into adjustSchedDependency for the
            //       targets that care about it.
            if (SpecialAddressLatency != 0 && !UnitLatencies) {
              MachineInstr *UseMI = UseSU->getInstr();
              const TargetInstrDesc &UseTID = UseMI->getDesc();
              int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
              assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
              if ((UseTID.mayLoad() || UseTID.mayStore()) &&
                  (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                  UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
                LDataLatency += SpecialAddressLatency;
            }
            // Adjust the dependence latency using operand def/use
            // information (if any), and then allow the target to
            // perform its own adjustments.
            const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
            if (!UnitLatencies) {
              ComputeOperandLatency(SU, UseSU, (SDep &)dep);
              ST.adjustSchedDependency(SU, UseSU, (SDep &)dep);
            }
            UseSU->addPred(dep);
          }
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU != SU) {
              const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
              if (!UnitLatencies) {
                ComputeOperandLatency(SU, UseSU, (SDep &)dep);
                ST.adjustSchedDependency(SU, UseSU, (SDep &)dep);
              }
              UseSU->addPred(dep);
            }
          }
        }

        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
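        // (Illustrative note added to this copy, not in the original source:
        // consider a loop where a pointer increment at the bottom,
        //   %R = ADD %R, 4
        // feeds the load address at the top of the next iteration. The code
        // below encourages such a def to be scheduled early, so that its
        // latency is hidden before the next iteration's use.)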
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            //       handle the case where the whole loop is inside the region
            //       but is large enough that the isScheduleHigh trick isn't
            //       needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                //       isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }
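
    // (Illustrative note added to this copy, not in the original source:
    // when memoperands identify the accessed object, a store to FixedStack1
    // followed by a load from FixedStack1 gets a single precise order edge
    // through MemDefs/MemUses below, while a load with no memoperand
    // information lands in PendingLoads and must conservatively depend on
    // every store.)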
    // Add chain dependencies.
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    //       produce more precise dependence information.
    if (TID.isCall() || TID.hasUnmodeledSideEffects()) {
    new_chain:
      // This is the conservative case. Add dependencies on all memory
      // references.
      if (Chain)
        Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      Chain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
      PendingLoads.clear();
      for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
           E = MemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
        I->second = SU;
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
           MemUses.begin(), E = MemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency));
        I->second.clear();
      }
      // See if it is known to just have a single memory reference.
      MachineInstr *ChainMI = Chain->getInstr();
      const TargetInstrDesc &ChainTID = ChainMI->getDesc();
      if (!ChainTID.isCall() &&
          !ChainTID.hasUnmodeledSideEffects() &&
          ChainMI->hasOneMemOperand() &&
          !(*ChainMI->memoperands_begin())->isVolatile() &&
          (*ChainMI->memoperands_begin())->getValue())
        // We know that the Chain accesses one specific memory location.
        ChainMMO = *ChainMI->memoperands_begin();
      else
        // Unknown memory accesses. Assume the worst.
        ChainMMO = 0;
    } else if (TID.mayStore()) {
      if (const Value *V = getUnderlyingObjectForInstr(MI)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Handle the def in MemDefs, if there is one.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end()) {
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          MemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          MemUses.find(V);
        if (J != MemUses.end()) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                       /*isNormalMemory=*/true));
          J->second.clear();
        }
        // Add dependencies from all the PendingLoads, since without
        // memoperands we must assume they alias anything.
        for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
          PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
        // Add a general dependence too, if needed.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else
        // Treat all other stores conservatively.
        goto new_chain;
    } else if (TID.mayLoad()) {
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else if (const Value *V = getUnderlyingObjectForInstr(MI)) {
        // A load from a specific PseudoSourceValue. Add precise dependencies.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end())
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
        MemUses[V].push_back(SU);

        // Add a general dependence too, if needed.
        if (Chain && (!ChainMMO ||
                      (ChainMMO->isStore() || ChainMMO->isVolatile())))
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else if (MI->hasVolatileMemoryRef()) {
        // Treat volatile loads conservatively. Note that this includes
        // cases where memoperand information is unavailable.
        goto new_chain;
      } else {
        // A normal load. Depend on the general chain, as well as on
        // all stores. In the absence of MachineMemOperand information,
        // we can't even assume that the load doesn't alias well-behaved
        // memory locations.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
        for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
             E = MemDefs.end(); I != E; ++I)
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
        PendingLoads.push_back(SU);
      }
    }
  }

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}

void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}

void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node.
  SU->Latency =
    InstrItins.getStageLatency(SU->getInstr()->getDesc().getSchedClass());

  // Simplistic target-independent heuristic: assume that loads take
  // extra time.
  if (InstrItins.isEmpty())
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
}
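
// Worked example added to this copy (not in the original source; the cycle
// numbers are hypothetical): if the itinerary says the defining instruction
// produces its result operand at cycle 3 and the using instruction reads its
// operand at cycle 1, ComputeOperandLatency below sets the dependence
// latency to DefCycle - UseCycle + 1 = 3 - 1 + 1 = 3, taking the maximum
// over all operands of the use instruction that read the register.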

void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();
  if (InstrItins.isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    int DefCycle =
      InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(), DefIdx);
    if (DefCycle >= 0) {
      MachineInstr *UseMI = Use->getInstr();
      const unsigned UseClass = UseMI->getDesc().getSchedClass();

      // For all uses of the register, calculate the maximum latency.
      int Latency = -1;
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = InstrItins.getOperandCycle(UseClass, i);
        if (UseCycle >= 0)
          Latency = std::max(Latency, DefCycle - UseCycle + 1);
      }

      // If we found a latency, then replace the existing dependence latency.
      if (Latency >= 0)
        dep.setLatency(Latency);
    }
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::
EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  return BB;
}
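
// Usage sketch added to this copy (not part of the original file; the
// driver shown is an assumption about the caller, with hypothetical names):
//
//   // Once per scheduling region:
//   Scheduler.Run(MBB, RegionBegin, RegionEnd, EndIndex);
//   // ScheduleDAG::Run invokes the subclass's Schedule(), which typically
//   // calls BuildSchedGraph() and fills Sequence with the chosen order.
//   MBB = Scheduler.EmitSchedule(0);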