ScheduleDAGInstrs.cpp revision d94a4e5d8de1145be200ff7223f98b0928462b94
//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), LoopRegs(MLI, MDT) {}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObject.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(isa<IntegerType>(V->getType()) && "Unexpected operand type!");
  } while (1);
}
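
// As a (hypothetical) illustration of the walk above, given IR such as:
//   %i = ptrtoint i8* %p to i64
//   %j = add i64 %i, 16
//   %q = inttoptr i64 %j to i8*
// starting from %j, the loop steps through the add back to %i, and the
// ptrtoint case then returns %p so the pointer-based search can resume.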

/// getUnderlyingObject - This is a wrapper around Value::getUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = V->getUnderlyingObject();
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!isa<PointerType>(O->getType()))
      break;
    V = O;
  } while (1);
  return V;
}

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI) {
  if (!MI->hasOneMemOperand() ||
      !MI->memoperands_begin()->getValue() ||
      MI->memoperands_begin()->isVolatile())
    return 0;

  const Value *V = MI->memoperands_begin()->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (!isa<PseudoSourceValue>(V) && !isIdentifiedObject(V))
    return 0;

  return V;
}

void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}
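
/// BuildSchedGraph - Walk the scheduling region bottom-up, creating one SUnit
/// per MachineInstr and adding three kinds of dependence edges: register
/// data/anti/output edges (tracked per physical register in the Defs and Uses
/// lists), memory order edges (conservatively via Chain, or precisely via
/// MemDefs/MemUses when the underlying object is known), and artificial edges
/// used to backschedule loop-carried values.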
void ScheduleDAGInstrs::BuildSchedGraph() {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed. If
  // ChainMMO is null, Chain is assumed to have arbitrary side-effects. If
  // ChainMMO is non-null, then Chain makes only a single memory reference.
  SUnit *Chain = 0;
  MachineMemOperand *ChainMMO = 0;

  // Memory references to specific known memory locations are tracked so that
  // they can be given more precise dependencies.
  std::map<const Value *, SUnit *> MemDefs;
  std::map<const Value *, std::vector<SUnit *> > MemUses;

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  unsigned SpecialAddressLatency =
    TM.getSubtarget<TargetSubtarget>().getSpecialAddressLatency();

  // Walk the list of instructions, from bottom moving up.
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    const TargetInstrDesc &TID = MI->getDesc();
    assert(!TID.isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      std::vector<SUnit *> &UseList = Uses[Reg];
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      // there's no cost for reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg)))
          DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/Reg));
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &DefList = Defs[*Alias];
        for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
          SUnit *DefSU = DefList[i];
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(Reg)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU != SU) {
            unsigned LDataLatency = DataLatency;
            // Optionally add in a special extra latency for nodes that
            // feed addresses.
            // TODO: Do this for register aliases too.
            if (SpecialAddressLatency != 0 && !UnitLatencies) {
              MachineInstr *UseMI = UseSU->getInstr();
              const TargetInstrDesc &UseTID = UseMI->getDesc();
              int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
              assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
              if ((UseTID.mayLoad() || UseTID.mayStore()) &&
                  (unsigned)RegUseIndex < UseTID.getNumOperands() &&
                  UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
                LDataLatency += SpecialAddressLatency;
            }
            UseSU->addPred(SDep(SU, SDep::Data, LDataLatency, Reg));
          }
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU != SU)
              UseSU->addPred(SDep(SU, SDep::Data, DataLatency, *Alias));
          }
        }

        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const TargetInstrDesc &UseTID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseTID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseTID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                // isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }
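
    // As a (hypothetical) illustration of the register edges built above,
    // consider the sequence (r1-r6 are made-up physical registers):
    //   r1 = r2 + r3
    //   r2 = r4 * r5
    //   r6 = r1 + r2
    // The second instruction gets an anti edge (latency 0) on the first's
    // use of r2, the third gets data edges on the defs of r1 and r2 carrying
    // the defining SUnit's latency, and a second def of r1 anywhere below
    // would get an output edge (latency 1) on the first.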

    // Add chain dependencies.
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
    if (TID.isCall() || TID.hasUnmodeledSideEffects()) {
    new_chain:
      // This is the conservative case. Add dependencies on all memory
      // references.
      if (Chain)
        Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      Chain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
      PendingLoads.clear();
      for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
           E = MemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
        I->second = SU;
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
           MemUses.begin(), E = MemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency));
        I->second.clear();
      }
      // See if it is known to just have a single memory reference.
      MachineInstr *ChainMI = Chain->getInstr();
      const TargetInstrDesc &ChainTID = ChainMI->getDesc();
      if (!ChainTID.isCall() &&
          !ChainTID.hasUnmodeledSideEffects() &&
          ChainMI->hasOneMemOperand() &&
          !ChainMI->memoperands_begin()->isVolatile() &&
          ChainMI->memoperands_begin()->getValue())
        // We know that the Chain accesses one specific memory location.
        ChainMMO = &*ChainMI->memoperands_begin();
      else
        // Unknown memory accesses. Assume the worst.
        ChainMMO = 0;
    } else if (TID.mayStore()) {
      if (const Value *V = getUnderlyingObjectForInstr(MI)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Handle the def in MemDefs, if there is one.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end()) {
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          MemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          MemUses.find(V);
        if (J != MemUses.end()) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                       /*isNormalMemory=*/true));
          J->second.clear();
        }
        // Add dependencies from all the PendingLoads, since without
        // memoperands we must assume they alias anything.
        for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
          PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
        // Add a general dependence too, if needed.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else
        // Treat all other stores conservatively.
        goto new_chain;
    } else if (TID.mayLoad()) {
      if (TII->isInvariantLoad(MI)) {
        // Invariant load, no chain dependencies needed!
      } else if (const Value *V = getUnderlyingObjectForInstr(MI)) {
        // A load from a specific PseudoSourceValue. Add precise dependencies.
        std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
        if (I != MemDefs.end())
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
        MemUses[V].push_back(SU);

        // Add a general dependence too, if needed.
        if (Chain && (!ChainMMO ||
                      (ChainMMO->isStore() || ChainMMO->isVolatile())))
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
      } else if (MI->hasVolatileMemoryRef()) {
        // Treat volatile loads conservatively. Note that this includes
        // cases where memoperand information is unavailable.
        goto new_chain;
      } else {
        // A normal load. Depend on the general chain, as well as on
        // all stores. In the absence of MachineMemOperand information,
        // we can't even assume that the load doesn't alias well-behaved
        // memory locations.
        if (Chain)
          Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
        for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
             E = MemDefs.end(); I != E; ++I)
          I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
        PendingLoads.push_back(SU);
      }
    }
  }

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}
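
// As a (hypothetical) illustration of the chain logic above: two stores to
// distinct identified objects (say, two unrelated stack slots) get no edge
// between them; a store is ordered against earlier and later accesses to the
// same underlying object with edges marked isNormalMemory; and a call or
// other side-effecting instruction becomes the new Chain, picking up edges
// from every tracked load and store.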

void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}

void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node.
  SU->Latency =
    InstrItins.getLatency(SU->getInstr()->getDesc().getSchedClass());

  // Simplistic target-independent heuristic: assume that loads take
  // extra time.
  if (InstrItins.isEmpty())
    if (SU->getInstr()->getDesc().mayLoad())
      SU->Latency += 2;
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  // For MachineInstr-based scheduling, we're rescheduling the instructions in
  // the block, so start by removing them from the block.
  while (Begin != InsertPos) {
    MachineBasicBlock::iterator I = Begin;
    ++Begin;
    BB->remove(I);
  }

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    BB->insert(InsertPos, SU->getInstr());
  }

  // Update the Begin iterator, as the first instruction in the block
  // may have been scheduled later.
  if (!Sequence.empty())
    Begin = Sequence[0]->getInstr();

  return BB;
}
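
// A minimal driver sketch (not part of this file; the scheduler object and
// region bounds are assumptions) showing how these entry points are
// typically sequenced for one block:
//
//   Scheduler->StartBlock(MBB);
//   Scheduler->Run(MBB, RegionBegin, RegionEnd, EndIndex);
//   Scheduler->EmitSchedule();
//   Scheduler->FinishBlock();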