ScheduleDAGInstrs.cpp revision 020f4106f820648fd7e91956859844a80de13974
//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
    InstrItins(mf.getTarget().getInstrItineraryData()),
    Defs(TRI->getNumRegs()), Uses(TRI->getNumRegs()),
    LoopRegs(MLI, MDT), FirstDbgValue(0) {
  DbgValues.clear();
}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  ScheduleDAG::Run(bb, end);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
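///
/// For example, given an illustrative IR sequence such as (not taken from
/// any particular test case):
///
///   %i = ptrtoint i8* %obj to i64
///   %a = add i64 %i, 16
///   %p = inttoptr i64 %a to i8*
///
/// starting from the add that feeds the inttoptr, this walks through the
/// add's first operand to the ptrtoint and recovers %obj as the underlying
/// object.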
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}

/// getUnderlyingObject - This is a wrapper around GetUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call Value::getUnderlyingObject to let it do what it does.
  do {
    V = GetUnderlyingObject(V);
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!O->getType()->isPointerTy())
      break;
    V = O;
  } while (1);
  return V;
}

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
                                                const MachineFrameInfo *MFI,
                                                bool &MayAlias) {
  MayAlias = true;
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return 0;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (PSV->isAliased(MFI))
      return 0;

    MayAlias = PSV->mayAlias(MFI);
    return V;
  }

  if (isIdentifiedObject(V))
    return V;

  return 0;
}

void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  LoopRegs.Deps.clear();
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch()) {
      MachineBasicBlock *Header = ML->getHeader();
      for (MachineBasicBlock::livein_iterator I = Header->livein_begin(),
           E = Header->livein_end(); I != E; ++I)
        LoopLiveInRegs.insert(*I);
      LoopRegs.VisitLoop(ML);
    }
}

/// AddSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the liveout registers
/// are used by instructions in the fallthrough block.
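///
/// As an illustration (target and registers hypothetical), in a block
/// ending in:
///
///   %R0<def> = ADD ...   ; R0 holds the return value
///   RET                  ; implicitly uses R0
///
/// the exit SU's use of R0 gives the ADD a data edge to the region exit, so
/// the scheduler accounts for the ADD's latency before the block ends.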
void ScheduleDAGInstrs::AddSchedBarrierDeps() {
  MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->isCall() || ExitMI->isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
      Uses[Reg].push_back(&ExitSU);
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    SmallSet<unsigned, 8> Seen;
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        if (Seen.insert(Reg))
          Uses[Reg].push_back(&ExitSU);
      }
  }
}

void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = NULL;

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  AddSchedBarrierDeps();

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    assert(Defs[i].empty() && "Only BuildGraph should push/pop Defs");
  }

  // Walk the list of instructions, from bottom moving up.
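  // A sketch of the bottom-up register bookkeeping (register names purely
  // illustrative):
  //
  //   (A)  r1 = op r2
  //   (B)  r2 = op r1
  //
  // B is visited first and is recorded in Defs[r2] and Uses[r1]. When A is
  // visited afterwards, its use of r2 finds B in Defs[r2] and adds an
  // anti-dependence (A must read r2 before B overwrites it), and its def of
  // r1 finds B in Uses[r1] and adds a data dependence (B must wait for A's
  // result).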
  MachineInstr *PrevMI = NULL;
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    if (MI && PrevMI) {
      DbgValues.push_back(std::make_pair(PrevMI, MI));
      PrevMI = NULL;
    }

    if (MI->isDebugValue()) {
      PrevMI = MI;
      continue;
    }

    assert(!MI->isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);
    SU->isCall = MI->isCall();
    SU->isCommutable = MI->isCommutable();

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");

      std::vector<SUnit *> &UseList = Uses[Reg];
      // Defs are pushed in the order they are visited and never reordered.
      std::vector<SUnit *> &DefList = Defs[Reg];
      // Optionally add output and anti dependencies. For anti
      // dependencies we use a latency of 0 because for a multi-issue
      // target we want to allow the defining instruction to issue
      // in the same cycle as the using instruction.
      // TODO: Using a latency of 1 here for output dependencies assumes
      //       there's no cost for reusing registers.
      SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
      unsigned AOLatency = (Kind == SDep::Anti) ? 0 : 1;
      for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
        SUnit *DefSU = DefList[i];
        if (DefSU == &ExitSU)
          continue;
        if (DefSU != SU &&
            (Kind != SDep::Output || !MO.isDead() ||
             !DefSU->getInstr()->registerDefIsDead(Reg))) {
          if (Kind == SDep::Anti)
            DefSU->addPred(SDep(SU, Kind, 0, /*Reg=*/Reg));
          else {
            unsigned AOLat = TII->getOutputLatency(InstrItins, MI, j,
                                                   DefSU->getInstr());
            DefSU->addPred(SDep(SU, Kind, AOLat, /*Reg=*/Reg));
          }
        }
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        std::vector<SUnit *> &MemDefList = Defs[*Alias];
        for (unsigned i = 0, e = MemDefList.size(); i != e; ++i) {
          SUnit *DefSU = MemDefList[i];
          if (DefSU == &ExitSU)
            continue;
          if (DefSU != SU &&
              (Kind != SDep::Output || !MO.isDead() ||
               !DefSU->getInstr()->registerDefIsDead(*Alias)))
            DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
        }
      }

      if (MO.isDef()) {
        // Add any data dependencies.
        unsigned DataLatency = SU->Latency;
        for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
          SUnit *UseSU = UseList[i];
          if (UseSU == SU)
            continue;
          unsigned LDataLatency = DataLatency;
          // Optionally add in a special extra latency for nodes that
          // feed addresses.
          // TODO: Do this for register aliases too.
          // TODO: Perhaps we should get rid of
          //       SpecialAddressLatency and just move this into
          //       adjustSchedDependency for the targets that care about it.
          if (SpecialAddressLatency != 0 && !UnitLatencies &&
              UseSU != &ExitSU) {
            MachineInstr *UseMI = UseSU->getInstr();
            const MCInstrDesc &UseMCID = UseMI->getDesc();
            int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
            assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
            if (RegUseIndex >= 0 &&
                (UseMI->mayLoad() || UseMI->mayStore()) &&
                (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
                UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
              LDataLatency += SpecialAddressLatency;
          }
          // Adjust the dependence latency using operand def/use
          // information (if any), and then allow the target to
          // perform its own adjustments.
          const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
          if (!UnitLatencies) {
            ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
            ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
          }
          UseSU->addPred(dep);
        }
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          std::vector<SUnit *> &UseList = Uses[*Alias];
          for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
            SUnit *UseSU = UseList[i];
            if (UseSU == SU)
              continue;
            const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
            if (!UnitLatencies) {
              ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
              ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
            }
            UseSU->addPred(dep);
          }
        }

        // If a def is going to wrap back around to the top of the loop,
        // backschedule it.
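        // For instance (an illustrative scenario), a register defined in a
        // loop latch whose only recorded use is an address computation in
        // the loop header is consumed by the next iteration; the artificial
        // edge to ExitSU added below encourages scheduling the def early so
        // the loop back-edge hides part of its latency.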
        if (!UnitLatencies && DefList.empty()) {
          LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
          if (I != LoopRegs.Deps.end()) {
            const MachineOperand *UseMO = I->second.first;
            unsigned Count = I->second.second;
            const MachineInstr *UseMI = UseMO->getParent();
            unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
            const MCInstrDesc &UseMCID = UseMI->getDesc();
            // TODO: If we knew the total depth of the region here, we could
            // handle the case where the whole loop is inside the region but
            // is large enough that the isScheduleHigh trick isn't needed.
            if (UseMOIdx < UseMCID.getNumOperands()) {
              // Currently, we only support scheduling regions consisting of
              // single basic blocks. Check to see if the instruction is in
              // the same region by checking to see if it has the same parent.
              if (UseMI->getParent() != MI->getParent()) {
                unsigned Latency = SU->Latency;
                if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass())
                  Latency += SpecialAddressLatency;
                // This is a wild guess as to the portion of the latency which
                // will be overlapped by work done outside the current
                // scheduling region.
                Latency -= std::min(Latency, Count);
                // Add the artificial edge.
                ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                    /*Reg=*/0, /*isNormalMemory=*/false,
                                    /*isMustAlias=*/false,
                                    /*isArtificial=*/true));
              } else if (SpecialAddressLatency > 0 &&
                         UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
                // The entire loop body is within the current scheduling region
                // and the latency of this operation is assumed to be greater
                // than the latency of the loop.
                // TODO: Recursively mark data-edge predecessors as
                //       isScheduleHigh too.
                SU->isScheduleHigh = true;
              }
            }
            LoopRegs.Deps.erase(I);
          }
        }

        UseList.clear();
        if (!MO.isDead())
          DefList.clear();

        // Calls will not be reordered because of chain dependencies (see
        // below). Since call operands are dead, calls may continue to be added
        // to the DefList making dependence checking quadratic in the size of
        // the block. Instead, we leave only one call at the back of the
        // DefList.
        if (SU->isCall) {
          while (!DefList.empty() && DefList.back()->isCall)
            DefList.pop_back();
        }
        DefList.push_back(SU);
      } else {
        UseList.push_back(SU);
      }
    }

    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass)
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
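    // To summarize the scheme below: BarrierChain serializes everything
    // after an instruction with unknown side effects; AliasChain (together
    // with AliasMemDefs, AliasMemUses, and PendingLoads) orders references
    // that may alias; NonAliasMemDefs/NonAliasMemUses track references known
    // not to alias. For example, stores to two distinct spill slots are keyed
    // by different Values in NonAliasMemDefs and get no edge between them,
    // while a store and a later load of the same Value get a precise
    // store-to-load edge with STORE_LOAD_LATENCY.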
#define STORE_LOAD_LATENCY 1
    unsigned TrueMemOrderLatency = 0;
    if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
        (MI->hasVolatileMemoryRef() &&
         (!MI->mayLoad() || !MI->isInvariantLoad(AA)))) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (std::map<const Value *, SUnit *>::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      BarrierChain = SU;

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain)
        AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
           E = AliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (MI->mayStore()) {
      bool MayAlias = true;
      TrueMemOrderLatency = STORE_LOAD_LATENCY;
      if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        std::map<const Value *, SUnit *>::iterator I =
          ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        std::map<const Value *, SUnit *>::iterator IE =
          ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          if (MayAlias)
            AliasMemDefs[V] = SU;
          else
            NonAliasMemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        std::map<const Value *, std::vector<SUnit *> >::iterator JE =
          ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
                                       /*Reg=*/0, /*isNormalMemory=*/true));
          J->second.clear();
        }
        if (MayAlias) {
          // Add dependencies from all the PendingLoads, i.e. loads
          // with no underlying object.
          for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
            PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
          // Add dependence on alias chain, if needed.
          if (AliasChain)
            AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
        // Add dependence on barrier chain, if needed.
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } else {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }

      if (!ExitSU.isPred(SU))
        // Push stores up a bit to avoid them getting in between cmp
        // and branches.
        ExitSU.addPred(SDep(SU, SDep::Order, 0,
                            /*Reg=*/0, /*isNormalMemory=*/false,
                            /*isMustAlias=*/false,
                            /*isArtificial=*/true));
    } else if (MI->mayLoad()) {
      bool MayAlias = true;
      TrueMemOrderLatency = 0;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        if (const Value *V =
            getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
          // A load from a specific PseudoSourceValue. Add precise dependencies.
          std::map<const Value *, SUnit *>::iterator I =
            ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          std::map<const Value *, SUnit *>::iterator IE =
            ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                    /*isNormalMemory=*/true));
          if (MayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        } else {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (std::map<const Value *, SUnit *>::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));

          PendingLoads.push_back(SU);
          MayAlias = true;
        }

        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
    }
  }
  if (PrevMI)
    FirstDbgValue = PrevMI;

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  PendingLoads.clear();
}

void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}

void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  // Compute the latency for the node.
  if (!InstrItins || InstrItins->isEmpty()) {
    SU->Latency = 1;

    // Simplistic target-independent heuristic: assume that loads take
    // extra time.
    if (SU->getInstr()->mayLoad())
      SU->Latency += 2;
  } else {
    SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());
  }
}
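/// ComputeOperandLatency - For a data dependence on a known register, refine
/// the dependence latency using per-operand itinerary information when it is
/// available, taking the maximum latency over all of the use instruction's
/// operands that read the register.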
void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  if (!InstrItins || InstrItins->isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    const MachineOperand &MO = DefMI->getOperand(DefIdx);
    if (MO.isReg() && MO.isImplicit() &&
        DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
      // This is an implicit def, getOperandLatency() won't return the correct
      // latency. e.g.
      //   %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
      //   %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
      // What we want is to compute latency between def of %D6/%D7 and use of
      // %Q3 instead.
      DefIdx = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
    }
    MachineInstr *UseMI = Use->getInstr();
    // For all uses of the register, calculate the maximum latency.
    int Latency = -1;
    if (UseMI) {
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx,
                                              UseMI, i);
        Latency = std::max(Latency, UseCycle);
      }
    } else {
      // If UseMI is null, then it must be a scheduling barrier.
      if (!InstrItins || InstrItins->isEmpty())
        return;
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      Latency = InstrItins->getOperandCycle(DefClass, DefIdx);
    }

    // If we found a latency, then replace the existing dependence latency.
    if (Latency >= 0)
      dep.setLatency(Latency);
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  Begin = InsertPos;

  // If the first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(InsertPos, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(InsertPos, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      EmitNoop();

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      Begin = prior(InsertPos);
  }

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrevMI = P.second;
    BB->splice(++OrigPrevMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
  return BB;
}