ScheduleDAGInstrs.cpp revision 3c58ba8ea7ec097d69d7f7be5930a4a4d7405a18
//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "sched-instrs"
#include "ScheduleDAGInstrs.h"
#include "llvm/Operator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallSet.h"
using namespace llvm;

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt,
                                     bool IsPostRAFlag)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()),
    InstrItins(mf.getTarget().getInstrItineraryData()), IsPostRA(IsPostRAFlag),
    UnitLatencies(false), Defs(TRI->getNumRegs()), Uses(TRI->getNumRegs()),
    LoopRegs(MLI, MDT), FirstDbgValue(0) {
  DbgValues.clear();
}

/// Run - perform scheduling.
///
void ScheduleDAGInstrs::Run(MachineBasicBlock *bb,
                            MachineBasicBlock::iterator begin,
                            MachineBasicBlock::iterator end,
                            unsigned endcount) {
  BB = bb;
  Begin = begin;
  InsertPosIndex = endcount;

  // Check to see if the scheduler cares about latencies.
  UnitLatencies = ForceUnitLatencies();

  ScheduleDAG::Run(bb, end);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObject.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant or a multiplied value, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}
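
// For example (a hypothetical IR snippet, for illustration only), the
// function above handles sequences such as:
//   %i = ptrtoint i8* %obj to i64
//   %j = add i64 %i, 16
//   %p = inttoptr i64 %j to i8*
// Starting from %p, getUnderlyingObject below strips the inttoptr, this
// function walks the add back to the ptrtoint, and %obj is recovered.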

/// getUnderlyingObject - This is a wrapper around GetUnderlyingObject
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObject(const Value *V) {
  // First just call GetUnderlyingObject to let it do what it does.
  do {
    V = GetUnderlyingObject(V);
    // If it found an inttoptr, use special code to continue climbing.
    if (Operator::getOpcode(V) != Instruction::IntToPtr)
      break;
    const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
    // If that succeeded in finding a pointer, continue the search.
    if (!O->getType()->isPointerTy())
      break;
    V = O;
  } while (1);
  return V;
}

/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
                                                const MachineFrameInfo *MFI,
                                                bool &MayAlias) {
  MayAlias = true;
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return 0;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return 0;

  V = getUnderlyingObject(V);
  if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
    // For now, ignore PseudoSourceValues which may alias LLVM IR values
    // because the code that uses this function has no way to cope with
    // such aliases.
    if (PSV->isAliased(MFI))
      return 0;

    MayAlias = PSV->mayAlias(MFI);
    return V;
  }

  if (isIdentifiedObject(V))
    return V;

  return 0;
}
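
// Illustrative cases (hypothetical, not exhaustive) for the helper above:
// a reload from a spill slot typically has a PseudoSourceValue that is
// known not to alias LLVM IR values, so it is returned with
// MayAlias = false; a reference to an alloca is an identified object and
// is returned with MayAlias left true; a store through an arbitrary
// pointer argument has no identifiable underlying object and yields null.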

void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
  LoopRegs.Deps.clear();
  if (MachineLoop *ML = MLI.getLoopFor(BB))
    if (BB == ML->getLoopLatch())
      LoopRegs.VisitLoop(ML);
}

/// AddSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use lists. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the liveout registers
/// are used by instructions in the fallthrough block.
void ScheduleDAGInstrs::AddSchedBarrierDeps() {
  MachineInstr *ExitMI = InsertPos != BB->end() ? &*InsertPos : 0;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->isCall() || ExitMI->isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        Uses[Reg].push_back(&ExitSU);
      else
        assert(!IsPostRA && "Virtual register encountered after regalloc.");
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are livein to the successor blocks.
    SmallSet<unsigned, 8> Seen;
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        if (Seen.insert(Reg))
          Uses[Reg].push_back(&ExitSU);
      }
  }
}

/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
/// this SUnit to following instructions in the same scheduling region that
/// depend on the physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  const MachineOperand &MO = MI->getOperand(OperIdx);
  unsigned Reg = MO.getReg();

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
  unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  //       there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (const unsigned *Alias = TRI->getOverlaps(Reg); *Alias; ++Alias) {
    std::vector<SUnit *> &DefList = Defs[*Alias];
    for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
      SUnit *DefSU = DefList[i];
      if (DefSU == &ExitSU)
        continue;
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() ||
           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
        if (Kind == SDep::Anti)
          DefSU->addPred(SDep(SU, Kind, 0, /*Reg=*/*Alias));
        else {
          unsigned AOLat = TII->getOutputLatency(InstrItins, MI, OperIdx,
                                                 DefSU->getInstr());
          DefSU->addPred(SDep(SU, Kind, AOLat, /*Reg=*/*Alias));
        }
      }
    }
  }
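
  // To illustrate the edges created above (a hypothetical physreg
  // sequence; register names are examples only):
  //   %EAX = ADD32ri %EAX<kill>, 1    ; uses, then clobbers EAX
  //   %EAX = MOV32ri 0                ; redefines EAX
  // Walking bottom-up, the MOV is already in the def list when the ADD is
  // visited, so the MOV receives an anti edge (latency 0) on the ADD's use
  // of EAX and an output edge on the ADD's def of EAX, preventing it from
  // being scheduled above the ADD.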

  // Retrieve the UseList to add data dependencies and update uses.
  std::vector<SUnit *> &UseList = Uses[Reg];
  if (MO.isDef()) {
    // Update DefList. Defs are pushed in the order they are visited and
    // never reordered.
    std::vector<SUnit *> &DefList = Defs[Reg];

    // Add any data dependencies.
    unsigned DataLatency = SU->Latency;
    for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
      SUnit *UseSU = UseList[i];
      if (UseSU == SU)
        continue;
      unsigned LDataLatency = DataLatency;
      // Optionally add in a special extra latency for nodes that
      // feed addresses.
      // TODO: Do this for register aliases too.
      // TODO: Perhaps we should get rid of SpecialAddressLatency and just
      //       move this into adjustSchedDependency for the targets that
      //       care about it.
      if (SpecialAddressLatency != 0 && !UnitLatencies &&
          UseSU != &ExitSU) {
        MachineInstr *UseMI = UseSU->getInstr();
        const MCInstrDesc &UseMCID = UseMI->getDesc();
        int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
        assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
        if (RegUseIndex >= 0 &&
            (UseMI->mayLoad() || UseMI->mayStore()) &&
            (unsigned)RegUseIndex < UseMCID.getNumOperands() &&
            UseMCID.OpInfo[RegUseIndex].isLookupPtrRegClass())
          LDataLatency += SpecialAddressLatency;
      }
      // Adjust the dependence latency using operand def/use
      // information (if any), and then allow the target to
      // perform its own adjustments.
      const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
      if (!UnitLatencies) {
        ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
        ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
      }
      UseSU->addPred(dep);
    }
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      std::vector<SUnit *> &UseList = Uses[*Alias];
      for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
        SUnit *UseSU = UseList[i];
        if (UseSU == SU)
          continue;
        const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
        if (!UnitLatencies) {
          ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
          ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
        }
        UseSU->addPred(dep);
      }
    }
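
    // To illustrate the backscheduling below (hypothetical numbers, for
    // illustration only): suppose this def feeds an address operand at the
    // top of the next loop iteration with Latency = 4, and LoopRegs
    // estimates that Count = 2 cycles of that latency are covered by code
    // outside this region. The code below then adds an artificial edge of
    // weight 4 - 2 = 2 to ExitSU, pulling the def earlier in the region.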
    // If a def is going to wrap back around to the top of the loop,
    // backschedule it.
    if (!UnitLatencies && DefList.empty()) {
      LoopDependencies::LoopDeps::iterator I = LoopRegs.Deps.find(Reg);
      if (I != LoopRegs.Deps.end()) {
        const MachineOperand *UseMO = I->second.first;
        unsigned Count = I->second.second;
        const MachineInstr *UseMI = UseMO->getParent();
        unsigned UseMOIdx = UseMO - &UseMI->getOperand(0);
        const MCInstrDesc &UseMCID = UseMI->getDesc();
        // TODO: If we knew the total depth of the region here, we could
        // handle the case where the whole loop is inside the region but
        // is large enough that the isScheduleHigh trick isn't needed.
        if (UseMOIdx < UseMCID.getNumOperands()) {
          // Currently, we only support scheduling regions consisting of
          // single basic blocks. Check to see if the instruction is in
          // the same region by checking to see if it has the same parent.
          if (UseMI->getParent() != MI->getParent()) {
            unsigned Latency = SU->Latency;
            if (UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass())
              Latency += SpecialAddressLatency;
            // This is a wild guess as to the portion of the latency which
            // will be overlapped by work done outside the current
            // scheduling region.
            Latency -= std::min(Latency, Count);
            // Add the artificial edge.
            ExitSU.addPred(SDep(SU, SDep::Order, Latency,
                                /*Reg=*/0, /*isNormalMemory=*/false,
                                /*isMustAlias=*/false,
                                /*isArtificial=*/true));
          } else if (SpecialAddressLatency > 0 &&
                     UseMCID.OpInfo[UseMOIdx].isLookupPtrRegClass()) {
            // The entire loop body is within the current scheduling region
            // and the latency of this operation is assumed to be greater
            // than the latency of the loop.
            // TODO: Recursively mark data-edge predecessors as
            //       isScheduleHigh too.
            SU->isScheduleHigh = true;
          }
        }
        LoopRegs.Deps.erase(I);
      }
    }

    UseList.clear();
    if (!MO.isDead())
      DefList.clear();

    // Calls will not be reordered because of chain dependencies (see
    // below). Since call operands are dead, calls may continue to be added
    // to the DefList making dependence checking quadratic in the size of
    // the block. Instead, we leave only one call at the back of the
    // DefList.
    if (SU->isCall) {
      while (!DefList.empty() && DefList.back()->isCall)
        DefList.pop_back();
    }
    DefList.push_back(SU);
  } else {
    UseList.push_back(SU);
  }
}

/// addVRegDefDeps - Add register output and data dependencies from this SUnit
/// to instructions that occur later in the same scheduling region if they read
/// from or write to the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  unsigned Reg = MI->getOperand(OperIdx).getReg();

  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();

  // Add output dependence to the next nearest def of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the
  // uses are not eliminated sometime during scheduling. The output dependence
  // edge is also useful if output latency exceeds def-use latency.
  SUnit *DefSU = VRegDefs[Reg];
  if (DefSU && DefSU != SU && DefSU != &ExitSU) {
    unsigned OutLatency = TII->getOutputLatency(InstrItins, MI, OperIdx,
                                                DefSU->getInstr());
    DefSU->addPred(SDep(SU, SDep::Output, OutLatency, Reg));
  }
  VRegDefs[Reg] = SU;

  // Add data dependence to any uses of this vreg before the next nearest def.
  //
  // TODO: Handle ExitSU properly.
  //
  // TODO: Data dependence could be handled more efficiently at the use-side.
  std::vector<SUnit*> &UseList = VRegUses[Reg];
  for (std::vector<SUnit*>::const_iterator UI = UseList.begin(),
         UE = UseList.end(); UI != UE; ++UI) {
    SUnit *UseSU = *UI;
    if (UseSU == SU) continue;

    // TODO: Handle "special" address latencies cleanly.
    const SDep& dep = SDep(SU, SDep::Data, SU->Latency, Reg);
    if (!UnitLatencies) {
      // Adjust the dependence latency using operand def/use information, then
      // allow the target to perform its own adjustments.
      ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
      ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
    }
    UseSU->addPred(dep);
  }
  UseList.clear();
}

/// addVRegUseDeps - Add register antidependencies from this SUnit to
/// instructions that occur later in the same scheduling region if they
/// write the virtual register referenced at OperIdx.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
  unsigned Reg = SU->getInstr()->getOperand(OperIdx).getReg();

  // Add antidependence to the following def of the vreg it uses.
  SUnit *DefSU = VRegDefs[Reg];
  if (DefSU && DefSU != SU)
    DefSU->addPred(SDep(SU, SDep::Anti, 0, Reg));

  // Add this SUnit to the use list of the vreg it uses.
  //
  // TODO: pinch the DAG before we see too many uses to avoid quadratic
  // behavior. Limiting the scheduling window can accomplish the same thing.
  VRegUses[Reg].push_back(SU);
}
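
// To illustrate the vreg edges added by the two functions above (a
// hypothetical pre-RA sequence; opcode and vreg names are examples only):
//   %vreg5 = LOAD ...      ; def
//   STORE %vreg5, ...      ; use
//   %vreg5 = ADD ...       ; redef
// Walking bottom-up, addVRegUseDeps gives the redef an anti edge on the
// use, and addVRegDefDeps gives the use a data edge and the redef an
// output edge on the first def.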

void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
  // We'll be allocating one SUnit for each instruction, plus one for
  // the region exit node.
  SUnits.reserve(BB->size());

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = NULL;

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  AddSchedBarrierDeps();

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    assert(Defs[i].empty() && "Only BuildGraph should push/pop Defs");
  }

  // Reinitialize the large VReg vectors, while reusing the memory.
  //
  // Note: this can be an expensive part of DAG building. We may want to be
  // more clever. Reevaluate after VRegUses goes away.
  assert(VRegDefs.size() == 0 && VRegUses.size() == 0 &&
         "Only BuildSchedGraph should access VRegDefs/Uses");
  VRegDefs.resize(MF.getRegInfo().getNumVirtRegs());
  VRegUses.resize(MF.getRegInfo().getNumVirtRegs());

  // Walk the list of instructions, from bottom moving up.
  MachineInstr *PrevMI = NULL;
  for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    if (MI && PrevMI) {
      DbgValues.push_back(std::make_pair(PrevMI, MI));
      PrevMI = NULL;
    }

    if (MI->isDebugValue()) {
      PrevMI = MI;
      continue;
    }

    assert(!MI->isTerminator() && !MI->isLabel() &&
           "Cannot schedule terminators or labels!");
    // Create the SUnit for this MI.
    SUnit *SU = NewSUnit(MI);
    SU->isCall = MI->isCall();
    SU->isCommutable = MI->isCommutable();

    // Assign the Latency field of SU using target-provided information.
    if (UnitLatencies)
      SU->Latency = 1;
    else
      ComputeLatency(SU);

    // Add register-based dependencies (data, anti, and output).
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        addPhysRegDeps(SU, j);
      else {
        assert(!IsPostRA && "Virtual register encountered!");
        if (MO.isDef())
          addVRegDefDeps(SU, j);
        else
          addVRegUseDeps(SU, j);
      }
    }
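
    // To illustrate the chain edges added below (hypothetical memory ops,
    // for illustration only): given
    //   ST [%p], %vreg0
    //   %vreg1 = LD [%p]
    // where the two references may alias, the load is ordered after the
    // store with latency STORE_LOAD_LATENCY (1 cycle) rather than 0,
    // estimating a store-to-load bypass in hardware.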

    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass).
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
#define STORE_LOAD_LATENCY 1
    unsigned TrueMemOrderLatency = 0;
    if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
        (MI->hasVolatileMemoryRef() &&
         (!MI->mayLoad() || !MI->isInvariantLoad(AA)))) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (std::map<const Value *, SUnit *>::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      BarrierChain = SU;

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain)
        AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
             E = AliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
      for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
      }
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (MI->mayStore()) {
      bool MayAlias = true;
      TrueMemOrderLatency = STORE_LOAD_LATENCY;
      if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        std::map<const Value *, SUnit *>::iterator I =
          ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        std::map<const Value *, SUnit *>::iterator IE =
          ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                  /*isNormalMemory=*/true));
          I->second = SU;
        } else {
          if (MayAlias)
            AliasMemDefs[V] = SU;
          else
            NonAliasMemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        std::map<const Value *, std::vector<SUnit *> >::iterator J =
          ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        std::map<const Value *, std::vector<SUnit *> >::iterator JE =
          ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
                                       /*Reg=*/0, /*isNormalMemory=*/true));
          J->second.clear();
        }
        if (MayAlias) {
          // Add dependencies from all the PendingLoads, i.e. loads
          // with no underlying object.
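          // (Illustrative note: PendingLoads holds loads whose address has
          // no identifiable underlying object, so a may-alias store such
          // as this one must be ordered against every one of them.)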
          for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
            PendingLoads[k]->addPred(SDep(SU, SDep::Order,
                                          TrueMemOrderLatency));
          // Add dependence on alias chain, if needed.
          if (AliasChain)
            AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        }
        // Add dependence on barrier chain, if needed.
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      } else {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }

      if (!ExitSU.isPred(SU))
        // Push stores up a bit to avoid them getting in between cmps
        // and branches.
        ExitSU.addPred(SDep(SU, SDep::Order, 0,
                            /*Reg=*/0, /*isNormalMemory=*/false,
                            /*isMustAlias=*/false,
                            /*isArtificial=*/true));
    } else if (MI->mayLoad()) {
      bool MayAlias = true;
      TrueMemOrderLatency = 0;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        if (const Value *V =
            getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
          // A load from a specific PseudoSourceValue. Add precise
          // dependencies.
          std::map<const Value *, SUnit *>::iterator I =
            ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          std::map<const Value *, SUnit *>::iterator IE =
            ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
                                    /*isNormalMemory=*/true));
          if (MayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        } else {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (std::map<const Value *, SUnit *>::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));

          PendingLoads.push_back(SU);
          MayAlias = true;
        }

        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
      }
    }
  }
  if (PrevMI)
    FirstDbgValue = PrevMI;

  for (int i = 0, e = TRI->getNumRegs(); i != e; ++i) {
    Defs[i].clear();
    Uses[i].clear();
  }
  VRegDefs.clear();
  VRegUses.clear();
  PendingLoads.clear();
}

void ScheduleDAGInstrs::FinishBlock() {
  // Nothing to do.
}

void ScheduleDAGInstrs::ComputeLatency(SUnit *SU) {
  // Compute the latency for the node.
  if (!InstrItins || InstrItins->isEmpty()) {
    SU->Latency = 1;

    // Simplistic target-independent heuristic: assume that loads take
    // extra time.
    if (SU->getInstr()->mayLoad())
      SU->Latency += 2;
  } else {
    SU->Latency = TII->getInstrLatency(InstrItins, SU->getInstr());
  }
}
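
// For example (hypothetical, for illustration only): with no itinerary
// data, ComputeLatency above assigns a plain ALU instruction Latency = 1
// and a load Latency = 1 + 2 = 3, so latency-aware schedulers will try to
// keep a few cycles between a load and its consumers.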

void ScheduleDAGInstrs::ComputeOperandLatency(SUnit *Def, SUnit *Use,
                                              SDep& dep) const {
  if (!InstrItins || InstrItins->isEmpty())
    return;

  // For a data dependency with a known register...
  if ((dep.getKind() != SDep::Data) || (dep.getReg() == 0))
    return;

  const unsigned Reg = dep.getReg();

  // ... find the definition of the register in the defining
  // instruction.
  MachineInstr *DefMI = Def->getInstr();
  int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
  if (DefIdx != -1) {
    const MachineOperand &MO = DefMI->getOperand(DefIdx);
    if (MO.isReg() && MO.isImplicit() &&
        DefIdx >= (int)DefMI->getDesc().getNumOperands()) {
      // This is an implicit def, getOperandLatency() won't return the correct
      // latency. e.g.
      //   %D6<def>, %D7<def> = VLD1q16 %R2<kill>, 0, ..., %Q3<imp-def>
      //   %Q1<def> = VMULv8i16 %Q1<kill>, %Q3<kill>, ...
      // What we want is to compute latency between def of %D6/%D7 and use of
      // %Q3 instead.
      DefIdx = DefMI->findRegisterDefOperandIdx(Reg, false, true, TRI);
    }
    MachineInstr *UseMI = Use->getInstr();
    // For all uses of the register, calculate the maximum latency.
    int Latency = -1;
    if (UseMI) {
      for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
        const MachineOperand &MO = UseMI->getOperand(i);
        if (!MO.isReg() || !MO.isUse())
          continue;
        unsigned MOReg = MO.getReg();
        if (MOReg != Reg)
          continue;

        int UseCycle = TII->getOperandLatency(InstrItins, DefMI, DefIdx,
                                              UseMI, i);
        Latency = std::max(Latency, UseCycle);
      }
    } else {
      // If UseMI is null, then it must be a scheduling barrier.
      if (!InstrItins || InstrItins->isEmpty())
        return;
      unsigned DefClass = DefMI->getDesc().getSchedClass();
      Latency = InstrItins->getOperandCycle(DefClass, DefIdx);
    }

    // If we found a latency, then replace the existing dependence latency.
    if (Latency >= 0)
      dep.setLatency(Latency);
  }
}

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
  SU->getInstr()->dump();
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss);
  return oss.str();
}

// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
  Begin = InsertPos;

  // If the first instruction was a DBG_VALUE then put it back.
  if (FirstDbgValue)
    BB->splice(InsertPos, BB, FirstDbgValue);

  // Then re-insert them according to the given schedule.
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    if (SUnit *SU = Sequence[i])
      BB->splice(InsertPos, BB, SU->getInstr());
    else
      // Null SUnit* is a noop.
      EmitNoop();

    // Update the Begin iterator, as the first instruction in the block
    // may have been scheduled later.
    if (i == 0)
      Begin = prior(InsertPos);
  }

  // Reinsert any remaining debug_values.
  for (std::vector<std::pair<MachineInstr *, MachineInstr *> >::iterator
         DI = DbgValues.end(), DE = DbgValues.begin(); DI != DE; --DI) {
    std::pair<MachineInstr *, MachineInstr *> P = *prior(DI);
    MachineInstr *DbgValue = P.first;
    MachineBasicBlock::iterator OrigPrivMI = P.second;
    BB->splice(++OrigPrivMI, BB, DbgValue);
  }
  DbgValues.clear();
  FirstDbgValue = NULL;
  return BB;
}
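
// Typical driver sequence (a hypothetical sketch, for illustration only;
// MySched and EndIndex are made-up names, and real clients subclass
// ScheduleDAGInstrs and implement Schedule()):
//
//   MySched Scheduler(MF, MLI, MDT, /*IsPostRAFlag=*/true);
//   Scheduler.StartBlock(MBB);                          // per-block setup
//   Scheduler.Run(MBB, MBB->begin(),
//                 MBB->getFirstTerminator(), EndIndex); // build + schedule
//   Scheduler.EmitSchedule();  // splice instructions into scheduled order
//   Scheduler.FinishBlock();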