ScheduleDAGSDNodes.cpp revision 12f0dc6bb556976f22d89ebcf42bce273c9e7d38
//===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGSDNodes class, which is a base class used by
// scheduling implementation classes.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "SDNodeDbgValue.h"
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(LoadsClustered, "Number of loads clustered together");

// This allows the latency-based scheduler to notice high latency instructions
// without a target itinerary. The choice of number here has more to do with
// balancing scheduler heuristics than with the actual machine latency.
static cl::opt<int> HighLatencyCycles(
  "sched-high-latency-cycles", cl::Hidden, cl::init(10),
  cl::desc("Roughly estimate the number of cycles that 'long latency' "
           "instructions take for targets with no itinerary"));
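
// Illustrative note: since the option is cl::Hidden it is only listed under
// -help-hidden, but it can still be set on the llc command line when tuning a
// target that lacks an itinerary, e.g.
//
//   llc -sched-high-latency-cycles=20 foo.ll
//
// The value 20 above is an arbitrary example; the default is 10 cycles.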

ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
  : ScheduleDAG(mf),
    InstrItins(mf.getTarget().getInstrItineraryData()) {}

/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb,
                             MachineBasicBlock::iterator insertPos) {
  DAG = dag;
  ScheduleDAG::Run(bb, insertPos);
}

/// NewSUnit - Creates a new SUnit and returns a ptr to it.
///
SUnit *ScheduleDAGSDNodes::NewSUnit(SDNode *N) {
#ifndef NDEBUG
  const SUnit *Addr = 0;
  if (!SUnits.empty())
    Addr = &SUnits[0];
#endif
  SUnits.push_back(SUnit(N, (unsigned)SUnits.size()));
  assert((Addr == 0 || Addr == &SUnits[0]) &&
         "SUnits std::vector reallocated on the fly!");
  SUnits.back().OrigNode = &SUnits.back();
  SUnit *SU = &SUnits.back();
  const TargetLowering &TLI = DAG->getTargetLoweringInfo();
  if (!N ||
      (N->isMachineOpcode() &&
       N->getMachineOpcode() == TargetOpcode::IMPLICIT_DEF))
    SU->SchedulingPref = Sched::None;
  else
    SU->SchedulingPref = TLI.getSchedulingPreference(N);
  return SU;
}

SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
  SUnit *SU = NewSUnit(Old->getNode());
  SU->OrigNode = Old->OrigNode;
  SU->Latency = Old->Latency;
  SU->isVRegCycle = Old->isVRegCycle;
  SU->isCall = Old->isCall;
  SU->isTwoAddress = Old->isTwoAddress;
  SU->isCommutable = Old->isCommutable;
  SU->hasPhysRegDefs = Old->hasPhysRegDefs;
  SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
  SU->isScheduleHigh = Old->isScheduleHigh;
  SU->isScheduleLow = Old->isScheduleLow;
  SU->SchedulingPref = Old->SchedulingPref;
  Old->isCloned = true;
  return SU;
}

/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                      const TargetRegisterInfo *TRI,
                                      const TargetInstrInfo *TII,
                                      unsigned &PhysReg, int &Cost) {
  if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
    return;

  unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return;

  unsigned ResNo = User->getOperand(2).getResNo();
  if (Def->isMachineOpcode()) {
    const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (ResNo >= II.getNumDefs() &&
        II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
      PhysReg = Reg;
      const TargetRegisterClass *RC =
        TRI->getMinimalPhysRegClass(Reg, Def->getValueType(ResNo));
      Cost = RC->getCopyCost();
    }
  }
}

static void AddGlue(SDNode *N, SDValue Glue, bool AddGlue, SelectionDAG *DAG) {
  SmallVector<EVT, 4> VTs;
  SDNode *GlueDestNode = Glue.getNode();

  // Don't add glue from a node to itself.
  if (GlueDestNode == N) return;

  // Don't add glue to something which already has glue.
  if (N->getValueType(N->getNumValues() - 1) == MVT::Glue) return;

  for (unsigned I = 0, E = N->getNumValues(); I != E; ++I)
    VTs.push_back(N->getValueType(I));

  if (AddGlue)
    VTs.push_back(MVT::Glue);

  SmallVector<SDValue, 4> Ops;
  for (unsigned I = 0, E = N->getNumOperands(); I != E; ++I)
    Ops.push_back(N->getOperand(I));

  if (GlueDestNode)
    Ops.push_back(Glue);

  SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
  MachineSDNode::mmo_iterator Begin = 0, End = 0;
  MachineSDNode *MN = dyn_cast<MachineSDNode>(N);

  // Store memory references.
  if (MN) {
    Begin = MN->memoperands_begin();
    End = MN->memoperands_end();
  }

  DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());

  // Reset the memory references.
  if (MN)
    MN->setMemRefs(Begin, End);
}

/// ClusterNeighboringLoads - Force nearby loads together by "gluing" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Glue inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads(SDNode *Node) {
  SDNode *Chain = 0;
  unsigned NumOps = Node->getNumOperands();
  if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
    Chain = Node->getOperand(NumOps-1).getNode();
  if (!Chain)
    return;

  // Look for other loads of the same chain. Find loads that are loading from
  // the same base pointer and different offsets.
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  bool Cluster = false;
  SDNode *Base = Node;
  for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
       I != E; ++I) {
    SDNode *User = *I;
    if (User == Node || !Visited.insert(User))
      continue;
    int64_t Offset1, Offset2;
    if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
        Offset1 == Offset2)
      // FIXME: Should be ok if the addresses are identical. But earlier
      // optimizations really should have eliminated one of the loads.
      continue;
    if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
      Offsets.push_back(Offset1);
    O2SMap.insert(std::make_pair(Offset2, User));
    Offsets.push_back(Offset2);
    if (Offset2 < Offset1)
      Base = User;
    Cluster = true;
  }

  if (!Cluster)
    return;

  // Sort them in increasing order.
  std::sort(Offsets.begin(), Offsets.end());

  // Check if the loads are close enough.
  SmallVector<SDNode*, 4> Loads;
  unsigned NumLoads = 0;
  int64_t BaseOff = Offsets[0];
  SDNode *BaseLoad = O2SMap[BaseOff];
  Loads.push_back(BaseLoad);
  for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
    int64_t Offset = Offsets[i];
    SDNode *Load = O2SMap[Offset];
    if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset, NumLoads))
      break; // Stop right here. Ignore loads that are further away.
    Loads.push_back(Load);
    ++NumLoads;
  }

  if (NumLoads == 0)
    return;

  // Cluster loads by adding MVT::Glue outputs and inputs. This also
  // ensures they are scheduled in order of increasing addresses.
  SDNode *Lead = Loads[0];
  AddGlue(Lead, SDValue(0, 0), true, DAG);

  SDValue InGlue = SDValue(Lead, Lead->getNumValues() - 1);
  for (unsigned I = 1, E = Loads.size(); I != E; ++I) {
    bool OutGlue = I < E - 1;
    SDNode *Load = Loads[I];

    AddGlue(Load, InGlue, OutGlue, DAG);

    if (OutGlue)
      InGlue = SDValue(Load, Load->getNumValues() - 1);

    ++LoadsClustered;
  }
}
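
// Illustrative sketch of the transformation above (the "LD" opcode and the
// offsets are hypothetical): given two machine loads off the same base,
//
//   t1: i32,ch = LD t0, 0          t2: i32,ch = LD t0, 4
//
// AddGlue morphs the lower-addressed load so it also produces MVT::Glue, and
// the higher-addressed load so it consumes that glue:
//
//   t1: i32,ch,glue = LD t0, 0
//   t2: i32,ch      = LD t0, 4, t1:2
//
// The glue edge forces the scheduler to emit t2 immediately after t1, in
// order of increasing address, provided TII->shouldScheduleLoadsNear()
// accepted the pair.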

/// ClusterNodes - Cluster certain nodes which should be scheduled together.
///
void ScheduleDAGSDNodes::ClusterNodes() {
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    SDNode *Node = &*NI;
    if (!Node || !Node->isMachineOpcode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    if (TID.mayLoad())
      // Cluster loads from "near" addresses into combined SUnits.
      ClusterNeighboringLoads(Node);
  }
}

void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating. This
  // ensures that reallocation of the vector won't happen, so SUnit*'s won't
  // get invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Add all nodes in depth first order.
  SmallVector<SDNode*, 64> Worklist;
  SmallPtrSet<SDNode*, 64> Visited;
  Worklist.push_back(DAG->getRoot().getNode());
  Visited.insert(DAG->getRoot().getNode());

  while (!Worklist.empty()) {
    SDNode *NI = Worklist.pop_back_val();

    // Add all operands to the worklist unless they've already been added.
    for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
      if (Visited.insert(NI->getOperand(i).getNode()))
        Worklist.push_back(NI->getOperand(i).getNode());

    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = NewSUnit(NI);

    // See if anything is glued to this node, if so, add them to glued
    // nodes. Nodes can have at most one glue input and one glue output. Glue
    // is required to be the last operand and result of a node.

    // Scan up to find glued preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Glue) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
      if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
        NodeSUnit->isCall = true;
    }

    // Scan down to find any glued succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Glue) {
      SDValue GlueVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Glue result.
      bool HasGlueUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (GlueVal.isOperandOf(*UI)) {
          HasGlueUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          if (N->isMachineOpcode() && TII->get(N->getMachineOpcode()).isCall())
            NodeSUnit->isCall = true;
          break;
        }
      if (!HasGlueUse) break;
    }

    // Schedule zero-latency TokenFactor below any nodes that may increase the
    // schedule height. Otherwise, ancestors of the TokenFactor may appear to
    // have false stalls.
    if (NI->getOpcode() == ISD::TokenFactor)
      NodeSUnit->isScheduleLow = true;

    // If there are glue operands involved, N is now the bottom-most node
    // of the sequence of nodes that are glued together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Compute NumRegDefsLeft. This must be done before AddSchedEdges.
    InitNumRegDefsLeft(NodeSUnit);

    // Assign the Latency field of NodeSUnit using target-provided information.
    ComputeLatency(NodeSUnit);
  }
}
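
// Illustrative sketch of the glue scan above (node shapes and registers are
// hypothetical): a lowered call typically appears as a glued chain such as
//
//   CopyToReg ch, %R0, %argval, glue
//       |
//   CALL &callee, ..., glue
//       |
//   CopyFromReg ch, %R0, glue
//
// BuildSchedUnits gives all three nodes the same NodeId, folds them into a
// single SUnit, records the bottom-most node via setNode(), and marks the
// SUnit isCall because one of the glued nodes is a call.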

void ScheduleDAGSDNodes::AddSchedEdges() {
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->getNode();

    if (MainNode->isMachineOpcode()) {
      unsigned Opc = MainNode->getMachineOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
        if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
          SU->isTwoAddress = true;
          break;
        }
      }
      if (TID.isCommutable())
        SU->isCommutable = true;
    }

    // Find all predecessors and successors of the group.
    for (SDNode *N = SU->getNode(); N; N = N->getGluedNode()) {
      if (N->isMachineOpcode() &&
          TII->get(N->getMachineOpcode()).getImplicitDefs()) {
        SU->hasPhysRegClobbers = true;
        unsigned NumUsed = InstrEmitter::CountResults(N);
        while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
          --NumUsed;    // Skip over unused values at the end.
        if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
          SU->hasPhysRegDefs = true;
      }

      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).getNode();
        if (isPassiveNode(OpN)) continue;   // Not scheduled.
        SUnit *OpSU = &SUnits[OpN->getNodeId()];
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue;           // In the same group.

        EVT OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Glue && "Glued nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        unsigned PhysReg = 0;
        int Cost = 1;
        // Determine if this is a physical register dependency.
        CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
        assert((PhysReg == 0 || !isChain) &&
               "Chain dependence via physreg data?");
        // FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, scheduler
        // emits a copy from the physical register to a virtual register unless
        // it requires a cross class copy (cost < 0). That means we are only
        // treating "expensive to copy" register dependency as physical register
        // dependency. This may change in the future though.
        if (Cost >= 0)
          PhysReg = 0;

        // If this is a ctrl dep, latency is 1.
        unsigned OpLatency = isChain ? 1 : OpSU->Latency;
        // Special-case TokenFactor chains as zero-latency.
        if (isChain && OpN->getOpcode() == ISD::TokenFactor)
          OpLatency = 0;

        const SDep &dep = SDep(OpSU, isChain ? SDep::Order : SDep::Data,
                               OpLatency, PhysReg);
        if (!isChain && !UnitLatencies) {
          ComputeOperandLatency(OpN, N, i, const_cast<SDep &>(dep));
          ST.adjustSchedDependency(OpSU, SU, const_cast<SDep &>(dep));
        }

        if (!SU->addPred(dep) && !dep.isCtrl() && OpSU->NumRegDefsLeft > 1) {
          // Multiple register uses are combined in the same SUnit. For example,
          // we could have a set of glued nodes with all their defs consumed by
          // another set of glued nodes. Register pressure tracking sees this as
          // a single use, so to keep pressure balanced we reduce the defs.
          //
          // We can't tell (without more book-keeping) if this results from
          // glued nodes or duplicate operands. As long as we don't reduce
          // NumRegDefsLeft to zero, we handle the common cases well.
          --OpSU->NumRegDefsLeft;
        }
      }
    }
  }
}
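
// Illustrative sketch of the NumRegDefsLeft adjustment above (the virtual
// registers are hypothetical): if SUnit A is a glued pair defining %v1 and
// %v2, and SUnit B is a glued pair using %v1 in one node and %v2 in the
// other, both uses collapse into the same A->B data edge, so the second
// addPred() call returns false. B is only scheduled once, so A's
// NumRegDefsLeft is reduced here to keep the register pressure heuristics
// from waiting on a use that never shows up as a separate edge.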

/// BuildSchedGraph - Build the SUnit graph from the selection DAG we are
/// given as input. This SUnit graph is similar to the SelectionDAG, but
/// excludes nodes that aren't interesting to scheduling, and represents
/// glued together nodes with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
  // Cluster certain nodes which should be scheduled together.
  ClusterNodes();
  // Populate the SUnits array.
  BuildSchedUnits();
  // Compute all the scheduling dependencies between nodes.
  AddSchedEdges();
}

// Initialize NumNodeDefs for the current Node's opcode.
void ScheduleDAGSDNodes::RegDefIter::InitNodeNumDefs() {
  // Check for phys reg copy.
  if (!Node)
    return;

  if (!Node->isMachineOpcode()) {
    if (Node->getOpcode() == ISD::CopyFromReg)
      NodeNumDefs = 1;
    else
      NodeNumDefs = 0;
    return;
  }
  unsigned POpc = Node->getMachineOpcode();
  if (POpc == TargetOpcode::IMPLICIT_DEF) {
    // No register need be allocated for this.
    NodeNumDefs = 0;
    return;
  }
  unsigned NRegDefs = SchedDAG->TII->get(Node->getMachineOpcode()).getNumDefs();
  // Some instructions define regs that are not represented in the selection DAG
  // (e.g. unused flags). See tMOVi8. Make sure we don't access past NumValues.
  NodeNumDefs = std::min(Node->getNumValues(), NRegDefs);
  DefIdx = 0;
}

// Construct a RegDefIter for this SUnit and find the first valid value.
ScheduleDAGSDNodes::RegDefIter::RegDefIter(const SUnit *SU,
                                           const ScheduleDAGSDNodes *SD)
  : SchedDAG(SD), Node(SU->getNode()), DefIdx(0), NodeNumDefs(0) {
  InitNodeNumDefs();
  Advance();
}

// Advance to the next valid value defined by the SUnit.
void ScheduleDAGSDNodes::RegDefIter::Advance() {
  for (;Node;) { // Visit all glued nodes.
    for (;DefIdx < NodeNumDefs; ++DefIdx) {
      if (!Node->hasAnyUseOfValue(DefIdx))
        continue;
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == TargetOpcode::EXTRACT_SUBREG) {
        // Propagate the incoming (full-register) type. I doubt it's needed.
        ValueType = Node->getOperand(0).getValueType();
      }
      else {
        ValueType = Node->getValueType(DefIdx);
      }
      ++DefIdx;
      return; // Found a normal regdef.
    }
    Node = Node->getGluedNode();
    if (Node == NULL) {
      return; // No values left to visit.
    }
    InitNodeNumDefs();
  }
}

void ScheduleDAGSDNodes::InitNumRegDefsLeft(SUnit *SU) {
  assert(SU->NumRegDefsLeft == 0 && "expect a new node");
  for (RegDefIter I(SU, this); I.IsValid(); I.Advance()) {
    assert(SU->NumRegDefsLeft < USHRT_MAX && "overflow is ok but unexpected");
    ++SU->NumRegDefsLeft;
  }
}
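
// Illustrative usage sketch: from inside a ScheduleDAGSDNodes member (or with
// the DAG passed explicitly), a heuristic can walk every live register value
// an SUnit defines without knowing how many glued nodes it contains. The
// GetValue() accessor is assumed from the RegDefIter declaration in
// ScheduleDAGSDNodes.h.
//
//   for (RegDefIter RegDefPos(SU, this); RegDefPos.IsValid();
//        RegDefPos.Advance()) {
//     EVT VT = RegDefPos.GetValue();  // Type of one register def with uses.
//     // ... feed VT into a register pressure estimate ...
//   }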

void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
  SDNode *N = SU->getNode();

  // TokenFactor operands are considered zero latency, and some schedulers
  // (e.g. Top-Down list) may rely on the fact that operand latency is nonzero
  // whenever node latency is nonzero.
  if (N && N->getOpcode() == ISD::TokenFactor) {
    SU->Latency = 0;
    return;
  }

  // Check to see if the scheduler cares about latencies.
  if (ForceUnitLatencies()) {
    SU->Latency = 1;
    return;
  }

  if (!InstrItins || InstrItins->isEmpty()) {
    if (N && N->isMachineOpcode() &&
        TII->isHighLatencyDef(N->getMachineOpcode()))
      SU->Latency = HighLatencyCycles;
    else
      SU->Latency = 1;
    return;
  }

  // Compute the latency for the node. We use the sum of the latencies for
  // all nodes glued together into this SUnit.
  SU->Latency = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getGluedNode())
    if (N->isMachineOpcode())
      SU->Latency += TII->getInstrLatency(InstrItins, N);
}
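
// Illustrative worked example for ComputeLatency (the per-node cycle counts
// are hypothetical): with no itinerary data, an SUnit whose node is reported
// high latency by TII->isHighLatencyDef() gets Latency = HighLatencyCycles
// (10 by default) and everything else gets Latency = 1. With itinerary data,
// an SUnit made of two glued machine nodes with itinerary latencies of 4 and
// 1 cycles gets Latency = 5.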

void ScheduleDAGSDNodes::ComputeOperandLatency(SDNode *Def, SDNode *Use,
                                               unsigned OpIdx, SDep& dep) const {
  // Check to see if the scheduler cares about latencies.
  if (ForceUnitLatencies())
    return;

  if (dep.getKind() != SDep::Data)
    return;

  unsigned DefIdx = Use->getOperand(OpIdx).getResNo();
  if (Use->isMachineOpcode())
    // Adjust the use operand index by num of defs.
    OpIdx += TII->get(Use->getMachineOpcode()).getNumDefs();
  int Latency = TII->getOperandLatency(InstrItins, Def, DefIdx, Use, OpIdx);
  if (Latency > 1 && Use->getOpcode() == ISD::CopyToReg &&
      !BB->succ_empty()) {
    unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      // This copy is a liveout value. It is likely coalesced, so reduce the
      // latency so as not to penalize the def.
      // FIXME: need target specific adjustment here?
      Latency = (Latency > 1) ? Latency - 1 : 1;
  }
  if (Latency >= 0)
    dep.setLatency(Latency);
}

void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
  if (!SU->getNode()) {
    dbgs() << "PHYS REG COPY\n";
    return;
  }

  SU->getNode()->dump(DAG);
  dbgs() << "\n";
  SmallVector<SDNode *, 4> GluedNodes;
  for (SDNode *N = SU->getNode()->getGluedNode(); N; N = N->getGluedNode())
    GluedNodes.push_back(N);
  while (!GluedNodes.empty()) {
    dbgs() << "    ";
    GluedNodes.back()->dump(DAG);
    dbgs() << "\n";
    GluedNodes.pop_back();
  }
}

namespace {
  struct OrderSorter {
    bool operator()(const std::pair<unsigned, MachineInstr*> &A,
                    const std::pair<unsigned, MachineInstr*> &B) {
      return A.first < B.first;
    }
  };
}

/// ProcessSDDbgValues - Process SDDbgValues associated with this node.
static void ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG,
                               InstrEmitter &Emitter,
                   SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
                               DenseMap<SDValue, unsigned> &VRBaseMap,
                               unsigned Order) {
  if (!N->getHasDebugValue())
    return;

  // Opportunistically insert immediate dbg_value uses, i.e. those with source
  // order number right after N.
  MachineBasicBlock *BB = Emitter.getBlock();
  MachineBasicBlock::iterator InsertPos = Emitter.getInsertPos();
  SmallVector<SDDbgValue*,2> &DVs = DAG->GetDbgValues(N);
  for (unsigned i = 0, e = DVs.size(); i != e; ++i) {
    if (DVs[i]->isInvalidated())
      continue;
    unsigned DVOrder = DVs[i]->getOrder();
    if (!Order || DVOrder == ++Order) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(DVs[i], VRBaseMap);
      if (DbgMI) {
        Orders.push_back(std::make_pair(DVOrder, DbgMI));
        BB->insert(InsertPos, DbgMI);
      }
      DVs[i]->setIsInvalidated();
    }
  }
}

// ProcessSourceNode - Process nodes with source order numbers. These are added
// to a vector which EmitSchedule uses to determine how to insert dbg_value
// instructions in the right order.
static void ProcessSourceNode(SDNode *N, SelectionDAG *DAG,
                              InstrEmitter &Emitter,
                              DenseMap<SDValue, unsigned> &VRBaseMap,
                   SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders,
                              SmallSet<unsigned, 8> &Seen) {
  unsigned Order = DAG->GetOrdering(N);
  if (!Order || !Seen.insert(Order)) {
    // Process any valid SDDbgValues even if the node does not have any order
    // assigned.
    ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, 0);
    return;
  }

  MachineBasicBlock *BB = Emitter.getBlock();
  if (Emitter.getInsertPos() == BB->begin() || BB->back().isPHI()) {
    // Did not insert any instruction.
    Orders.push_back(std::make_pair(Order, (MachineInstr*)0));
    return;
  }

  Orders.push_back(std::make_pair(Order, prior(Emitter.getInsertPos())));
  ProcessSDDbgValues(N, DAG, Emitter, Orders, VRBaseMap, Order);
}
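
// Illustrative sketch of the ordering mechanism above (instruction names and
// order numbers are hypothetical): after emission, Orders holds pairs of
// (source order, last MachineInstr emitted for that order), e.g.
//
//   { (3, %add), (9, %store), (5, %mul) }
//
// EmitSchedule sorts these pairs by order and, for each entry, emits every
// not-yet-placed dbg_value whose order falls in [previous order, this order)
// right after the recorded instruction, so debug values end up in source
// order even though the scheduler reordered the code.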

/// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGSDNodes::EmitSchedule() {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
  SmallVector<std::pair<unsigned, MachineInstr*>, 32> Orders;
  SmallSet<unsigned, 8> Seen;
  bool HasDbg = DAG->hasDebugValues();

  // If this is the first BB, emit byval parameter dbg_value's.
  if (HasDbg && BB->getParent()->begin() == MachineFunction::iterator(BB)) {
    SDDbgInfo::DbgIterator PDI = DAG->ByvalParmDbgBegin();
    SDDbgInfo::DbgIterator PDE = DAG->ByvalParmDbgEnd();
    for (; PDI != PDE; ++PDI) {
      MachineInstr *DbgMI = Emitter.EmitDbgValue(*PDI, VRBaseMap);
      if (DbgMI)
        BB->insert(InsertPos, DbgMI);
    }
  }

  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any glued SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap);
      continue;
    }

    SmallVector<SDNode *, 4> GluedNodes;
    for (SDNode *N = SU->getNode()->getGluedNode(); N;
         N = N->getGluedNode())
      GluedNodes.push_back(N);
    while (!GluedNodes.empty()) {
      SDNode *N = GluedNodes.back();
      Emitter.EmitNode(GluedNodes.back(), SU->OrigNode != SU, SU->isCloned,
                       VRBaseMap);
      // Remember the source order of the inserted instruction.
      if (HasDbg)
        ProcessSourceNode(N, DAG, Emitter, VRBaseMap, Orders, Seen);
      GluedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap);
    // Remember the source order of the inserted instruction.
    if (HasDbg)
      ProcessSourceNode(SU->getNode(), DAG, Emitter, VRBaseMap, Orders,
                        Seen);
  }

  // Insert all the dbg_values which have not already been inserted in source
  // order sequence.
  if (HasDbg) {
    MachineBasicBlock::iterator BBBegin = BB->getFirstNonPHI();

    // Sort the source order instructions and use the order to insert debug
    // values.
    std::sort(Orders.begin(), Orders.end(), OrderSorter());

    SDDbgInfo::DbgIterator DI = DAG->DbgBegin();
    SDDbgInfo::DbgIterator DE = DAG->DbgEnd();
    // Now emit the rest according to source order.
    unsigned LastOrder = 0;
    for (unsigned i = 0, e = Orders.size(); i != e && DI != DE; ++i) {
      unsigned Order = Orders[i].first;
      MachineInstr *MI = Orders[i].second;
      // Insert all SDDbgValue's whose order(s) are before "Order".
      if (!MI)
        continue;
      for (; DI != DE &&
             (*DI)->getOrder() >= LastOrder && (*DI)->getOrder() < Order; ++DI) {
        if ((*DI)->isInvalidated())
          continue;
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI) {
          if (!LastOrder)
            // Insert to start of the BB (after PHIs).
            BB->insert(BBBegin, DbgMI);
          else {
            // Insert at the instruction, which may be in a different
            // block, if the block was split by a custom inserter.
            MachineBasicBlock::iterator Pos = MI;
            MI->getParent()->insert(llvm::next(Pos), DbgMI);
          }
        }
      }
      LastOrder = Order;
    }
    // Add trailing DbgValue's before the terminator. FIXME: May want to add
    // some of them before one or more conditional branches?
    while (DI != DE) {
      MachineBasicBlock *InsertBB = Emitter.getBlock();
      MachineBasicBlock::iterator Pos = Emitter.getBlock()->getFirstTerminator();
      if (!(*DI)->isInvalidated()) {
        MachineInstr *DbgMI = Emitter.EmitDbgValue(*DI, VRBaseMap);
        if (DbgMI)
          InsertBB->insert(Pos, DbgMI);
      }
      ++DI;
    }
  }

  BB = Emitter.getBlock();
  InsertPos = Emitter.getInsertPos();
  return BB;
}