ScheduleDAGSDNodes.cpp revision deb48434e382de441595b0ac39cd585cab092080

//===--- ScheduleDAGSDNodes.cpp - Implement the ScheduleDAGSDNodes class --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGSDNodes class, which is a base class used by
// the SelectionDAG-based scheduling implementations.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "InstrEmitter.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

STATISTIC(LoadsClustered, "Number of loads clustered together");

ScheduleDAGSDNodes::ScheduleDAGSDNodes(MachineFunction &mf)
  : ScheduleDAG(mf) {
}

/// Run - perform scheduling.
///
void ScheduleDAGSDNodes::Run(SelectionDAG *dag, MachineBasicBlock *bb,
                             MachineBasicBlock::iterator insertPos) {
  DAG = dag;
  ScheduleDAG::Run(bb, insertPos);
}

SUnit *ScheduleDAGSDNodes::Clone(SUnit *Old) {
  SUnit *SU = NewSUnit(Old->getNode());
  SU->OrigNode = Old->OrigNode;
  SU->Latency = Old->Latency;
  SU->isTwoAddress = Old->isTwoAddress;
  SU->isCommutable = Old->isCommutable;
  SU->hasPhysRegDefs = Old->hasPhysRegDefs;
  SU->hasPhysRegClobbers = Old->hasPhysRegClobbers;
  Old->isCloned = true;
  return SU;
}

/// CheckForPhysRegDependency - Check if the dependency between def and use of
/// a specified operand is a physical register dependency. If so, returns the
/// register and the cost of copying the register.
static void CheckForPhysRegDependency(SDNode *Def, SDNode *User, unsigned Op,
                                      const TargetRegisterInfo *TRI,
                                      const TargetInstrInfo *TII,
                                      unsigned &PhysReg, int &Cost) {
  if (Op != 2 || User->getOpcode() != ISD::CopyToReg)
    return;

  unsigned Reg = cast<RegisterSDNode>(User->getOperand(1))->getReg();
  if (TargetRegisterInfo::isVirtualRegister(Reg))
    return;

  unsigned ResNo = User->getOperand(2).getResNo();
  if (Def->isMachineOpcode()) {
    const TargetInstrDesc &II = TII->get(Def->getMachineOpcode());
    if (ResNo >= II.getNumDefs() &&
        II.ImplicitDefs[ResNo - II.getNumDefs()] == Reg) {
      PhysReg = Reg;
      const TargetRegisterClass *RC =
        TRI->getPhysicalRegisterRegClass(Reg, Def->getValueType(ResNo));
      Cost = RC->getCopyCost();
    }
  }
}

static void AddFlags(SDNode *N, SDValue Flag, bool AddFlag,
                     SelectionDAG *DAG) {
  SmallVector<EVT, 4> VTs;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
    VTs.push_back(N->getValueType(i));
  if (AddFlag)
    VTs.push_back(MVT::Flag);
  SmallVector<SDValue, 4> Ops;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    Ops.push_back(N->getOperand(i));
  if (Flag.getNode())
    Ops.push_back(Flag);
  SDVTList VTList = DAG->getVTList(&VTs[0], VTs.size());
  DAG->MorphNodeTo(N, N->getOpcode(), VTList, &Ops[0], Ops.size());
}
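
// A sketch of AddFlags in action, with hypothetical nodes t1 and t2: given
// t1 with result list (i32, ch),
//
//   AddFlags(t1, SDValue(0, 0), /*AddFlag=*/true, DAG);
//     // t1's results become (i32, ch, flag); its operands are unchanged.
//   AddFlags(t2, SDValue(t1, 2), /*AddFlag=*/false, DAG);
//     // t2 keeps its results but gains t1's flag result as a trailing
//     // operand, so t2 must be scheduled immediately after t1.
//
// MorphNodeTo mutates the node in place, so existing uses of the original
// result values remain valid.
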
/// ClusterNeighboringLoads - Force nearby loads together by "flagging" them.
/// This function finds loads of the same base and different offsets. If the
/// offsets are not far apart (target specific), it adds MVT::Flag inputs and
/// outputs to ensure they are scheduled together and in order. This
/// optimization may benefit some targets by improving cache locality.
void ScheduleDAGSDNodes::ClusterNeighboringLoads() {
  SmallPtrSet<SDNode*, 16> Visited;
  SmallVector<int64_t, 4> Offsets;
  DenseMap<long long, SDNode*> O2SMap;  // Map from offset to SDNode.
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    SDNode *Node = &*NI;
    if (!Node || !Node->isMachineOpcode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    if (!TID.mayLoad())
      continue;

    SDNode *Chain = 0;
    unsigned NumOps = Node->getNumOperands();
    if (Node->getOperand(NumOps-1).getValueType() == MVT::Other)
      Chain = Node->getOperand(NumOps-1).getNode();
    if (!Chain)
      continue;

    // Look for other loads of the same chain. Find loads that are loading
    // from the same base pointer and different offsets.
    Visited.clear();
    Offsets.clear();
    O2SMap.clear();
    bool Cluster = false;
    SDNode *Base = Node;
    for (SDNode::use_iterator I = Chain->use_begin(), E = Chain->use_end();
         I != E; ++I) {
      SDNode *User = *I;
      if (User == Node || !Visited.insert(User))
        continue;
      int64_t Offset1, Offset2;
      if (!TII->areLoadsFromSameBasePtr(Base, User, Offset1, Offset2) ||
          Offset1 == Offset2)
        // FIXME: Should be ok if their addresses are identical. But earlier
        // optimizations really should have eliminated one of the loads.
        continue;
      if (O2SMap.insert(std::make_pair(Offset1, Base)).second)
        Offsets.push_back(Offset1);
      // Ignore a second load at an already-seen offset.
      if (O2SMap.insert(std::make_pair(Offset2, User)).second)
        Offsets.push_back(Offset2);
      if (Offset2 < Offset1)
        Base = User;
      Cluster = true;
    }

    if (!Cluster)
      continue;

    // Sort them in increasing order.
    std::sort(Offsets.begin(), Offsets.end());

    // Check if the loads are close enough.
    SmallVector<SDNode*, 4> Loads;
    unsigned NumLoads = 0;
    int64_t BaseOff = Offsets[0];
    SDNode *BaseLoad = O2SMap[BaseOff];
    Loads.push_back(BaseLoad);
    for (unsigned i = 1, e = Offsets.size(); i != e; ++i) {
      int64_t Offset = Offsets[i];
      SDNode *Load = O2SMap[Offset];
      if (!TII->shouldScheduleLoadsNear(BaseLoad, Load, BaseOff, Offset,
                                        NumLoads))
        break; // Stop right here. Ignore loads that are further away.
      Loads.push_back(Load);
      ++NumLoads;
    }

    if (NumLoads == 0)
      continue;

    // Cluster loads by adding MVT::Flag outputs and inputs. This also
    // ensures they are scheduled in order of increasing addresses.
    SDNode *Lead = Loads[0];
    AddFlags(Lead, SDValue(0,0), true, DAG);
    SDValue InFlag = SDValue(Lead, Lead->getNumValues()-1);
    for (unsigned i = 1, e = Loads.size(); i != e; ++i) {
      bool OutFlag = i < e-1;
      SDNode *Load = Loads[i];
      AddFlags(Load, InFlag, OutFlag, DAG);
      if (OutFlag)
        InFlag = SDValue(Load, Load->getNumValues()-1);
      ++LoadsClustered;
    }
  }
}
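
// For example (a sketch; node names and offsets are made up), two loads off
// one base pointer sharing a chain:
//
//   t1: i32,ch = LOAD <base+0>, t0
//   t2: i32,ch = LOAD <base+4>, t0
//
// become, after clustering:
//
//   t1: i32,ch,flag = LOAD <base+0>, t0
//   t2: i32,ch      = LOAD <base+4>, t0, t1:2
//
// The flag edge forces every legal schedule to issue t1 and t2 back to
// back, in order of increasing address.
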
void ScheduleDAGSDNodes::BuildSchedUnits() {
  // During scheduling, the NodeId field of SDNode is used to map SDNodes
  // to their associated SUnits by holding SUnits table indices. A value
  // of -1 means the SDNode does not yet have an associated SUnit.
  unsigned NumNodes = 0;
  for (SelectionDAG::allnodes_iterator NI = DAG->allnodes_begin(),
       E = DAG->allnodes_end(); NI != E; ++NI) {
    NI->setNodeId(-1);
    ++NumNodes;
  }

  // Reserve entries in the vector for each of the SUnits we are creating.
  // This ensures that reallocation of the vector won't happen, so SUnit*'s
  // won't get invalidated.
  // FIXME: Multiply by 2 because we may clone nodes during scheduling.
  // This is a temporary workaround.
  SUnits.reserve(NumNodes * 2);

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Add all nodes in depth first order.
  SmallVector<SDNode*, 64> Worklist;
  SmallPtrSet<SDNode*, 64> Visited;
  Worklist.push_back(DAG->getRoot().getNode());
  Visited.insert(DAG->getRoot().getNode());

  while (!Worklist.empty()) {
    SDNode *NI = Worklist.pop_back_val();

    // Add all operands to the worklist unless they've already been added.
    for (unsigned i = 0, e = NI->getNumOperands(); i != e; ++i)
      if (Visited.insert(NI->getOperand(i).getNode()))
        Worklist.push_back(NI->getOperand(i).getNode());

    if (isPassiveNode(NI))  // Leaf node, e.g. a TargetImmediate.
      continue;

    // If this node has already been processed, stop now.
    if (NI->getNodeId() != -1) continue;

    SUnit *NodeSUnit = NewSUnit(NI);

    // See if anything is flagged to this node; if so, add it to the flagged
    // nodes. Nodes can have at most one flag input and one flag output.
    // Flags are required to be the last operand and result of a node.

    // Scan up to find flagged preds.
    SDNode *N = NI;
    while (N->getNumOperands() &&
           N->getOperand(N->getNumOperands()-1).getValueType() == MVT::Flag) {
      N = N->getOperand(N->getNumOperands()-1).getNode();
      assert(N->getNodeId() == -1 && "Node already inserted!");
      N->setNodeId(NodeSUnit->NodeNum);
    }

    // Scan down to find any flagged succs.
    N = NI;
    while (N->getValueType(N->getNumValues()-1) == MVT::Flag) {
      SDValue FlagVal(N, N->getNumValues()-1);

      // There are either zero or one users of the Flag result.
      bool HasFlagUse = false;
      for (SDNode::use_iterator UI = N->use_begin(), E = N->use_end();
           UI != E; ++UI)
        if (FlagVal.isOperandOf(*UI)) {
          HasFlagUse = true;
          assert(N->getNodeId() == -1 && "Node already inserted!");
          N->setNodeId(NodeSUnit->NodeNum);
          N = *UI;
          break;
        }
      if (!HasFlagUse) break;
    }

    // If there are flag operands involved, N is now the bottom-most node
    // of the sequence of nodes that are flagged together.
    // Update the SUnit.
    NodeSUnit->setNode(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NodeSUnit->NodeNum);

    // Assign the Latency field of NodeSUnit using target-provided
    // information.
    if (UnitLatencies)
      NodeSUnit->Latency = 1;
    else
      ComputeLatency(NodeSUnit);
  }
}
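
// As a sketch of the flag scanning above (opcodes and registers are
// illustrative), a call sequence linked through MVT::Flag values:
//
//   t1: ch,flag     = CopyToReg t0, RegX, Val
//   t2: ch,flag     = CALL t1, ..., t1:1
//   t3: i32,ch,flag = CopyFromReg t2, RegY, t2:1
//
// is grouped into a single SUnit: t1, t2, and t3 all get the same NodeId,
// and the SUnit's representative node (getNode()) is the bottom-most node
// of the sequence, t3.
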
void ScheduleDAGSDNodes::AddSchedEdges() {
  const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();

  // Check to see if the scheduler cares about latencies.
  bool UnitLatencies = ForceUnitLatencies();

  // Pass 2: add the preds, succs, etc.
  for (unsigned su = 0, e = SUnits.size(); su != e; ++su) {
    SUnit *SU = &SUnits[su];
    SDNode *MainNode = SU->getNode();

    if (MainNode->isMachineOpcode()) {
      unsigned Opc = MainNode->getMachineOpcode();
      const TargetInstrDesc &TID = TII->get(Opc);
      for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
        if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
          SU->isTwoAddress = true;
          break;
        }
      }
      if (TID.isCommutable())
        SU->isCommutable = true;
    }

    // Find all predecessors and successors of the group.
    for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode()) {
      if (N->isMachineOpcode() &&
          TII->get(N->getMachineOpcode()).getImplicitDefs()) {
        SU->hasPhysRegClobbers = true;
        unsigned NumUsed = InstrEmitter::CountResults(N);
        while (NumUsed != 0 && !N->hasAnyUseOfValue(NumUsed - 1))
          --NumUsed;    // Skip over unused values at the end.
        if (NumUsed > TII->get(N->getMachineOpcode()).getNumDefs())
          SU->hasPhysRegDefs = true;
      }

      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        SDNode *OpN = N->getOperand(i).getNode();
        if (isPassiveNode(OpN)) continue;   // Not scheduled.
        SUnit *OpSU = &SUnits[OpN->getNodeId()];
        assert(OpSU && "Node has no SUnit!");
        if (OpSU == SU) continue;           // In the same group.

        EVT OpVT = N->getOperand(i).getValueType();
        assert(OpVT != MVT::Flag && "Flagged nodes should be in same sunit!");
        bool isChain = OpVT == MVT::Other;

        unsigned PhysReg = 0;
        int Cost = 1;
        // Determine if this is a physical register dependency.
        CheckForPhysRegDependency(OpN, N, i, TRI, TII, PhysReg, Cost);
        assert((PhysReg == 0 || !isChain) &&
               "Chain dependence via physreg data?");
        // FIXME: See ScheduleDAGSDNodes::EmitCopyFromReg. For now, the
        // scheduler emits a copy from the physical register to a virtual
        // register unless it requires a cross class copy (cost < 0). That
        // means we are only treating an "expensive to copy" register
        // dependency as a physical register dependency. This may change in
        // the future though.
        if (Cost >= 0)
          PhysReg = 0;

        SDep dep(OpSU, isChain ? SDep::Order : SDep::Data,
                 OpSU->Latency, PhysReg);
        if (!isChain && !UnitLatencies) {
          ComputeOperandLatency(OpSU, SU, dep);
          ST.adjustSchedDependency(OpSU, SU, dep);
        }

        SU->addPred(dep);
      }
    }
  }
}
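
// A sketch of the edges this produces: if SUnit A defines a value that
// SUnit B reads, B gets a Data predecessor edge on A; if B merely follows
// A through an MVT::Other (chain) operand, it gets an Order edge instead.
// Both carry A's latency:
//
//   SU(B)->addPred(SDep(SU(A), SDep::Data,  SU(A)->Latency, /*Reg=*/0));
//   SU(B)->addPred(SDep(SU(A), SDep::Order, SU(A)->Latency));
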
/// BuildSchedGraph - Build the SUnit graph from the input SelectionDAG.
/// This SUnit graph is similar to the SelectionDAG, but excludes nodes that
/// aren't interesting to scheduling, and represents flagged-together nodes
/// with a single SUnit.
void ScheduleDAGSDNodes::BuildSchedGraph(AliasAnalysis *AA) {
  // Cluster loads from "near" addresses into combined SUnits.
  ClusterNeighboringLoads();
  // Populate the SUnits array.
  BuildSchedUnits();
  // Compute all the scheduling dependencies between nodes.
  AddSchedEdges();
}

void ScheduleDAGSDNodes::ComputeLatency(SUnit *SU) {
  const InstrItineraryData &InstrItins = TM.getInstrItineraryData();

  // Compute the latency for the node. We use the sum of the latencies for
  // all nodes flagged together into this SUnit.
  SU->Latency = 0;
  for (SDNode *N = SU->getNode(); N; N = N->getFlaggedNode())
    if (N->isMachineOpcode()) {
      SU->Latency += InstrItins.
        getStageLatency(TII->get(N->getMachineOpcode()).getSchedClass());
    }
}

void ScheduleDAGSDNodes::dumpNode(const SUnit *SU) const {
  if (!SU->getNode()) {
    dbgs() << "PHYS REG COPY\n";
    return;
  }

  SU->getNode()->dump(DAG);
  dbgs() << "\n";
  SmallVector<SDNode *, 4> FlaggedNodes;
  for (SDNode *N = SU->getNode()->getFlaggedNode(); N; N = N->getFlaggedNode())
    FlaggedNodes.push_back(N);
  while (!FlaggedNodes.empty()) {
    dbgs() << "    ";
    FlaggedNodes.back()->dump(DAG);
    dbgs() << "\n";
    FlaggedNodes.pop_back();
  }
}

/// EmitSchedule - Emit the machine code in scheduled order.
MachineBasicBlock *ScheduleDAGSDNodes::
EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
  InstrEmitter Emitter(BB, InsertPos);
  DenseMap<SDValue, unsigned> VRBaseMap;
  DenseMap<SUnit*, unsigned> CopyVRBaseMap;
  for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
    SUnit *SU = Sequence[i];
    if (!SU) {
      // Null SUnit* is a noop.
      EmitNoop();
      continue;
    }

    // For pre-regalloc scheduling, create instructions corresponding to the
    // SDNode and any flagged SDNodes and append them to the block.
    if (!SU->getNode()) {
      // Emit a copy.
      EmitPhysRegCopy(SU, CopyVRBaseMap);
      continue;
    }

    SmallVector<SDNode *, 4> FlaggedNodes;
    for (SDNode *N = SU->getNode()->getFlaggedNode(); N;
         N = N->getFlaggedNode())
      FlaggedNodes.push_back(N);
    while (!FlaggedNodes.empty()) {
      Emitter.EmitNode(FlaggedNodes.back(), SU->OrigNode != SU, SU->isCloned,
                       VRBaseMap, EM);
      FlaggedNodes.pop_back();
    }
    Emitter.EmitNode(SU->getNode(), SU->OrigNode != SU, SU->isCloned,
                     VRBaseMap, EM);
  }

  BB = Emitter.getBlock();
  InsertPos = Emitter.getInsertPos();
  return BB;
}
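
// A rough sketch of how this class is typically driven (the factory call is
// illustrative; concrete subclasses override Schedule(), which is where
// BuildSchedGraph is usually invoked):
//
//   ScheduleDAGSDNodes *SD = createDefaultScheduler(...); // hypothetical use
//   SD->Run(CurDAG, BB, InsertPos);  // builds the graph and schedules it
//   BB = SD->EmitSchedule(EM);       // emits MachineInstrs in Sequence order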