ScheduleDAGInstrs.cpp revision b86a0cdb674549d8493043331cecd9cbf53b80da
//===---- ScheduleDAGInstrs.cpp - MachineInstr Rescheduling ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements the ScheduleDAGInstrs class, which implements re-scheduling
// of MachineInstrs.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "misched"
#include "llvm/CodeGen/ScheduleDAGInstrs.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/RegisterPressure.h"
#include "llvm/CodeGen/ScheduleDFS.h"
#include "llvm/IR/Operator.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

static cl::opt<bool> EnableAASchedMI("enable-aa-sched-mi", cl::Hidden,
    cl::ZeroOrMore, cl::init(false),
    cl::desc("Enable use of AA during MI DAG construction"));
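
// Being a cl::opt, the flag above is visible on the llc command line. A
// minimal, illustrative invocation (test.ll is a hypothetical input) that
// turns on AA queries while the MI scheduler builds its dependence graph:
//
//   llc -enable-misched -enable-aa-sched-mi test.ll
//
// When the flag is off (the default), addChainDependency below adds chain
// edges conservatively without consulting AliasAnalysis.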

ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
                                     const MachineLoopInfo &mli,
                                     const MachineDominatorTree &mdt,
                                     bool IsPostRAFlag,
                                     LiveIntervals *lis)
  : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()), LIS(lis),
    IsPostRA(IsPostRAFlag), CanHandleTerminators(false), FirstDbgValue(0) {
  assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals");
  DbgValues.clear();
  assert(!(IsPostRA && MRI.getNumVirtRegs()) &&
         "Virtual registers must be removed prior to PostRA scheduling");

  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
  SchedModel.init(*ST.getSchedModel(), &ST, TII);
}

/// getUnderlyingObjectFromInt - This is the function that does the work of
/// looking through basic ptrtoint+arithmetic+inttoptr sequences.
static const Value *getUnderlyingObjectFromInt(const Value *V) {
  do {
    if (const Operator *U = dyn_cast<Operator>(V)) {
      // If we find a ptrtoint, we can transfer control back to the
      // regular getUnderlyingObjectFromInt.
      if (U->getOpcode() == Instruction::PtrToInt)
        return U->getOperand(0);
      // If we find an add of a constant, a multiplied value, or a phi, it's
      // likely that the other operand will lead us to the base
      // object. We don't have to worry about the case where the
      // object address is somehow being computed by the multiply,
      // because our callers only care when the result is an
      // identifiable object.
      if (U->getOpcode() != Instruction::Add ||
          (!isa<ConstantInt>(U->getOperand(1)) &&
           Operator::getOpcode(U->getOperand(1)) != Instruction::Mul &&
           !isa<PHINode>(U->getOperand(1))))
        return V;
      V = U->getOperand(0);
    } else {
      return V;
    }
    assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
  } while (1);
}
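
// An illustrative IR sequence (hypothetical, not from this file) of the kind
// the helper above looks through:
//
//   %int = ptrtoint i8* %obj to i64
//   %sum = add i64 %int, 16
//   %ptr = inttoptr i64 %sum to i8*
//
// Starting from %sum (the operand of the inttoptr), the Add-with-constant case
// steps to %int, and the PtrToInt case then returns %obj as the underlying
// object.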

/// getUnderlyingObjects - This is a wrapper around GetUnderlyingObjects
/// and adds support for basic ptrtoint+arithmetic+inttoptr sequences.
static void getUnderlyingObjects(const Value *V,
                                 SmallVectorImpl<Value *> &Objects) {
  SmallPtrSet<const Value*, 16> Visited;
  SmallVector<const Value *, 4> Working(1, V);
  do {
    V = Working.pop_back_val();

    SmallVector<Value *, 4> Objs;
    GetUnderlyingObjects(const_cast<Value *>(V), Objs);

    for (SmallVector<Value *, 4>::iterator I = Objs.begin(), IE = Objs.end();
         I != IE; ++I) {
      V = *I;
      if (!Visited.insert(V))
        continue;
      if (Operator::getOpcode(V) == Instruction::IntToPtr) {
        const Value *O =
          getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
        if (O->getType()->isPointerTy()) {
          Working.push_back(O);
          continue;
        }
      }
      Objects.push_back(const_cast<Value *>(V));
    }
  } while (!Working.empty());
}

/// getUnderlyingObjectsForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object.
static void getUnderlyingObjectsForInstr(const MachineInstr *MI,
                   const MachineFrameInfo *MFI,
                   SmallVectorImpl<std::pair<const Value *, bool> > &Objects) {
  if (!MI->hasOneMemOperand() ||
      !(*MI->memoperands_begin())->getValue() ||
      (*MI->memoperands_begin())->isVolatile())
    return;

  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return;

  SmallVector<Value *, 4> Objs;
  getUnderlyingObjects(V, Objs);

  for (SmallVector<Value *, 4>::iterator I = Objs.begin(), IE = Objs.end();
       I != IE; ++I) {
    bool MayAlias = true;
    V = *I;

    if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
      // For now, ignore PseudoSourceValues which may alias LLVM IR values
      // because the code that uses this function has no way to cope with
      // such aliases.

      if (PSV->isAliased(MFI)) {
        Objects.clear();
        return;
      }

      MayAlias = PSV->mayAlias(MFI);
    } else if (!isIdentifiedObject(V)) {
      Objects.clear();
      return;
    }

    Objects.push_back(std::make_pair(V, MayAlias));
  }
}

void ScheduleDAGInstrs::startBlock(MachineBasicBlock *bb) {
  BB = bb;
}

void ScheduleDAGInstrs::finishBlock() {
  // Subclasses should no longer refer to the old block.
  BB = 0;
}

/// Initialize the DAG and common scheduler state for the current scheduling
/// region. This does not actually create the DAG, only clears it. The
/// scheduling driver may call BuildSchedGraph multiple times per scheduling
/// region.
void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb,
                                    MachineBasicBlock::iterator begin,
                                    MachineBasicBlock::iterator end,
                                    unsigned endcount) {
  assert(bb == BB && "startBlock should set BB");
  RegionBegin = begin;
  RegionEnd = end;
  EndIndex = endcount;
  MISUnitMap.clear();

  ScheduleDAG::clearDAG();
}

/// Close the current scheduling region. Don't clear any state in case the
/// driver wants to refer to the previous scheduling region.
void ScheduleDAGInstrs::exitRegion() {
  // Nothing to do.
}

/// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to the scheduling barrier by adding
/// the exit SU to the register defs and use list. This is because we want to
/// make sure instructions which define registers that are either used by
/// the terminator or are live-out are properly scheduled. This is
/// especially important when the definition latency of the return value(s)
/// is too high to be hidden by the branch or when the live-out registers
/// are used by instructions in the fallthrough block.
void ScheduleDAGInstrs::addSchedBarrierDeps() {
  MachineInstr *ExitMI = RegionEnd != BB->end() ? &*RegionEnd : 0;
  ExitSU.setInstr(ExitMI);
  bool AllDepKnown = ExitMI &&
    (ExitMI->isCall() || ExitMI->isBarrier());
  if (ExitMI && AllDepKnown) {
    // If it's a call or a barrier, add dependencies on the defs and uses of
    // the instruction.
    for (unsigned i = 0, e = ExitMI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = ExitMI->getOperand(i);
      if (!MO.isReg() || MO.isDef()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
      else {
        assert(!IsPostRA && "Virtual register encountered after regalloc.");
        if (MO.readsReg()) // ignore undef operands
          addVRegUseDeps(&ExitSU, i);
      }
    }
  } else {
    // For others, e.g. fallthrough, conditional branch, assume the exit
    // uses all the registers that are live-in to the successor blocks.
    assert(Uses.empty() && "Uses in set before adding deps?");
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        if (!Uses.contains(Reg))
          Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
      }
  }
}

/// MO is an operand of SU's instruction that defines a physical register. Add
/// data dependencies from SU to any uses of the physical register.
void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
  const MachineOperand &MO = SU->getInstr()->getOperand(OperIdx);
  assert(MO.isDef() && "expect physreg def");

  // Ask the target if address-backscheduling is desirable, and if so how much.
  const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();

  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Uses.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Uses.find(*Alias); I != Uses.end(); ++I) {
      SUnit *UseSU = I->SU;
      if (UseSU == SU)
        continue;

      // Adjust the dependence latency using operand def/use information,
      // then allow the target to perform its own adjustments.
      int UseOp = I->OpIdx;
      MachineInstr *RegUse = 0;
      SDep Dep;
      if (UseOp < 0)
        Dep = SDep(SU, SDep::Artificial);
      else {
        // Set hasPhysRegDefs only for physreg defs that have a use within
        // the scheduling region.
        SU->hasPhysRegDefs = true;
        Dep = SDep(SU, SDep::Data, *Alias);
        RegUse = UseSU->getInstr();
      }
      Dep.setLatency(
        SchedModel.computeOperandLatency(SU->getInstr(), OperIdx, RegUse,
                                         UseOp));

      ST.adjustSchedDependency(SU, UseSU, Dep);
      UseSU->addPred(Dep);
    }
  }
}

/// addPhysRegDeps - Add register dependencies (data, anti, and output) from
/// this SUnit to following instructions in the same scheduling region that
/// depend on the physical register referenced at OperIdx.
void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  const MachineOperand &MO = MI->getOperand(OperIdx);

  // Optionally add output and anti dependencies. For anti
  // dependencies we use a latency of 0 because for a multi-issue
  // target we want to allow the defining instruction to issue
  // in the same cycle as the using instruction.
  // TODO: Using a latency of 1 here for output dependencies assumes
  //       there's no cost for reusing registers.
  SDep::Kind Kind = MO.isUse() ? SDep::Anti : SDep::Output;
  for (MCRegAliasIterator Alias(MO.getReg(), TRI, true);
       Alias.isValid(); ++Alias) {
    if (!Defs.contains(*Alias))
      continue;
    for (Reg2SUnitsMap::iterator I = Defs.find(*Alias); I != Defs.end(); ++I) {
      SUnit *DefSU = I->SU;
      if (DefSU == &ExitSU)
        continue;
      if (DefSU != SU &&
          (Kind != SDep::Output || !MO.isDead() ||
           !DefSU->getInstr()->registerDefIsDead(*Alias))) {
        if (Kind == SDep::Anti)
          DefSU->addPred(SDep(SU, Kind, /*Reg=*/*Alias));
        else {
          SDep Dep(SU, Kind, /*Reg=*/*Alias);
          Dep.setLatency(
            SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
          DefSU->addPred(Dep);
        }
      }
    }
  }

  if (!MO.isDef()) {
    SU->hasPhysRegUses = true;
    // Either insert a new Reg2SUnits entry with an empty SUnits list, or
    // retrieve the existing SUnits list for this register's uses.
    // Push this SUnit on the use list.
    Uses.insert(PhysRegSUOper(SU, OperIdx, MO.getReg()));
  }
  else {
    addPhysRegDataDeps(SU, OperIdx);
    unsigned Reg = MO.getReg();

    // Clear this register's use list.
    if (Uses.contains(Reg))
      Uses.eraseAll(Reg);

    if (!MO.isDead()) {
      Defs.eraseAll(Reg);
    } else if (SU->isCall) {
      // Calls will not be reordered because of chain dependencies (see
      // below). Since call operands are dead, calls may continue to be added
      // to the DefList making dependence checking quadratic in the size of
      // the block. Instead, we leave only one call at the back of the
      // DefList.
      Reg2SUnitsMap::RangePair P = Defs.equal_range(Reg);
      Reg2SUnitsMap::iterator B = P.first;
      Reg2SUnitsMap::iterator I = P.second;
      for (bool isBegin = I == B; !isBegin; /* empty */) {
        isBegin = (--I) == B;
        if (!I->SU->isCall)
          break;
        I = Defs.erase(I);
      }
    }

    // Defs are pushed in the order they are visited and never reordered.
    Defs.insert(PhysRegSUOper(SU, OperIdx, Reg));
  }
}
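
// An illustrative x86-like fragment (hypothetical) showing the edges the two
// functions above create. The builder walks bottom-up, so I3 is processed
// first, then I2, then I1:
//
//   I1: %EAX = MOV32ri 0           ; def of EAX
//   I2: %EBX = ADD32rr %EBX, %EAX  ; use of EAX
//   I3: %EAX = MOV32ri 1           ; redef of EAX
//
// Processing I2's use finds I3 in Defs and gives I3 an anti dependence on I2
// (latency 0). Processing I1's def gives I3 an output dependence on I1, and
// addPhysRegDataDeps gives I2 a data dependence on I1 whose latency comes
// from SchedModel.computeOperandLatency.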

/// addVRegDefDeps - Add register output and data dependencies from this SUnit
/// to instructions that occur later in the same scheduling region if they read
/// from or write to the virtual register defined at OperIdx.
///
/// TODO: Hoist loop induction variable increments. This has to be
/// reevaluated. Generally, IV scheduling should be done before coalescing.
void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) {
  const MachineInstr *MI = SU->getInstr();
  unsigned Reg = MI->getOperand(OperIdx).getReg();

  // Singly defined vregs do not have output/anti dependencies.
  // The current operand is a def, so we have at least one.
  // Check here if there are any others...
  if (MRI.hasOneDef(Reg))
    return;

  // Add output dependence to the next nearest def of this vreg.
  //
  // Unless this definition is dead, the output dependence should be
  // transitively redundant with antidependencies from this definition's
  // uses. We're conservative for now until we have a way to guarantee the uses
  // are not eliminated sometime during scheduling. The output dependence edge
  // is also useful if output latency exceeds def-use latency.
  VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
  if (DefI == VRegDefs.end())
    VRegDefs.insert(VReg2SUnit(Reg, SU));
  else {
    SUnit *DefSU = DefI->SU;
    if (DefSU != SU && DefSU != &ExitSU) {
      SDep Dep(SU, SDep::Output, Reg);
      Dep.setLatency(
        SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()));
      DefSU->addPred(Dep);
    }
    DefI->SU = SU;
  }
}

/// addVRegUseDeps - Add a register data dependency if the instruction that
/// defines the virtual register used at OperIdx is mapped to an SUnit. Add a
/// register antidependency from this SUnit to instructions that occur later in
/// the same scheduling region if they write the virtual register.
///
/// TODO: Handle ExitSU "uses" properly.
void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) {
  MachineInstr *MI = SU->getInstr();
  unsigned Reg = MI->getOperand(OperIdx).getReg();

  // Lookup this operand's reaching definition.
  assert(LIS && "vreg dependencies requires LiveIntervals");
  LiveRangeQuery LRQ(LIS->getInterval(Reg), LIS->getInstructionIndex(MI));
  VNInfo *VNI = LRQ.valueIn();

  // VNI will be valid because MachineOperand::readsReg() is checked by caller.
  assert(VNI && "No value to read by operand");
  MachineInstr *Def = LIS->getInstructionFromIndex(VNI->def);
  // Phis and other noninstructions (after coalescing) have a NULL Def.
  if (Def) {
    SUnit *DefSU = getSUnit(Def);
    if (DefSU) {
      // The reaching Def lives within this scheduling region.
      // Create a data dependence.
      SDep dep(DefSU, SDep::Data, Reg);
      // Adjust the dependence latency using operand def/use information, then
      // allow the target to perform its own adjustments.
      int DefOp = Def->findRegisterDefOperandIdx(Reg);
      dep.setLatency(SchedModel.computeOperandLatency(Def, DefOp, MI, OperIdx));

      const TargetSubtargetInfo &ST = TM.getSubtarget<TargetSubtargetInfo>();
      ST.adjustSchedDependency(DefSU, SU, const_cast<SDep &>(dep));
      SU->addPred(dep);
    }
  }

  // Add antidependence to the following def of the vreg it uses.
  VReg2SUnitMap::iterator DefI = VRegDefs.find(Reg);
  if (DefI != VRegDefs.end() && DefI->SU != SU)
    DefI->SU->addPred(SDep(SU, SDep::Anti, Reg));
}

/// Return true if MI is an instruction we are unable to reason about
/// (like a call or something with unmodeled side effects).
static inline bool isGlobalMemoryObject(AliasAnalysis *AA, MachineInstr *MI) {
  if (MI->isCall() || MI->hasUnmodeledSideEffects() ||
      (MI->hasOrderedMemoryRef() &&
       (!MI->mayLoad() || !MI->isInvariantLoad(AA))))
    return true;
  return false;
}

// This MI might have either incomplete info, or be known to be unsafe
// to deal with (i.e. a volatile object).
static inline bool isUnsafeMemoryObject(MachineInstr *MI,
                                        const MachineFrameInfo *MFI) {
  if (!MI || MI->memoperands_empty())
    return true;
  // We purposely do not check for hasOneMemOperand() here,
  // in the hope of triggering an assert downstream in order to
  // finish the implementation.
  if ((*MI->memoperands_begin())->isVolatile() ||
      MI->hasUnmodeledSideEffects())
    return true;
  const Value *V = (*MI->memoperands_begin())->getValue();
  if (!V)
    return true;

  SmallVector<Value *, 4> Objs;
  getUnderlyingObjects(V, Objs);
  for (SmallVector<Value *, 4>::iterator I = Objs.begin(),
         IE = Objs.end(); I != IE; ++I) {
    V = *I;

    if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
      // Similarly to getUnderlyingObjectsForInstr:
      // For now, ignore PseudoSourceValues which may alias LLVM IR values
      // because the code that uses this function has no way to cope with
      // such aliases.
      if (PSV->isAliased(MFI))
        return true;
    }

    // Does this pointer refer to a distinct and identifiable object?
    if (!isIdentifiedObject(V))
      return true;
  }

  return false;
}

/// This returns true if the two MIs need a chain edge between them.
/// Even if these are not memory operations, we still may need
/// chain deps between them. The question really is: could
/// these two MIs be reordered during scheduling from a memory dependency
/// point of view?
static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                             MachineInstr *MIa,
                             MachineInstr *MIb) {
  // Cover a trivial case: no edge is needed from a node to itself.
  if (MIa == MIb)
    return false;

  if (isUnsafeMemoryObject(MIa, MFI) || isUnsafeMemoryObject(MIb, MFI))
    return true;

  // If we are dealing with two "normal" loads, we do not need an edge
  // between them - they could be reordered.
  if (!MIa->mayStore() && !MIb->mayStore())
    return false;

  // To this point the analysis is generic. From here on we do need AA.
  if (!AA)
    return true;

  MachineMemOperand *MMOa = *MIa->memoperands_begin();
  MachineMemOperand *MMOb = *MIb->memoperands_begin();

  // FIXME: Need to handle multiple memory operands to support all targets.
  if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand())
    llvm_unreachable("Multiple memory operands.");

  // The following interface to AA is fashioned after DAGCombiner::isAlias
  // and operates with MachineMemOperand offsets with some important
  // assumptions:
  //   - LLVM fundamentally assumes flat address spaces.
  //   - MachineMemOperand offsets can *only* result from legalization and
  //     cannot affect queries other than the trivial case of overlap
  //     checking.
  //   - These offsets never wrap and never step outside
  //     of allocated objects.
  //   - There should never be any negative offsets here.
  //
  // FIXME: Modify the API to hide this math from the "user".
  // FIXME: Even before we go to AA we can reason locally about some
  // memory objects. It can save compile time, and possibly catch some
  // corner cases not currently covered.

  assert((MMOa->getOffset() >= 0) && "Negative MachineMemOperand offset");
  assert((MMOb->getOffset() >= 0) && "Negative MachineMemOperand offset");

  int64_t MinOffset = std::min(MMOa->getOffset(), MMOb->getOffset());
  int64_t Overlapa = MMOa->getSize() + MMOa->getOffset() - MinOffset;
  int64_t Overlapb = MMOb->getSize() + MMOb->getOffset() - MinOffset;

  AliasAnalysis::AliasResult AAResult = AA->alias(
    AliasAnalysis::Location(MMOa->getValue(), Overlapa,
                            MMOa->getTBAAInfo()),
    AliasAnalysis::Location(MMOb->getValue(), Overlapb,
                            MMOb->getTBAAInfo()));

  return (AAResult != AliasAnalysis::NoAlias);
}
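
// A worked example of the overlap math above, with illustrative numbers:
//
//   MMOa: Offset = 8, Size = 4      MMOb: Offset = 0, Size = 16
//   MinOffset = min(8, 0)           = 0
//   Overlapa  = 4 + 8 - MinOffset   = 12
//   Overlapb  = 16 + 0 - MinOffset  = 16
//
// Each access is extended back to the common MinOffset, so AA is asked whether
// 12 bytes from MMOa's value and 16 bytes from MMOb's value may alias; any
// result other than NoAlias forces a chain edge.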

/// This recursive function iterates over chain deps of SUb looking for
/// the "latest" node that needs a chain edge to SUa.
static unsigned
iterateChainSucc(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                 SUnit *SUa, SUnit *SUb, SUnit *ExitSU, unsigned *Depth,
                 SmallPtrSet<const SUnit*, 16> &Visited) {
  if (!SUa || !SUb || SUb == ExitSU)
    return *Depth;

  // Remember visited nodes.
  if (!Visited.insert(SUb))
    return *Depth;
  // If there is _some_ dependency already in place, do not
  // descend any further.
  // TODO: Need to make sure that if that dependency got eliminated or ignored
  // for any reason in the future, we would not violate DAG topology.
  // Currently it does not happen, but makes an implicit assumption about
  // future implementation.
  //
  // Independently, if we encounter a node that is some sort of global
  // object (like a call) we already have a full set of dependencies to it
  // and we can stop descending.
  if (SUa->isSucc(SUb) ||
      isGlobalMemoryObject(AA, SUb->getInstr()))
    return *Depth;

  // If we do need an edge, or we have exceeded the depth budget,
  // add that edge to the predecessors chain of SUb,
  // and stop descending.
  if (*Depth > 200 ||
      MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr())) {
    SUb->addPred(SDep(SUa, SDep::MayAliasMem));
    return *Depth;
  }
  // Track current depth.
  (*Depth)++;
  // Iterate over chain dependencies only.
  for (SUnit::const_succ_iterator I = SUb->Succs.begin(), E = SUb->Succs.end();
       I != E; ++I)
    if (I->isCtrl())
      iterateChainSucc(AA, MFI, SUa, I->getSUnit(), ExitSU, Depth, Visited);
  return *Depth;
}

/// This function assumes that "downward" from SU there exists a
/// tail/leaf of the already constructed DAG. It iterates downward and
/// checks whether SU can alias any node dominated
/// by it.
static void adjustChainDeps(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                            SUnit *SU, SUnit *ExitSU,
                            std::set<SUnit *> &CheckList,
                            unsigned LatencyToLoad) {
  if (!SU)
    return;

  SmallPtrSet<const SUnit*, 16> Visited;
  unsigned Depth = 0;

  for (std::set<SUnit *>::iterator I = CheckList.begin(), IE = CheckList.end();
       I != IE; ++I) {
    if (SU == *I)
      continue;
    if (MIsNeedChainEdge(AA, MFI, SU->getInstr(), (*I)->getInstr())) {
      SDep Dep(SU, SDep::MayAliasMem);
      Dep.setLatency(((*I)->getInstr()->mayLoad()) ? LatencyToLoad : 0);
      (*I)->addPred(Dep);
    }
    // Now go through all the chain successors and iterate from them.
    // Keep track of visited nodes.
    for (SUnit::const_succ_iterator J = (*I)->Succs.begin(),
           JE = (*I)->Succs.end(); J != JE; ++J)
      if (J->isCtrl())
        iterateChainSucc(AA, MFI, SU, J->getSUnit(),
                         ExitSU, &Depth, Visited);
  }
}

/// Check whether two objects need a chain edge; if so, add it,
/// otherwise remember the rejected SU.
static inline
void addChainDependency(AliasAnalysis *AA, const MachineFrameInfo *MFI,
                        SUnit *SUa, SUnit *SUb,
                        std::set<SUnit *> &RejectList,
                        unsigned TrueMemOrderLatency = 0,
                        bool isNormalMemory = false) {
  // If this is a false dependency,
  // do not add the edge, but remember the rejected node.
  if (!EnableAASchedMI ||
      MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr())) {
    SDep Dep(SUa, isNormalMemory ? SDep::MayAliasMem : SDep::Barrier);
    Dep.setLatency(TrueMemOrderLatency);
    SUb->addPred(Dep);
  }
  else {
    // Duplicate entries should be ignored.
    RejectList.insert(SUb);
    DEBUG(dbgs() << "\tReject chain dep between SU("
          << SUa->NodeNum << ") and SU("
          << SUb->NodeNum << ")\n");
  }
}

/// Create an SUnit for each real instruction, numbered in top-down topological
/// order. The instruction order A < B implies that no edge exists from B to A.
///
/// Map each real instruction to its SUnit.
///
/// After initSUnits, the SUnits vector cannot be resized and the scheduler may
/// hang onto SUnit pointers. We may relax this in the future by using SUnit IDs
/// instead of pointers.
///
/// MachineScheduler relies on initSUnits numbering the nodes by their order in
/// the original instruction list.
void ScheduleDAGInstrs::initSUnits() {
  // We'll be allocating one SUnit for each real instruction in the region,
  // which is contained within a basic block.
  SUnits.reserve(BB->size());

  for (MachineBasicBlock::iterator I = RegionBegin; I != RegionEnd; ++I) {
    MachineInstr *MI = I;
    if (MI->isDebugValue())
      continue;

    SUnit *SU = newSUnit(MI);
    MISUnitMap[MI] = SU;

    SU->isCall = MI->isCall();
    SU->isCommutable = MI->isCommutable();

    // Assign the Latency field of SU using target-provided information.
    SU->Latency = SchedModel.computeInstrLatency(SU->getInstr());
  }
}

/// If RPTracker is non-null, compute register pressure as a side effect. The
/// DAG builder is an efficient place to do it because it already visits
/// operands.
void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
                                        RegPressureTracker *RPTracker) {
  // Create an SUnit for each real instruction.
  initSUnits();

  // We build scheduling units by walking a block's instruction list from
  // bottom to top.

  // Remember where a generic side-effecting instruction is as we proceed.
  SUnit *BarrierChain = 0, *AliasChain = 0;

  // Memory references to specific known memory locations are tracked
  // so that they can be given more precise dependencies. We track
  // separately the known memory locations that may alias and those
  // that are known not to alias.
  MapVector<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
  MapVector<const Value *, std::vector<SUnit *> > AliasMemUses,
                                                  NonAliasMemUses;
  std::set<SUnit*> RejectMemNodes;

  // Remove any stale debug info; sometimes BuildSchedGraph is called again
  // without emitting the info from the previous call.
  DbgValues.clear();
  FirstDbgValue = NULL;

  assert(Defs.empty() && Uses.empty() &&
         "Only BuildGraph should update Defs/Uses");
  Defs.setUniverse(TRI->getNumRegs());
  Uses.setUniverse(TRI->getNumRegs());

  assert(VRegDefs.empty() && "Only BuildSchedGraph may access VRegDefs");
  // FIXME: Allow SparseSet to reserve space for the creation of virtual
  // registers during scheduling. Don't artificially inflate the Universe
  // because we want to assert that vregs are not created during DAG building.
  VRegDefs.setUniverse(MRI.getNumVirtRegs());

  // Model data dependencies between instructions being scheduled and the
  // ExitSU.
  addSchedBarrierDeps();

  // Walk the list of instructions, from bottom moving up.
  MachineInstr *DbgMI = NULL;
  for (MachineBasicBlock::iterator MII = RegionEnd, MIE = RegionBegin;
       MII != MIE; --MII) {
    MachineInstr *MI = prior(MII);
    if (MI && DbgMI) {
      DbgValues.push_back(std::make_pair(DbgMI, MI));
      DbgMI = NULL;
    }

    if (MI->isDebugValue()) {
      DbgMI = MI;
      continue;
    }
    if (RPTracker) {
      RPTracker->recede();
      assert(RPTracker->getPos() == prior(MII) && "RPTracker can't find MI");
    }

    assert((CanHandleTerminators || (!MI->isTerminator() && !MI->isLabel())) &&
           "Cannot schedule terminators or labels!");

    SUnit *SU = MISUnitMap[MI];
    assert(SU && "No SUnit mapped to this MI");

    // Add register-based dependencies (data, anti, and output).
    bool HasVRegDef = false;
    for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) {
      const MachineOperand &MO = MI->getOperand(j);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;

      if (TRI->isPhysicalRegister(Reg))
        addPhysRegDeps(SU, j);
      else {
        assert(!IsPostRA && "Virtual register encountered!");
        if (MO.isDef()) {
          HasVRegDef = true;
          addVRegDefDeps(SU, j);
        }
        else if (MO.readsReg()) // ignore undef operands
          addVRegUseDeps(SU, j);
      }
    }
    // If we haven't seen any uses in this scheduling region, create a
    // dependence edge to ExitSU to model the live-out latency. This is
    // required for vreg defs with no in-region use, and prefetches with no
    // vreg def.
    //
    // FIXME: NumDataSuccs would be more precise than NumSuccs here. This
    // check currently relies on being called before adding chain deps.
    if (SU->NumSuccs == 0 && SU->Latency > 1
        && (HasVRegDef || MI->mayLoad())) {
      SDep Dep(SU, SDep::Artificial);
      Dep.setLatency(SU->Latency - 1);
      ExitSU.addPred(Dep);
    }

    // Add chain dependencies.
    // Chain dependencies used to enforce memory order should have
    // latency of 0 (except for true dependency of Store followed by
    // aliased Load... we estimate that with a single cycle of latency
    // assuming the hardware will bypass).
    // Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
    // after stack slots are lowered to actual addresses.
    // TODO: Use an AliasAnalysis and do real alias-analysis queries, and
    // produce more precise dependence information.
    unsigned TrueMemOrderLatency = MI->mayStore() ? 1 : 0;
    if (isGlobalMemoryObject(AA, MI)) {
      // Be conservative with these and add dependencies on all memory
      // references, even those that are known to not alias.
      for (MapVector<const Value *, SUnit *>::iterator I =
             NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
        I->second->addPred(SDep(SU, SDep::Barrier));
      }
      for (MapVector<const Value *, std::vector<SUnit *> >::iterator I =
             NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i) {
          SDep Dep(SU, SDep::Barrier);
          Dep.setLatency(TrueMemOrderLatency);
          I->second[i]->addPred(Dep);
        }
      }
      // Add SU to the barrier chain.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Barrier));
      BarrierChain = SU;
      // This is a barrier event that acts as a pivotal node in the DAG,
      // so it is safe to clear the list of exposed nodes.
      adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes,
                      TrueMemOrderLatency);
      RejectMemNodes.clear();
      NonAliasMemDefs.clear();
      NonAliasMemUses.clear();

      // fall-through
    new_alias_chain:
      // Chain all possibly aliasing memory references through SU.
      if (AliasChain) {
        unsigned ChainLatency = 0;
        if (AliasChain->getInstr()->mayLoad())
          ChainLatency = TrueMemOrderLatency;
        addChainDependency(AA, MFI, SU, AliasChain, RejectMemNodes,
                           ChainLatency);
      }
      AliasChain = SU;
      for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
        addChainDependency(AA, MFI, SU, PendingLoads[k], RejectMemNodes,
                           TrueMemOrderLatency);
      for (MapVector<const Value *, SUnit *>::iterator I =
             AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
        addChainDependency(AA, MFI, SU, I->second, RejectMemNodes);
      for (MapVector<const Value *, std::vector<SUnit *> >::iterator I =
             AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
        for (unsigned i = 0, e = I->second.size(); i != e; ++i)
          addChainDependency(AA, MFI, SU, I->second[i], RejectMemNodes,
                             TrueMemOrderLatency);
      }
      adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes,
                      TrueMemOrderLatency);
      PendingLoads.clear();
      AliasMemDefs.clear();
      AliasMemUses.clear();
    } else if (MI->mayStore()) {
      SmallVector<std::pair<const Value *, bool>, 4> Objs;
      getUnderlyingObjectsForInstr(MI, MFI, Objs);

      if (Objs.empty()) {
        // Treat all other stores conservatively.
        goto new_alias_chain;
      }

      bool MayAlias = false;
      for (SmallVector<std::pair<const Value *, bool>, 4>::iterator
             K = Objs.begin(), KE = Objs.end(); K != KE; ++K) {
        const Value *V = K->first;
        bool ThisMayAlias = K->second;
        if (ThisMayAlias)
          MayAlias = true;

        // A store to a specific PseudoSourceValue. Add precise dependencies.
        // Record the def in MemDefs, first adding a dep if there is
        // an existing def.
        MapVector<const Value *, SUnit *>::iterator I =
          ((ThisMayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
        MapVector<const Value *, SUnit *>::iterator IE =
          ((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
        if (I != IE) {
          addChainDependency(AA, MFI, SU, I->second, RejectMemNodes, 0, true);
          I->second = SU;
        } else {
          if (ThisMayAlias)
            AliasMemDefs[V] = SU;
          else
            NonAliasMemDefs[V] = SU;
        }
        // Handle the uses in MemUses, if there are any.
        MapVector<const Value *, std::vector<SUnit *> >::iterator J =
          ((ThisMayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
        MapVector<const Value *, std::vector<SUnit *> >::iterator JE =
          ((ThisMayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
        if (J != JE) {
          for (unsigned i = 0, e = J->second.size(); i != e; ++i)
            addChainDependency(AA, MFI, SU, J->second[i], RejectMemNodes,
                               TrueMemOrderLatency, true);
          J->second.clear();
        }
      }
      if (MayAlias) {
        // Add dependencies from all the PendingLoads, i.e. loads
        // with no underlying object.
        for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
          addChainDependency(AA, MFI, SU, PendingLoads[k], RejectMemNodes,
                             TrueMemOrderLatency);
        // Add dependence on alias chain, if needed.
        if (AliasChain)
          addChainDependency(AA, MFI, SU, AliasChain, RejectMemNodes);
        // But we also should check dependent instructions for the
        // SU in question.
        adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes,
                        TrueMemOrderLatency);
      }
      // Add dependence on barrier chain, if needed.
      // There is no point in checking aliasing on a barrier event. Even if
      // SU and the barrier _could_ be reordered, they should not. In
      // addition, we have lost all RejectMemNodes below the barrier.
      if (BarrierChain)
        BarrierChain->addPred(SDep(SU, SDep::Barrier));

      if (!ExitSU.isPred(SU))
        // Push stores up a bit to avoid them getting in between cmps
        // and branches.
        ExitSU.addPred(SDep(SU, SDep::Artificial));
    } else if (MI->mayLoad()) {
      bool MayAlias = true;
      if (MI->isInvariantLoad(AA)) {
        // Invariant load, no chain dependencies needed!
      } else {
        SmallVector<std::pair<const Value *, bool>, 4> Objs;
        getUnderlyingObjectsForInstr(MI, MFI, Objs);

        if (Objs.empty()) {
          // A load with no underlying object. Depend on all
          // potentially aliasing stores.
          for (MapVector<const Value *, SUnit *>::iterator I =
                 AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
            addChainDependency(AA, MFI, SU, I->second, RejectMemNodes);

          PendingLoads.push_back(SU);
          MayAlias = true;
        } else {
          MayAlias = false;
        }

        for (SmallVector<std::pair<const Value *, bool>, 4>::iterator
               J = Objs.begin(), JE = Objs.end(); J != JE; ++J) {
          const Value *V = J->first;
          bool ThisMayAlias = J->second;

          if (ThisMayAlias)
            MayAlias = true;

          // A load from a specific PseudoSourceValue. Add precise
          // dependencies.
          MapVector<const Value *, SUnit *>::iterator I =
            ((ThisMayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
          MapVector<const Value *, SUnit *>::iterator IE =
            ((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
          if (I != IE)
            addChainDependency(AA, MFI, SU, I->second, RejectMemNodes, 0,
                               true);
          if (ThisMayAlias)
            AliasMemUses[V].push_back(SU);
          else
            NonAliasMemUses[V].push_back(SU);
        }
        if (MayAlias)
          adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes, /*Latency=*/0);
        // Add dependencies on alias and barrier chains, if needed.
        if (MayAlias && AliasChain)
          addChainDependency(AA, MFI, SU, AliasChain, RejectMemNodes);
        if (BarrierChain)
          BarrierChain->addPred(SDep(SU, SDep::Barrier));
      }
    }
  }
  if (DbgMI)
    FirstDbgValue = DbgMI;

  Defs.clear();
  Uses.clear();
  VRegDefs.clear();
  PendingLoads.clear();
}
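
// A minimal sketch (not part of this file) of how a driver is expected to use
// the interface above; a real driver such as the MachineScheduler pass also
// computes a schedule and moves instructions before leaving the region:
//
//   static void scheduleWholeBlock(ScheduleDAGInstrs &DAG,
//                                  MachineBasicBlock *MBB,
//                                  AliasAnalysis *AA) {
//     DAG.startBlock(MBB);
//     // One region spanning the block; endcount is 0 because no
//     // instructions remain below the region.
//     DAG.enterRegion(MBB, MBB->begin(), MBB->end(), /*endcount=*/0);
//     DAG.buildSchedGraph(AA, /*RPTracker=*/0); // may be called repeatedly
//     // ... visit DAG.SUnits and pick an order here ...
//     DAG.exitRegion();
//     DAG.finishBlock();
//   }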

void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  SU->getInstr()->dump();
#endif
}

std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
  std::string s;
  raw_string_ostream oss(s);
  if (SU == &EntrySU)
    oss << "<entry>";
  else if (SU == &ExitSU)
    oss << "<exit>";
  else
    SU->getInstr()->print(oss, &TM, /*SkipOpers=*/true);
  return oss.str();
}

/// Return the basic block label. It is not necessarily unique because a block
/// contains multiple scheduling regions. But it is fine for visualization.
std::string ScheduleDAGInstrs::getDAGName() const {
  return "dag." + BB->getFullName();
}

//===----------------------------------------------------------------------===//
// SchedDFSResult Implementation
//===----------------------------------------------------------------------===//

namespace llvm {
/// \brief Internal state used to compute SchedDFSResult.
class SchedDFSImpl {
  SchedDFSResult &R;

  /// Join DAG nodes into equivalence classes by their subtree.
  IntEqClasses SubtreeClasses;
  /// List PredSU, SuccSU pairs that represent data edges between subtrees.
  std::vector<std::pair<const SUnit*, const SUnit*> > ConnectionPairs;

  struct RootData {
    unsigned NodeID;
    unsigned ParentNodeID;  // Parent node (member of the parent subtree).
    unsigned SubInstrCount; // Instr count in this tree only, not children.

    RootData(unsigned id): NodeID(id),
                           ParentNodeID(SchedDFSResult::InvalidSubtreeID),
                           SubInstrCount(0) {}

    unsigned getSparseSetIndex() const { return NodeID; }
  };

  SparseSet<RootData> RootSet;

public:
  SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) {
    RootSet.setUniverse(R.DFSNodeData.size());
  }

  /// Return true if this node has been visited by the DFS traversal.
  ///
  /// During visitPostorderNode the Node's SubtreeID is assigned to the Node
  /// ID. Later, SubtreeID is updated but remains valid.
  bool isVisited(const SUnit *SU) const {
    return R.DFSNodeData[SU->NodeNum].SubtreeID
      != SchedDFSResult::InvalidSubtreeID;
  }

  /// Initialize this node's instruction count. We don't need to flag the node
  /// visited until visitPostorder because the DAG cannot have cycles.
  void visitPreorder(const SUnit *SU) {
    R.DFSNodeData[SU->NodeNum].InstrCount =
      SU->getInstr()->isTransient() ? 0 : 1;
  }

  /// Called once for each node after all predecessors are visited. Revisit
  /// this node's predecessors and potentially join them now that we know the
  /// ILP of the other predecessors.
  void visitPostorderNode(const SUnit *SU) {
    // Mark this node as the root of a subtree. It may be joined with its
    // successors later.
    R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum;
    RootData RData(SU->NodeNum);
    RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1;

    // If any predecessors are still in their own subtree, they either cannot
    // be joined or are large enough to remain separate. If this parent node's
    // total instruction count is not greater than a child subtree by at least
    // the subtree limit, then try to join it now since splitting subtrees is
    // only useful if multiple high-pressure paths are possible.
    unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount;
    for (SUnit::const_pred_iterator
           PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) {
      if (PI->getKind() != SDep::Data)
        continue;
      unsigned PredNum = PI->getSUnit()->NodeNum;
      if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit)
        joinPredSubtree(*PI, SU, /*CheckLimit=*/false);

      // Either link or merge the TreeData entry from the child to the parent.
      if (R.DFSNodeData[PredNum].SubtreeID == PredNum) {
        // If the predecessor's parent is invalid, this is a tree edge and the
        // current node is the parent.
        if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID)
          RootSet[PredNum].ParentNodeID = SU->NodeNum;
      }
      else if (RootSet.count(PredNum)) {
        // The predecessor is not a root, but is still in the root set. This
        // must be the new parent that it was just joined to. Note that
        // RootSet[PredNum].ParentNodeID may either be invalid or may still be
        // set to the original parent.
        RData.SubInstrCount += RootSet[PredNum].SubInstrCount;
        RootSet.erase(PredNum);
      }
    }
    RootSet[SU->NodeNum] = RData;
  }

  /// Called once for each tree edge after calling visitPostOrderNode on the
  /// predecessor. Increment the parent node's instruction count and
  /// preemptively join this subtree to its parent's if it is small enough.
  void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) {
    R.DFSNodeData[Succ->NodeNum].InstrCount
      += R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount;
    joinPredSubtree(PredDep, Succ);
  }

  /// Add a connection for cross edges.
  void visitCrossEdge(const SDep &PredDep, const SUnit *Succ) {
    ConnectionPairs.push_back(std::make_pair(PredDep.getSUnit(), Succ));
  }

  /// Set each node's subtree ID to the representative ID and record
  /// connections between trees.
  void finalize() {
    SubtreeClasses.compress();
    R.DFSTreeData.resize(SubtreeClasses.getNumClasses());
    assert(SubtreeClasses.getNumClasses() == RootSet.size()
           && "number of roots should match trees");
    for (SparseSet<RootData>::const_iterator
           RI = RootSet.begin(), RE = RootSet.end(); RI != RE; ++RI) {
      unsigned TreeID = SubtreeClasses[RI->NodeID];
      if (RI->ParentNodeID != SchedDFSResult::InvalidSubtreeID)
        R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[RI->ParentNodeID];
      R.DFSTreeData[TreeID].SubInstrCount = RI->SubInstrCount;
      // Note that SubInstrCount may be greater than InstrCount if we joined
      // subtrees across a cross edge. InstrCount will be attributed to the
      // original parent, while SubInstrCount will be attributed to the joined
      // parent.
    }
    R.SubtreeConnections.resize(SubtreeClasses.getNumClasses());
    R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses());
    DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n");
    for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) {
      R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx];
      DEBUG(dbgs() << "  SU(" << Idx << ") in tree "
            << R.DFSNodeData[Idx].SubtreeID << '\n');
    }
    for (std::vector<std::pair<const SUnit*, const SUnit*> >::const_iterator
           I = ConnectionPairs.begin(), E = ConnectionPairs.end();
         I != E; ++I) {
      unsigned PredTree = SubtreeClasses[I->first->NodeNum];
      unsigned SuccTree = SubtreeClasses[I->second->NodeNum];
      if (PredTree == SuccTree)
        continue;
      unsigned Depth = I->first->getDepth();
      addConnection(PredTree, SuccTree, Depth);
      addConnection(SuccTree, PredTree, Depth);
    }
  }

protected:
  /// Join the predecessor subtree with the successor that is its DFS
  /// parent. Apply some heuristics before joining.
  bool joinPredSubtree(const SDep &PredDep, const SUnit *Succ,
                       bool CheckLimit = true) {
    assert(PredDep.getKind() == SDep::Data && "Subtrees are for data edges");

    // Check if the predecessor is already joined.
    const SUnit *PredSU = PredDep.getSUnit();
    unsigned PredNum = PredSU->NodeNum;
    if (R.DFSNodeData[PredNum].SubtreeID != PredNum)
      return false;

    // Four is the magic number of successors before a node is considered a
    // pinch point.
    unsigned NumDataSucs = 0;
    for (SUnit::const_succ_iterator SI = PredSU->Succs.begin(),
           SE = PredSU->Succs.end(); SI != SE; ++SI) {
      if (SI->getKind() == SDep::Data) {
        if (++NumDataSucs >= 4)
          return false;
      }
    }
    if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit)
      return false;
    R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum;
    SubtreeClasses.join(Succ->NodeNum, PredNum);
    return true;
  }

  /// Called by finalize() to record a connection between trees.
  void addConnection(unsigned FromTree, unsigned ToTree, unsigned Depth) {
    if (!Depth)
      return;

    do {
      SmallVectorImpl<SchedDFSResult::Connection> &Connections =
        R.SubtreeConnections[FromTree];
      for (SmallVectorImpl<SchedDFSResult::Connection>::iterator
             I = Connections.begin(), E = Connections.end(); I != E; ++I) {
        if (I->TreeID == ToTree) {
          I->Level = std::max(I->Level, Depth);
          return;
        }
      }
      Connections.push_back(SchedDFSResult::Connection(ToTree, Depth));
      FromTree = R.DFSTreeData[FromTree].ParentTreeID;
    } while (FromTree != SchedDFSResult::InvalidSubtreeID);
  }
};
} // namespace llvm

namespace {
/// \brief Manage the stack used by a reverse depth-first search over the DAG.
class SchedDAGReverseDFS {
  std::vector<std::pair<const SUnit*, SUnit::const_pred_iterator> > DFSStack;
public:
  bool isComplete() const { return DFSStack.empty(); }

  void follow(const SUnit *SU) {
    DFSStack.push_back(std::make_pair(SU, SU->Preds.begin()));
  }
  void advance() { ++DFSStack.back().second; }

  const SDep *backtrack() {
    DFSStack.pop_back();
    return DFSStack.empty() ? 0 : llvm::prior(DFSStack.back().second);
  }

  const SUnit *getCurr() const { return DFSStack.back().first; }

  SUnit::const_pred_iterator getPred() const { return DFSStack.back().second; }

  SUnit::const_pred_iterator getPredEnd() const {
    return getCurr()->Preds.end();
  }
};
} // anonymous

static bool hasDataSucc(const SUnit *SU) {
  for (SUnit::const_succ_iterator
         SI = SU->Succs.begin(), SE = SU->Succs.end(); SI != SE; ++SI) {
    if (SI->getKind() == SDep::Data && !SI->getSUnit()->isBoundaryNode())
      return true;
  }
  return false;
}

/// Compute an ILP metric for all nodes in the subDAG reachable via depth-first
/// search from this root.
void SchedDFSResult::compute(ArrayRef<SUnit> SUnits) {
  if (!IsBottomUp)
    llvm_unreachable("Top-down ILP metric is unimplemented");

  SchedDFSImpl Impl(*this);
  for (ArrayRef<SUnit>::const_iterator
         SI = SUnits.begin(), SE = SUnits.end(); SI != SE; ++SI) {
    const SUnit *SU = &*SI;
    if (Impl.isVisited(SU) || hasDataSucc(SU))
      continue;

    SchedDAGReverseDFS DFS;
    Impl.visitPreorder(SU);
    DFS.follow(SU);
    for (;;) {
      // Traverse the leftmost path as far as possible.
      while (DFS.getPred() != DFS.getPredEnd()) {
        const SDep &PredDep = *DFS.getPred();
        DFS.advance();
        // Ignore non-data edges.
        if (PredDep.getKind() != SDep::Data
            || PredDep.getSUnit()->isBoundaryNode()) {
          continue;
        }
        // An already visited edge is a cross edge, assuming an acyclic DAG.
        if (Impl.isVisited(PredDep.getSUnit())) {
          Impl.visitCrossEdge(PredDep, DFS.getCurr());
          continue;
        }
        Impl.visitPreorder(PredDep.getSUnit());
        DFS.follow(PredDep.getSUnit());
      }
      // Visit the top of the stack in postorder and backtrack.
      const SUnit *Child = DFS.getCurr();
      const SDep *PredDep = DFS.backtrack();
      Impl.visitPostorderNode(Child);
      if (PredDep)
        Impl.visitPostorderEdge(*PredDep, DFS.getCurr());
      if (DFS.isComplete())
        break;
    }
  }
  Impl.finalize();
}

/// The root of the given SubtreeID was just scheduled. For all subtrees
/// connected to this tree, record the depth of the connection so that the
/// nearest connected subtrees can be prioritized.
void SchedDFSResult::scheduleTree(unsigned SubtreeID) {
  for (SmallVectorImpl<Connection>::const_iterator
         I = SubtreeConnections[SubtreeID].begin(),
         E = SubtreeConnections[SubtreeID].end(); I != E; ++I) {
    SubtreeConnectLevels[I->TreeID] =
      std::max(SubtreeConnectLevels[I->TreeID], I->Level);
    DEBUG(dbgs() << "  Tree: " << I->TreeID
          << " @" << SubtreeConnectLevels[I->TreeID] << '\n');
  }
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
void ILPValue::print(raw_ostream &OS) const {
  OS << InstrCount << " / " << Length << " = ";
  if (!Length)
    OS << "BADILP";
  else
    OS << format("%g", ((double)InstrCount / Length));
}

void ILPValue::dump() const {
  dbgs() << *this << '\n';
}
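
// For example, an ILPValue with InstrCount = 6 and Length = 3 prints as
// "6 / 3 = 2" (format "%g" drops trailing zeros), while a Length of 0 prints
// "6 / 0 = BADILP" to flag a degenerate path.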

namespace llvm {

raw_ostream &operator<<(raw_ostream &OS, const ILPValue &Val) {
  Val.print(OS);
  return OS;
}

} // namespace llvm
#endif // !NDEBUG || LLVM_ENABLE_DUMP