TargetInstrInfo.cpp revision 849596ced42f2760c5b63f7676e16829b808b5c9
//===-- TargetInstrInfo.cpp - Target Instruction Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cctype>
#include <cstring>
using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return 0;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return 0;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool atInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0)
      atInsnStart = true;
    // Check for a comment before counting the character as an instruction,
    // so that a comment at the start of a statement is not counted.
    if (atInsnStart && strncmp(Str, MAI.getCommentString(),
                               strlen(MAI.getCommentString())) == 0)
      atInsnStart = false;
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      Length += MAI.getMaxInstLength();
      atInsnStart = false;
    }
  }

  return Length;
}
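
// Illustrative sketch (hypothetical MCAsmInfo settings, not from this file):
// assuming MaxInstLength is 4 and the comment string is "@",
//
//   getInlineAsmLength("add r0, r1\n@ note\nmov r2, r3", MAI)
//
// returns 8: each of the two statements contributes MaxInstLength bytes,
// while the comment line contributes nothing. The estimate is deliberately
// conservative for targets whose real encodings are shorter.
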
97void 98TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail, 99 MachineBasicBlock *NewDest) const { 100 MachineBasicBlock *MBB = Tail->getParent(); 101 102 // Remove all the old successors of MBB from the CFG. 103 while (!MBB->succ_empty()) 104 MBB->removeSuccessor(MBB->succ_begin()); 105 106 // Remove all the dead instructions from the end of MBB. 107 MBB->erase(Tail, MBB->end()); 108 109 // If MBB isn't immediately before MBB, insert a branch to it. 110 if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest)) 111 InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(), 112 Tail->getDebugLoc()); 113 MBB->addSuccessor(NewDest); 114} 115 116// commuteInstruction - The default implementation of this method just exchanges 117// the two operands returned by findCommutedOpIndices. 118MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI, 119 bool NewMI) const { 120 const MCInstrDesc &MCID = MI->getDesc(); 121 bool HasDef = MCID.getNumDefs(); 122 if (HasDef && !MI->getOperand(0).isReg()) 123 // No idea how to commute this instruction. Target should implement its own. 124 return 0; 125 unsigned Idx1, Idx2; 126 if (!findCommutedOpIndices(MI, Idx1, Idx2)) { 127 std::string msg; 128 raw_string_ostream Msg(msg); 129 Msg << "Don't know how to commute: " << *MI; 130 report_fatal_error(Msg.str()); 131 } 132 133 assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() && 134 "This only knows how to commute register operands so far"); 135 unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0; 136 unsigned Reg1 = MI->getOperand(Idx1).getReg(); 137 unsigned Reg2 = MI->getOperand(Idx2).getReg(); 138 unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0; 139 unsigned SubReg1 = MI->getOperand(Idx1).getSubReg(); 140 unsigned SubReg2 = MI->getOperand(Idx2).getSubReg(); 141 bool Reg1IsKill = MI->getOperand(Idx1).isKill(); 142 bool Reg2IsKill = MI->getOperand(Idx2).isKill(); 143 // If destination is tied to either of the commuted source register, then 144 // it must be updated. 145 if (HasDef && Reg0 == Reg1 && 146 MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) { 147 Reg2IsKill = false; 148 Reg0 = Reg2; 149 SubReg0 = SubReg2; 150 } else if (HasDef && Reg0 == Reg2 && 151 MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) { 152 Reg1IsKill = false; 153 Reg0 = Reg1; 154 SubReg0 = SubReg1; 155 } 156 157 if (NewMI) { 158 // Create a new instruction. 159 MachineFunction &MF = *MI->getParent()->getParent(); 160 MI = MF.CloneMachineInstr(MI); 161 } 162 163 if (HasDef) { 164 MI->getOperand(0).setReg(Reg0); 165 MI->getOperand(0).setSubReg(SubReg0); 166 } 167 MI->getOperand(Idx2).setReg(Reg1); 168 MI->getOperand(Idx1).setReg(Reg2); 169 MI->getOperand(Idx2).setSubReg(SubReg1); 170 MI->getOperand(Idx1).setSubReg(SubReg2); 171 MI->getOperand(Idx2).setIsKill(Reg1IsKill); 172 MI->getOperand(Idx1).setIsKill(Reg2IsKill); 173 return MI; 174} 175 176/// findCommutedOpIndices - If specified MI is commutable, return the two 177/// operand indices that would swap value. Return true if the instruction 178/// is not in a form which this routine understands. 
/// findCommutedOpIndices - If the specified MI is commutable, return the two
/// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI->isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = MCID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}

bool
TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;
  if (!MI->isPredicable())
    return true;
  return !isPredicated(MI);
}

bool TargetInstrInfo::PredicateInstruction(MachineInstr *MI,
                           const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;

  assert(!MI->isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    unsigned SubIdx,
                                    const MachineInstr *Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
                                  const MachineInstr *MI1,
                                  const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}
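
// Hedged example of the default equivalence check: two rematerialization
// candidates such as (LOADCONST is a hypothetical opcode)
//
//   %vreg1<def> = LOADCONST 42
//   %vreg2<def> = LOADCONST 42
//
// are reported as producing the same value, because IgnoreVRegDefs compares
// everything except the differing virtual register defs.
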
MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
                                         MachineFunction &MF) const {
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return 0;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1-FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return 0;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : 0;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return 0;
}

bool TargetInstrInfo::
canFoldMemoryOperand(const MachineInstr *MI,
                     const SmallVectorImpl<unsigned> &Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}
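
// Illustrative note (hypothetical vregs): for the copy
//
//   %vreg1<def> = COPY %vreg2
//
// folding operand 0 turns the copy into a store of %vreg2 to the stack slot,
// and folding operand 1 turns it into a reload into %vreg1. canFoldCopy only
// approves either direction when the register classes are compatible, so
// suitable spill and reload instructions exist for the class.
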
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // Ask the target to do the actual folding.
  if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                              Flags, MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return 0;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return 0;

  const MachineOperand &MO = MI->getOperand(1-Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  if (!NewMI) return 0;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  if (MI->memoperands_empty()) {
    NewMI->setMemRefs(LoadMI->memoperands_begin(),
                      LoadMI->memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI->memoperands_begin(),
                      MI->memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
           E = LoadMI->memoperands_end(); I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}
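
// Usage sketch (hedged): spillers typically drive the stack-slot variant as
//
//   SmallVector<unsigned, 1> Ops;
//   Ops.push_back(OpIdx);
//   if (MachineInstr *FoldedMI = TII->foldMemoryOperand(MI, Ops, FI)) {
//     // FoldedMI is already inserted; the original instruction goes away.
//     MI->eraseFromParent();
//   }
//
// where OpIdx and FI stand for the operand index and frame index being
// folded.
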
bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (TII.isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI->isNotDuplicable() || MI->mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI))
    return true;

  return false;
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "misched");
}
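
// Override sketch (hedged, FooInstrInfo is a hypothetical backend): targets
// with instruction itineraries usually forward to the scoreboard
// implementation rather than the dummy recognizer, e.g.
//
//   ScheduleHazardRecognizer *FooInstrInfo::CreateTargetHazardRecognizer(
//       const TargetMachine *TM, const ScheduleDAG *DAG) const {
//     if (usePreRAHazardRecognizer())
//       return new ScoreboardHazardRecognizer(TM->getInstrItineraryData(),
//                                             DAG, "pre-RA-sched");
//     return TargetInstrInfo::CreateTargetHazardRecognizer(TM, DAG);
//   }
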
// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI->getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel *SchedModel,
                                            const MachineInstr *DefMI) const {
  if (DefMI->isTransient())
    return 0;
  if (DefMI->mayLoad())
    return SchedModel->LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
    return SchedModel->HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
  return 0;
}

unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
                const MachineInstr *MI,
                unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI->mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr *DefMI,
                                       unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI->getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}
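
// Worked example (hedged, made-up MCSchedModel numbers): with LoadLatency = 4
// and HighLatency = 10, defaultDefLatency above answers 0 for transient defs
// such as COPY, 4 for anything that mayLoad(), 10 for opcodes the target
// flags through isHighLatencyDef(), and 1 for everything else.
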
/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
                  const MachineInstr *DefMI, unsigned DefIdx,
                  const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI->getDesc().getSchedClass();
  unsigned UseClass = UseMI->getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
  const InstrItineraryData *ItinData,
  const MachineInstr *DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI
/// may be NULL for an unknown use.
///
/// Depending on the subtarget's itinerary properties, this may or may not
/// need to call getOperandLatency(). For most subtargets, we don't need
/// DefIdx or UseIdx to compute min latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  InstrLatency = std::max(InstrLatency,
                          defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}
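
// Usage sketch (hedged): a scheduler weighting a data-dependence edge would
// typically call
//
//   unsigned Latency = TII->computeOperandLatency(ItinData, DefMI, DefIdx,
//                                                 UseMI, UseIdx);
//
// passing UseMI = NULL when the use is unknown; in that case only the def
// cycle of DefMI's itinerary class is consulted before falling back to the
// full stage latency of the defining instruction.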