TargetInstrInfo.cpp revision 36b56886974eae4f9c5ebc96befd3e7bfe5de338
//===-- TargetInstrInfo.cpp - Target Instruction Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cctype>
using namespace llvm;

static cl::opt<bool> DisableHazardRecognizer(
  "disable-sched-hazard", cl::Hidden, cl::init(false),
  cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return 0;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return 0;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
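  // The scan below is a two-state machine: atInsnStart is set at each
  // newline/separator, and the first non-whitespace character after that
  // point charges MaxInstLength bytes. For example (assuming ';' is the
  // comment string), "mov r0, r1\n; note\nadd r0, r0" counts as two
  // instructions; the comment line contributes nothing.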
  bool atInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0)
      atInsnStart = true;
    // Check for a comment before counting, so that text from the comment
    // string to the next separator is not charged as an instruction.
    if (atInsnStart && strncmp(Str, MAI.getCommentString(),
                               strlen(MAI.getCommentString())) == 0)
      atInsnStart = false;
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      Length += MAI.getMaxInstLength();
      atInsnStart = false;
    }
  }

  return Length;
}

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If NewDest does not immediately follow MBB, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(),
                 Tail->getDebugLoc());
  MBB->addSuccessor(NewDest);
}
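// Illustrative example (assuming a two-address instruction whose def is tied
// to the first source operand): commuting
//   %v0 = ADD %v0, %v1
// swaps the sources to (%v1, %v0), and the tied def must follow its operand,
// giving
//   %v1 = ADD %v1, %v0
// The Reg0/SubReg0 bookkeeping below implements exactly this update.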
// commuteInstruction - The default implementation of this method just
// exchanges the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
                                                  bool NewMI) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its
    // own.
    return 0;
  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    std::string msg;
    raw_string_ostream Msg(msg);
    Msg << "Don't know how to commute: " << *MI;
    report_fatal_error(Msg.str());
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  // If the destination is tied to either of the commuted source registers,
  // it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI->getParent()->getParent();
    MI = MF.CloneMachineInstr(MI);
  }

  if (HasDef) {
    MI->getOperand(0).setReg(Reg0);
    MI->getOperand(0).setSubReg(SubReg0);
  }
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setSubReg(SubReg1);
  MI->getOperand(Idx1).setSubReg(SubReg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  return MI;
}

/// findCommutedOpIndices - If the specified MI is commutable, return the two
/// operand indices whose values would swap. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI->isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = MCID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}


bool
TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;
  if (!MI->isPredicable())
    return true;
  return !isPredicated(MI);
}


bool TargetInstrInfo::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;

  assert(!MI->isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}
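// Note: the two scanners below identify fixed stack accesses purely from the
// instruction's memory operands. An instruction whose memoperands have been
// dropped by an earlier transformation is conservatively reported as touching
// no stack slot.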
bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const TargetMachine *TM) const {
  if (!SubIdx) {
    Size = RC->getSize();
    Offset = 0;
    return true;
  }
  unsigned BitSize = TM->getRegisterInfo()->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;

  int BitOffset = TM->getRegisterInfo()->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize /= 8;
  Offset = (unsigned)BitOffset / 8;

  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");

  if (!TM->getDataLayout()->isLittleEndian()) {
    Offset = RC->getSize() - (Offset + Size);
  }
  return true;
}

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    unsigned SubIdx,
                                    const MachineInstr *Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
                                  const MachineInstr *MI1,
                                  const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
                                         MachineFunction &MF) const {
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return 0;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return 0;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : 0;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return 0;
}

bool TargetInstrInfo::
canFoldMemoryOperand(const MachineInstr *MI,
                     const SmallVectorImpl<unsigned> &Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}
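// foldPatchpoint rewrites a STACKMAP/PATCHPOINT so that a spilled live value
// is referenced through its stack slot: the folded register operand becomes
// the four-operand group <StackMaps::IndirectMemRefOp, byte size, frame
// index, byte offset>, which the StackMaps emitter later records as an
// indirect memory location.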
static MachineInstr* foldPatchpoint(MachineFunction &MF,
                                    MachineInstr *MI,
                                    const SmallVectorImpl<unsigned> &Ops,
                                    int FrameIndex,
                                    const TargetInstrInfo &TII) {
  unsigned StartIdx = 0;
  switch (MI->getOpcode()) {
  case TargetOpcode::STACKMAP:
    StartIdx = 2; // Skip ID, nShadowBytes.
    break;
  case TargetOpcode::PATCHPOINT: {
    // For PatchPoint, the call args are not foldable.
    PatchPointOpers opers(MI);
    StartIdx = opers.getVarIdx();
    break;
  }
  default:
    llvm_unreachable("unexpected stackmap opcode");
  }

  // Return 0 if any operands requested for folding are not foldable (not
  // part of the stackmap's live values).
  for (SmallVectorImpl<unsigned>::const_iterator I = Ops.begin(), E = Ops.end();
       I != E; ++I) {
    if (*I < StartIdx)
      return 0;
  }

  MachineInstr *NewMI =
    MF.CreateMachineInstr(TII.get(MI->getOpcode()), MI->getDebugLoc(), true);
  MachineInstrBuilder MIB(MF, NewMI);

  // No need to fold the return value, the metadata, or the function
  // arguments; copy them over unchanged.
  for (unsigned i = 0; i < StartIdx; ++i)
    MIB.addOperand(MI->getOperand(i));

  for (unsigned i = StartIdx; i < MI->getNumOperands(); ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (std::find(Ops.begin(), Ops.end(), i) != Ops.end()) {
      unsigned SpillSize;
      unsigned SpillOffset;
      // Compute the spill slot size and offset.
      const TargetRegisterClass *RC =
        MF.getRegInfo().getRegClass(MO.getReg());
      bool Valid = TII.getStackSlotRange(RC, MO.getSubReg(), SpillSize,
                                         SpillOffset, &MF.getTarget());
      if (!Valid)
        report_fatal_error("cannot spill patchpoint subregister operand");
      MIB.addImm(StackMaps::IndirectMemRefOp);
      MIB.addImm(SpillSize);
      MIB.addFrameIndex(FrameIndex);
      MIB.addImm(SpillOffset);
    } else
      MIB.addOperand(MO);
  }
  return NewMI;
}
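// Hypothetical target-level example (X86, for illustration only): folding a
// reload into the second source of
//   %v0 = ADD32rr %v1, %v2
// when %v2 lives in stack slot FI would produce a memory-form instruction
//   %v0 = ADD32rm %v1, <fi#FI>
// Opcode selection like this happens in the target's foldMemoryOperandImpl;
// the generic code below only orchestrates it and attaches memory operands.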
/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  MachineInstr *NewMI = 0;

  if (MI->getOpcode() == TargetOpcode::STACKMAP ||
      MI->getOpcode() == TargetOpcode::PATCHPOINT) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI);
  }

  if (NewMI) {
    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                              Flags, MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return 0;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return 0;

  const MachineOperand &MO = MI->getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = 0;
  int FrameIndex = 0;

  if ((MI->getOpcode() == TargetOpcode::STACKMAP ||
       MI->getOpcode() == TargetOpcode::PATCHPOINT) &&
      isLoadFromStackSlot(LoadMI, FrameIndex)) {
    // Fold stackmap/patchpoint.
    NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
  } else {
    // Ask the target to do the actual folding.
    NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  }

  if (!NewMI) return 0;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  if (MI->memoperands_empty()) {
    NewMI->setMemRefs(LoadMI->memoperands_begin(),
                      LoadMI->memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI->memoperands_begin(),
                      MI->memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
           E = LoadMI->memoperands_end(); I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}
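// The generic remat test below errs on the side of caution: it accepts an
// instruction only when re-executing it at any later point is provably safe,
// e.g. a constant materialization or a load from an immutable stack object,
// and it rejects anything that reads state that may change between the
// original def and the rematerialized copy.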
bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (TII.isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI->isNotDuplicable() || MI->mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isPosition())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI))
    return true;

  return false;
}

// Provide a global flag for disabling the PreRA hazard recognizer that
// targets may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}
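// The two scoreboard-based recognizers below differ only in the debug tag
// they are constructed with ("misched" vs. "post-RA-sched"); both model
// structural hazards from the itinerary's reservation tables.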
// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
//  SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
//  MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI->getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel *SchedModel,
                                            const MachineInstr *DefMI) const {
  if (DefMI->isTransient())
    return 0;
  if (DefMI->mayLoad())
    return SchedModel->LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
    return SchedModel->HighLatency;
  return 1;
}

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
  return 0;
}

unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
                const MachineInstr *MI,
                unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI->mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}
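// The remaining helpers resolve a def-use latency in stages:
// computeOperandLatency first tries computeDefOperandLatency (a def-only
// answer when the itinerary is missing or empty), then the per-operand
// itinerary lookup, and finally falls back to the whole-instruction stage
// latency clamped from below by defaultDefLatency. For example, with an
// empty itinerary, a non-transient load def simply yields
// SchedModel->LoadLatency.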
bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr *DefMI,
                                       unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI->getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
                  const MachineInstr *DefMI, unsigned DefIdx,
                  const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI->getDesc().getSchedClass();
  unsigned UseClass = UseMI->getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
  const InstrItineraryData *ItinData,
  const MachineInstr *DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI
/// may be NULL for an unknown use.
///
/// Depending on the subtarget's itinerary properties, this may or may not
/// need to call getOperandLatency(). For most subtargets, we don't need
/// DefIdx or UseIdx to compute min latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  InstrLatency = std::max(InstrLatency,
                          defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}