/external/llvm/lib/Target/AArch64/AArch64ExpandPseudoInsts.cpp
    61  static void transferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI,   [argument]
    69  UseMI.addOperand(MO);
/external/llvm/lib/Target/Mips/Mips16RegisterInfo.cpp
    60  saveScavengerRegister(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MachineBasicBlock::iterator &UseMI, const TargetRegisterClass *RC, unsigned Reg) const   [argument]
    63  MachineBasicBlock::iterator &UseMI,
    69  TII.copyPhysReg(MBB, UseMI, DL, Reg, Mips::T0, true);
/external/llvm/lib/CodeGen/LiveRangeEdit.cpp
   166  MachineInstr *DefMI = nullptr, *UseMI = nullptr;   [local]
   178  if (UseMI && UseMI != MI)
   183  UseMI = MI;
   186  if (!DefMI || !UseMI)
   193  LIS.getInstructionIndex(UseMI)))
   197  // Assume there are stores between DefMI and UseMI.
   203  << " into single use: " << *UseMI);
   206  if (UseMI->readsWritesVirtualRegister(LI->reg, &Ops).second)
   209  MachineInstr *FoldMI = TII.foldMemoryOperand(UseMI, Op
  [all...]
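Note: these hits are the fold-into-single-use path — LiveRangeEdit looks for exactly one defining and one using instruction of the interval, checks that no stores intervene, and then asks TII.foldMemoryOperand to combine them. Below is a minimal sketch of the single def/use scan only, assuming a pass fragment with a MachineRegisterInfo in scope; findSingleDefUse is a hypothetical helper name, and the real code walks the live interval through LiveIntervals rather than MRI.

    #include "llvm/CodeGen/MachineInstr.h"
    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // Hypothetical sketch: find the single def and single non-debug use of a
    // virtual register, the DefMI/UseMI pairing the snippet above builds.
    static bool findSingleDefUse(MachineRegisterInfo &MRI, unsigned Reg,
                                 MachineInstr *&DefMI, MachineInstr *&UseMI) {
      DefMI = nullptr;
      UseMI = nullptr;
      for (MachineOperand &MO : MRI.reg_nodbg_operands(Reg)) {
        MachineInstr *MI = MO.getParent();
        if (MO.isDef()) {
          if (DefMI && DefMI != MI)
            return false;            // more than one defining instruction
          DefMI = MI;
        } else {
          if (UseMI && UseMI != MI)
            return false;            // more than one using instruction
          UseMI = MI;
        }
      }
      return DefMI && UseMI;
    }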
/external/llvm/lib/CodeGen/RegisterScavenging.cpp
   286  /// longest after StartMII. UseMI is set to the instruction where the search
   291  findSurvivorReg(MachineBasicBlock::iterator StartMI, BitVector &Candidates, unsigned InstrLimit, MachineBasicBlock::iterator &UseMI)   [argument]
   294  MachineBasicBlock::iterator &UseMI) {
   352  UseMI = RestorePointMI;
   389  MachineBasicBlock::iterator UseMI;   [local]
   390  unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI);
   415  if (!TRI->saveScavengerRegister(*MBB, I, UseMI, RC, SReg)) {
   427  TII->loadRegFromStackSlot(*MBB, UseMI, SReg, Scavenged[SI].FrameIndex,
   429  II = std::prev(UseMI);
   435  Scavenged[SI].Restore = std::prev(UseMI);
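Note: here UseMI is the restore point — findSurvivorReg picks the candidate register that stays unused longest after the scavenge site and reports where the search stopped in UseMI; the register is spilled at the scavenge point and reloaded just before UseMI. A rough sketch of that spill/reload bracket, assuming the storeRegToStackSlot/loadRegFromStackSlot signatures of this LLVM snapshot; spillAround is a hypothetical name.

    #include "llvm/CodeGen/MachineBasicBlock.h"
    #include "llvm/Target/TargetInstrInfo.h"
    #include "llvm/Target/TargetRegisterInfo.h"

    using namespace llvm;

    // Hypothetical sketch: spill SReg to FrameIndex at the scavenge point I and
    // reload it right before UseMI, the restore point findSurvivorReg() chose.
    static void spillAround(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator I,
                            MachineBasicBlock::iterator UseMI,
                            unsigned SReg, int FrameIndex,
                            const TargetRegisterClass *RC,
                            const TargetInstrInfo *TII,
                            const TargetRegisterInfo *TRI) {
      TII->storeRegToStackSlot(MBB, I, SReg, /*isKill=*/true, FrameIndex, RC, TRI);
      TII->loadRegFromStackSlot(MBB, UseMI, SReg, FrameIndex, RC, TRI);
    }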
/external/llvm/lib/CodeGen/TargetSchedule.cpp
   154  computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const   [argument]
   156  const MachineInstr *UseMI, unsigned UseOperIdx) const {
   163  if (UseMI) {
   165  UseMI, UseOperIdx);
   195  if (!UseMI)
   199  const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);
   202  unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);
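Note: these hits are the body of TargetSchedModel::computeOperandLatency, which resolves the scheduling classes of DefMI and UseMI and reads per-operand write/read latencies from the machine model. A caller uses it roughly as below; getEdgeLatency is a hypothetical wrapper, only the computeOperandLatency call itself is the real API.

    #include "llvm/CodeGen/TargetSchedule.h"

    using namespace llvm;

    // Latency of the value DefMI produces in operand DefOperIdx when consumed
    // by UseMI's operand UseOperIdx. UseMI may be null, in which case the
    // model falls back to the def's write latency alone.
    static unsigned getEdgeLatency(const TargetSchedModel &SchedModel,
                                   const MachineInstr *DefMI, unsigned DefOperIdx,
                                   const MachineInstr *UseMI, unsigned UseOperIdx) {
      return SchedModel.computeOperandLatency(DefMI, DefOperIdx, UseMI, UseOperIdx);
    }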
/external/llvm/lib/CodeGen/MachineRegisterInfo.cpp
   439  MachineInstr *UseMI = &*I;   [local]
   440  if (UseMI->isDebugValue())
   441  UseMI->getOperand(0).setReg(0U);
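Note: this is the dangling-DBG_VALUE cleanup — when a virtual register loses its definition, any DBG_VALUE still naming it gets its register operand set to 0 rather than being deleted, so debug info degrades gracefully. The same idiom appears in HexagonHardwareLoops.cpp below. A sketch of the pattern; markDbgValuesUndef is a hypothetical name.

    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    static void markDbgValuesUndef(MachineRegisterInfo &MRI, unsigned Reg) {
      for (auto I = MRI.use_instr_begin(Reg), E = MRI.use_instr_end(); I != E;) {
        MachineInstr *UseMI = &*I++;        // advance first: setReg edits the use list
        if (UseMI->isDebugValue())
          UseMI->getOperand(0).setReg(0U);  // operand 0 of DBG_VALUE is the location
      }
    }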
/external/llvm/lib/CodeGen/MachineSSAUpdater.cpp
   223  MachineInstr *UseMI = U.getParent();   [local]
   225  if (UseMI->isPHI()) {
   226  MachineBasicBlock *SourceBB = findCorrespondingPred(UseMI, &U);
   229  NewVR = GetValueInMiddleOfBlock(UseMI->getParent());
/external/llvm/lib/CodeGen/RegAllocFast.cpp
   601  const MachineInstr &UseMI = *MRI->use_instr_nodbg_begin(VirtReg);   [local]
   603  if (UseMI.isCopyLike())
   604  Hint = UseMI.getOperand(0).getReg();
/external/llvm/lib/CodeGen/TailDuplication.cpp
   271  MachineInstr *UseMI = UseMO.getParent();   [local]
   273  if (UseMI->isDebugValue()) {
   278  UseMI->eraseFromParent();
   281  if (UseMI->getParent() == DefBB && !UseMI->isPHI())
   346  for (MachineInstr &UseMI : MRI->use_instructions(Reg)) {
   347  if (UseMI.isDebugValue())
   349  if (UseMI.getParent() != BB)
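Note: the second group of hits (346-349) is the standard range-based walk over a register's users via MachineRegisterInfo::use_instructions, skipping debug values and filtering by parent block. As a self-contained sketch; hasUseOutsideBlock is a hypothetical helper.

    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    // Does Reg have any non-debug use outside BB?
    static bool hasUseOutsideBlock(const MachineRegisterInfo &MRI, unsigned Reg,
                                   const MachineBasicBlock *BB) {
      for (const MachineInstr &UseMI : MRI.use_instructions(Reg)) {
        if (UseMI.isDebugValue())
          continue;                   // DBG_VALUEs don't count as real uses
        if (UseMI.getParent() != BB)
          return true;
      }
      return false;
    }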
/external/llvm/lib/CodeGen/LiveIntervalAnalysis.cpp
   439  MachineInstr *UseMI = &*(I++);   [local]
   440  if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg))
   442  SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
   449  DEBUG(dbgs() << Idx << '\t' << *UseMI
   539  MachineInstr *UseMI = MO.getParent();   [local]
   540  if (UseMI->isDebugValue())
   550  SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
/external/llvm/lib/CodeGen/PeepholeOptimizer.cpp
   456  MachineInstr *UseMI = UseMO.getParent();   [local]
   457  if (UseMI == MI)
   460  if (UseMI->isPHI()) {
   486  if (UseMI->getOpcode() == TargetOpcode::SUBREG_TO_REG)
   489  MachineBasicBlock *UseMBB = UseMI->getParent();
   492  if (!LocalMIs.count(UseMI))
   529  MachineInstr *UseMI = UseMO->getParent();   [local]
   530  MachineBasicBlock *UseMBB = UseMI->getParent();
   541  MachineInstr *Copy = BuildMI(*UseMBB, UseMI, UseMI
  [all...]
/external/llvm/lib/CodeGen/TargetInstrInfo.cpp
  1069  /// Both DefMI and UseMI must be valid. By default, call directly to the
  1072  getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr *DefMI, unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const   [argument]
  1074  const MachineInstr *UseMI, unsigned UseIdx) const {
  1076  unsigned UseClass = UseMI->getDesc().getSchedClass();
  1098  /// dependent def and use when the operand indices are already known. UseMI may
  1109  computeOperandLatency(const InstrItineraryData *ItinData, const MachineInstr *DefMI, unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const   [argument]
  1111  const MachineInstr *UseMI, unsigned UseIdx) const {
  1120  if (UseMI)
  1121  OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
/external/llvm/lib/CodeGen/RegisterCoalescer.cpp
   693  MachineInstr *UseMI = MO.getParent();   [local]
   694  unsigned OpNo = &MO - &UseMI->getOperand(0);
   695  SlotIndex UseIdx = LIS->getInstructionIndex(UseMI);
   700  if (UseMI->isRegTiedToDefOperand(OpNo))
   742  MachineInstr *UseMI = UseMO.getParent();   [local]
   743  if (UseMI->isDebugValue()) {
   749  SlotIndex UseIdx = LIS->getInstructionIndex(UseMI).getRegSlot(true);
   760  if (UseMI == CopyMI)
   762  if (!UseMI->isCopy())
   764  if (UseMI
   774  DEBUG(dbgs() << "\t\tnoop: " << DefIdx << '\t' << *UseMI);
  1069  MachineInstr *UseMI = UseMO.getParent();   [local]
  1172  MachineInstr *UseMI = &*(I++);   [local]
  [all...]
/external/llvm/lib/Target/AMDGPU/SIFoldOperands.cpp
    55  MachineInstr *UseMI;   [member of struct FoldCandidate (anonymous namespace)]
    61  UseMI(MI), UseOpNo(OpNo) {
   109  MachineInstr *MI = Fold.UseMI;
   133  if (Candidate.UseMI == MI)
   199  static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,   [argument]
   205  const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);
   244  if (UseMI->getOpcode() == AMDGPU::COPY) {
   245  unsigned DestReg = UseMI->getOperand(0).getReg();
   255  UseMI->setDesc(TII->get(MovOp));
   256  CopiesToReplace.push_back(UseMI);
   354  MachineInstr *UseMI = Use->getParent();   [local]
  [all...]
/external/llvm/lib/Target/AMDGPU/SIInstrInfo.cpp
  1047  bool SIInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,   [argument]
  1052  unsigned Opc = UseMI->getOpcode();
  1056  if (hasModifiersSet(*UseMI, AMDGPU::OpName::src0_modifiers) ||
  1057  hasModifiersSet(*UseMI, AMDGPU::OpName::src1_modifiers) ||
  1058  hasModifiersSet(*UseMI, AMDGPU::OpName::src2_modifiers)) {
  1062  MachineOperand *Src0 = getNamedOperand(*UseMI, AMDGPU::OpName::src0);
  1063  MachineOperand *Src1 = getNamedOperand(*UseMI, AMDGPU::OpName::src1);
  1064  MachineOperand *Src2 = getNamedOperand(*UseMI, AMDGPU::OpName::src2);
  1092  UseMI->RemoveOperand(AMDGPU::getNamedOperandIdx(Opc,
  1094  UseMI
  [all...]
/external/llvm/lib/Target/PowerPC/PPCVSXFMAMutate.cpp
   266  MachineInstr *UseMI = UseMO.getParent();   [local]
   270  if (UseMI == AddendMI)
/external/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
   140  getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr *DefMI, unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const   [argument]
   142  const MachineInstr *UseMI,
   145  UseMI, UseIdx);
   164  if (UseMI->isBranch() && IsRegCR) {
  1188  bool PPCInstrInfo::FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,   [argument]
  1204  const MCInstrDesc &UseMCID = UseMI->getDesc();
  1211  for (UseIdx = 0; UseIdx < UseMI->getNumOperands(); ++UseIdx)
  1212  if (UseMI->getOperand(UseIdx).isReg() &&
  1213  UseMI->getOperand(UseIdx).getReg() == Reg)
  1216  assert(UseIdx < UseMI->getNumOperands() && "Cannot find Reg in UseMI");
  1570  MachineInstr *UseMI = &*I;   [local]
  1704  MachineInstr *UseMI = &*I;   [local]
  [all...]
/external/llvm/lib/Target/ARM/MLxExpansionPass.cpp
   124  MachineInstr *UseMI = &*MRI->use_instr_nodbg_begin(Reg);   [local]
   125  if (UseMI->getParent() != MBB)
   128  while (UseMI->isCopy() || UseMI->isInsertSubreg()) {
   129  Reg = UseMI->getOperand(0).getReg();
   133  UseMI = &*MRI->use_instr_nodbg_begin(Reg);
   134  if (UseMI->getParent() != MBB)
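Note: here UseMI starts at the first non-debug user and the loop looks through COPY/INSERT_SUBREG chains to reach the real consumer. A simplified sketch of the chain walk, handling plain copies only and guarding the empty-use case; getRealUse is a hypothetical name.

    #include "llvm/CodeGen/MachineRegisterInfo.h"

    using namespace llvm;

    static MachineInstr *getRealUse(MachineRegisterInfo *MRI, unsigned Reg) {
      if (MRI->use_nodbg_empty(Reg))
        return nullptr;
      MachineInstr *UseMI = &*MRI->use_instr_nodbg_begin(Reg);
      while (UseMI->isCopy()) {
        Reg = UseMI->getOperand(0).getReg();   // follow the copy's destination
        if (MRI->use_nodbg_empty(Reg) || !MRI->hasOneNonDBGUse(Reg))
          break;                               // chain ends or fans out
        UseMI = &*MRI->use_instr_nodbg_begin(Reg);
      }
      return UseMI;
    }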
/external/llvm/lib/Target/ARM/ARMExpandPseudoInsts.cpp
    59  MachineInstrBuilder &UseMI, MachineInstrBuilder &DefMI);
    76  TransferImpOps(MachineInstr &OldMI, MachineInstrBuilder &UseMI, MachineInstrBuilder &DefMI)   [argument]
    77  MachineInstrBuilder &UseMI,
    85  UseMI.addOperand(MO);
/external/llvm/lib/Target/ARM/ARMBaseInstrInfo.cpp
  2636  bool ARMBaseInstrInfo::FoldImmediate(MachineInstr *UseMI,   [argument]
  2660  const MCInstrDesc &UseMCID = UseMI->getDesc();
  2663  if (UseMI->getOperand(NumOps-1).getReg() == ARM::CPSR)
  2669  unsigned UseOpc = UseMI->getOpcode();
  2684  Commute = UseMI->getOperand(2).getReg() != Reg;
  2736  unsigned Reg1 = UseMI->getOperand(OpIdx).getReg();
  2737  bool isKill = UseMI->getOperand(OpIdx).isKill();
  2739  AddDefaultCC(AddDefaultPred(BuildMI(*UseMI->getParent(),
  2740  UseMI, UseMI
  3643  getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr *DefMI, unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const   [argument]
  4036  hasHighOperandLatency(const TargetSchedModel &SchedModel, const MachineRegisterInfo *MRI, const MachineInstr *DefMI, unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const   [argument]
  [all...]
/external/llvm/include/llvm/Target/TargetInstrInfo.h
  1132  virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,   [argument]
  1166  const MachineInstr *UseMI,
  1173  const MachineInstr *UseMI, unsigned UseIdx)
  1204  hasHighOperandLatency(const TargetSchedModel &SchedModel, const MachineRegisterInfo *MRI, const MachineInstr *DefMI, unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const   [argument]
  1207  const MachineInstr *UseMI, unsigned UseIdx) const {
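Note: FoldImmediate is the target hook behind several of the .cpp hits above (ARM, PowerPC, AMDGPU) — given a DefMI that materializes an immediate and a UseMI that consumes it through Reg, the target may rewrite UseMI to embed the constant. A sketch of how a caller typically drives it; tryFoldImmediate is a hypothetical wrapper, while the real in-tree call site lives in PeepholeOptimizer.

    #include "llvm/CodeGen/MachineRegisterInfo.h"
    #include "llvm/Target/TargetInstrInfo.h"

    using namespace llvm;

    static bool tryFoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
                                 unsigned Reg, MachineRegisterInfo *MRI,
                                 const TargetInstrInfo *TII) {
      // Only profitable when UseMI is the sole real consumer of Reg;
      // otherwise DefMI must stay and folding just duplicates the constant.
      if (!MRI->hasOneNonDBGUse(Reg))
        return false;
      return TII->FoldImmediate(UseMI, DefMI, Reg, MRI);
    }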
/external/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
   565  MachineInstr *UseMI = &*(UI++);   [local]
   566  if (UseMI->isDebugValue()) continue;
   567  if (UseMI->isCopy() && !CopyUseMI && UseMI->getParent() == EntryMBB) {
   568  CopyUseMI = UseMI; continue;
/external/llvm/lib/Target/Hexagon/HexagonHardwareLoops.cpp
  1028  MachineInstr *UseMI = Use.getParent();   [local]
  1031  if (MI != UseMI)
  1062  MachineInstr *UseMI = I->getParent();   [local]
  1063  if (UseMI == MI)
  1066  UseMI->getOperand(0).setReg(0U);
/external/llvm/lib/Target/X86/X86InstrInfo.cpp
  6984  hasHighOperandLatency(const TargetSchedModel &SchedModel, const MachineRegisterInfo *MRI, const MachineInstr *DefMI, unsigned DefIdx, const MachineInstr *UseMI, unsigned UseIdx) const   [argument]
  6987  const MachineInstr *UseMI, unsigned UseIdx) const {