ARMBaseInstrInfo.h revision 020f4106f820648fd7e91956859844a80de13974
//===- ARMBaseInstrInfo.h - ARM Base Instruction Information ----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Base ARM implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#ifndef ARMBASEINSTRUCTIONINFO_H
#define ARMBASEINSTRUCTIONINFO_H

#include "ARM.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"

#define GET_INSTRINFO_HEADER
#include "ARMGenInstrInfo.inc"

namespace llvm {
  class ARMSubtarget;
  class ARMBaseRegisterInfo;

class ARMBaseInstrInfo : public ARMGenInstrInfo {
  const ARMSubtarget &Subtarget;

protected:
  // Can only be subclassed.
  explicit ARMBaseInstrInfo(const ARMSubtarget &STI);

public:
  // Return the non-pre/post incrementing version of 'Opc'. Return 0
  // if there is no such opcode.
  virtual unsigned getUnindexedOpcode(unsigned Opc) const =0;

  virtual MachineInstr *convertToThreeAddress(MachineFunction::iterator &MFI,
                                              MachineBasicBlock::iterator &MBBI,
                                              LiveVariables *LV) const;

  virtual const ARMBaseRegisterInfo &getRegisterInfo() const =0;
  const ARMSubtarget &getSubtarget() const { return Subtarget; }

  ScheduleHazardRecognizer *
  CreateTargetHazardRecognizer(const TargetMachine *TM,
                               const ScheduleDAG *DAG) const;

  ScheduleHazardRecognizer *
  CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                     const ScheduleDAG *DAG) const;

  // Branch analysis.
  virtual bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,
                             MachineBasicBlock *&FBB,
                             SmallVectorImpl<MachineOperand> &Cond,
                             bool AllowModify = false) const;
  virtual unsigned RemoveBranch(MachineBasicBlock &MBB) const;
  virtual unsigned InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                                MachineBasicBlock *FBB,
                                const SmallVectorImpl<MachineOperand> &Cond,
                                DebugLoc DL) const;

  virtual
  bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const;

  // Predication support.
  bool isPredicated(const MachineInstr *MI) const;

  ARMCC::CondCodes getPredicate(const MachineInstr *MI) const {
    int PIdx = MI->findFirstPredOperandIdx();
    return PIdx != -1 ? (ARMCC::CondCodes)MI->getOperand(PIdx).getImm()
                      : ARMCC::AL;
  }

  virtual
  bool PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const;

  virtual
  bool SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                         const SmallVectorImpl<MachineOperand> &Pred2) const;

  virtual bool DefinesPredicate(MachineInstr *MI,
                                std::vector<MachineOperand> &Pred) const;

  virtual bool isPredicable(MachineInstr *MI) const;
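
  // A minimal usage sketch (illustrative only, not part of this interface):
  // a pass holding a pointer of this type might query the predicate of an
  // instruction before transforming it. The local names TII and MI below are
  // hypothetical.
  //
  //   if (TII->isPredicated(MI)) {
  //     ARMCC::CondCodes CC = TII->getPredicate(MI);
  //     // ... bail out or adjust the transformation for predicated code ...
  //   }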

  /// GetInstSize - Returns the size of the specified MachineInstr.
  ///
  virtual unsigned GetInstSizeInBytes(const MachineInstr* MI) const;

  virtual unsigned isLoadFromStackSlot(const MachineInstr *MI,
                                       int &FrameIndex) const;
  virtual unsigned isStoreToStackSlot(const MachineInstr *MI,
                                      int &FrameIndex) const;
  virtual unsigned isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                             int &FrameIndex) const;
  virtual unsigned isStoreToStackSlotPostFE(const MachineInstr *MI,
                                            int &FrameIndex) const;

  virtual void copyPhysReg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator I, DebugLoc DL,
                           unsigned DestReg, unsigned SrcReg,
                           bool KillSrc) const;

  virtual void storeRegToStackSlot(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   unsigned SrcReg, bool isKill, int FrameIndex,
                                   const TargetRegisterClass *RC,
                                   const TargetRegisterInfo *TRI) const;

  virtual void loadRegFromStackSlot(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator MBBI,
                                    unsigned DestReg, int FrameIndex,
                                    const TargetRegisterClass *RC,
                                    const TargetRegisterInfo *TRI) const;

  virtual bool expandPostRAPseudo(MachineBasicBlock::iterator MI) const;

  virtual MachineInstr *emitFrameIndexDebugValue(MachineFunction &MF,
                                                 int FrameIx,
                                                 uint64_t Offset,
                                                 const MDNode *MDPtr,
                                                 DebugLoc DL) const;

  virtual void reMaterialize(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator MI,
                             unsigned DestReg, unsigned SubIdx,
                             const MachineInstr *Orig,
                             const TargetRegisterInfo &TRI) const;

  MachineInstr *duplicate(MachineInstr *Orig, MachineFunction &MF) const;

  virtual bool produceSameValue(const MachineInstr *MI0,
                                const MachineInstr *MI1,
                                const MachineRegisterInfo *MRI) const;

  /// areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to
  /// determine if two loads are loading from the same base address. It should
  /// only return true if the base pointers are the same and the only
  /// difference between the two addresses is the offset. It also returns the
  /// offsets by reference.
  virtual bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2,
                                       int64_t &Offset1, int64_t &Offset2) const;
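
  // A minimal sketch of how a pre-regalloc scheduler might combine this hook
  // with shouldScheduleLoadsNear (declared below). The SDNode pointers LoadA,
  // LoadB and the NumLoads count are hypothetical locals of the caller.
  //
  //   int64_t Off1, Off2;
  //   if (TII->areLoadsFromSameBasePtr(LoadA, LoadB, Off1, Off2) &&
  //       TII->shouldScheduleLoadsNear(LoadA, LoadB, Off1, Off2, NumLoads))
  //     // ... keep the two loads adjacent in the schedule ...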

  /// shouldScheduleLoadsNear - This is used by the pre-regalloc scheduler to
  /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads
  /// should be scheduled together. On some targets, if two loads are loading
  /// from addresses in the same cache line, it's better if they are scheduled
  /// together. This function takes two integers that represent the load
  /// offsets from the common base address. It returns true if it decides it's
  /// desirable to schedule the two loads together. "NumLoads" is the number
  /// of loads that have already been scheduled after Load1.
  virtual bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                       int64_t Offset1, int64_t Offset2,
                                       unsigned NumLoads) const;

  virtual bool isSchedulingBoundary(const MachineInstr *MI,
                                    const MachineBasicBlock *MBB,
                                    const MachineFunction &MF) const;

  virtual bool isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles, unsigned ExtraPredCycles,
                                   const BranchProbability &Probability) const;

  virtual bool isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumT, unsigned ExtraT,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumF, unsigned ExtraF,
                                   const BranchProbability &Probability) const;

  virtual bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         const BranchProbability
                                         &Probability) const {
    return NumCycles == 1;
  }

  /// AnalyzeCompare - For a comparison instruction, return the source register
  /// in SrcReg and the value it compares against in CmpValue. Return true if
  /// the comparison instruction can be analyzed.
  virtual bool AnalyzeCompare(const MachineInstr *MI, unsigned &SrcReg,
                              int &CmpMask, int &CmpValue) const;

  /// OptimizeCompareInstr - Convert the instruction to set the zero flag so
  /// that we can remove a "comparison with zero".
  virtual bool OptimizeCompareInstr(MachineInstr *CmpInstr, unsigned SrcReg,
                                    int CmpMask, int CmpValue,
                                    const MachineRegisterInfo *MRI) const;

  /// FoldImmediate - 'Reg' is known to be defined by a move immediate
  /// instruction; try to fold the immediate into the use instruction.
  virtual bool FoldImmediate(MachineInstr *UseMI, MachineInstr *DefMI,
                             unsigned Reg, MachineRegisterInfo *MRI) const;

  virtual unsigned getNumMicroOps(const InstrItineraryData *ItinData,
                                  const MachineInstr *MI) const;

  virtual
  int getOperandLatency(const InstrItineraryData *ItinData,
                        const MachineInstr *DefMI, unsigned DefIdx,
                        const MachineInstr *UseMI, unsigned UseIdx) const;
  virtual
  int getOperandLatency(const InstrItineraryData *ItinData,
                        SDNode *DefNode, unsigned DefIdx,
                        SDNode *UseNode, unsigned UseIdx) const;

  virtual unsigned getOutputLatency(const InstrItineraryData *ItinData,
                                    const MachineInstr *DefMI, unsigned DefIdx,
                                    const MachineInstr *DepMI) const;
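
  // A minimal sketch (locals are hypothetical) of how a peephole pass might
  // drive the two compare hooks above: first analyze the compare, then try to
  // fold it into a flag-setting form of the defining instruction.
  //
  //   unsigned SrcReg; int CmpMask, CmpValue;
  //   if (TII->AnalyzeCompare(CmpMI, SrcReg, CmpMask, CmpValue))
  //     TII->OptimizeCompareInstr(CmpMI, SrcReg, CmpMask, CmpValue, MRI);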

  /// VFP/NEON execution domains.
  std::pair<uint16_t, uint16_t>
  getExecutionDomain(const MachineInstr *MI) const;
  void setExecutionDomain(MachineInstr *MI, unsigned Domain) const;

private:
  unsigned getInstBundleLength(const MachineInstr *MI) const;

  int getVLDMDefCycle(const InstrItineraryData *ItinData,
                      const MCInstrDesc &DefMCID,
                      unsigned DefClass,
                      unsigned DefIdx, unsigned DefAlign) const;
  int getLDMDefCycle(const InstrItineraryData *ItinData,
                     const MCInstrDesc &DefMCID,
                     unsigned DefClass,
                     unsigned DefIdx, unsigned DefAlign) const;
  int getVSTMUseCycle(const InstrItineraryData *ItinData,
                      const MCInstrDesc &UseMCID,
                      unsigned UseClass,
                      unsigned UseIdx, unsigned UseAlign) const;
  int getSTMUseCycle(const InstrItineraryData *ItinData,
                     const MCInstrDesc &UseMCID,
                     unsigned UseClass,
                     unsigned UseIdx, unsigned UseAlign) const;
  int getOperandLatency(const InstrItineraryData *ItinData,
                        const MCInstrDesc &DefMCID,
                        unsigned DefIdx, unsigned DefAlign,
                        const MCInstrDesc &UseMCID,
                        unsigned UseIdx, unsigned UseAlign) const;

  int getInstrLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *MI, unsigned *PredCost = 0) const;

  int getInstrLatency(const InstrItineraryData *ItinData,
                      SDNode *Node) const;

  bool hasHighOperandLatency(const InstrItineraryData *ItinData,
                             const MachineRegisterInfo *MRI,
                             const MachineInstr *DefMI, unsigned DefIdx,
                             const MachineInstr *UseMI, unsigned UseIdx) const;
  bool hasLowDefLatency(const InstrItineraryData *ItinData,
                        const MachineInstr *DefMI, unsigned DefIdx) const;

  /// verifyInstruction - Perform target specific instruction verification.
  bool verifyInstruction(const MachineInstr *MI, StringRef &ErrInfo) const;

private:
  /// Modeling special VFP / NEON fp MLA / MLS hazards.

  /// MLxEntryMap - Map fp MLA / MLS to the corresponding entry in the internal
  /// MLx table.
  DenseMap<unsigned, unsigned> MLxEntryMap;

  /// MLxHazardOpcodes - Set of add / sub and multiply opcodes that would cause
  /// stalls when scheduled together with fp MLA / MLS opcodes.
  SmallSet<unsigned, 16> MLxHazardOpcodes;

public:
  /// isFpMLxInstruction - Return true if the specified opcode is a fp MLA /
  /// MLS instruction.
  bool isFpMLxInstruction(unsigned Opcode) const {
    return MLxEntryMap.count(Opcode);
  }

  /// isFpMLxInstruction - This version also returns the multiply opcode and
  /// the addition / subtraction opcode to expand to. It sets 'HasLane' to
  /// true for MLx instructions with an extra lane operand.
  bool isFpMLxInstruction(unsigned Opcode, unsigned &MulOpc,
                          unsigned &AddSubOpc, bool &NegAcc,
                          bool &HasLane) const;
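
  // A minimal sketch (illustrative; DefMI and UseMI are hypothetical locals)
  // of how a scheduling hazard check might pair this query with
  // canCauseFpMLxStall, declared below:
  //
  //   if (TII->isFpMLxInstruction(DefMI->getOpcode()) &&
  //       TII->canCauseFpMLxStall(UseMI->getOpcode()))
  //     // ... keep the two instructions apart in the schedule ...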

  /// canCauseFpMLxStall - Return true if an instruction of the specified
  /// opcode will cause stalls when scheduled after (within a 4-cycle window)
  /// a fp MLA / MLS instruction.
  bool canCauseFpMLxStall(unsigned Opcode) const {
    return MLxHazardOpcodes.count(Opcode);
  }
};

static inline
const MachineInstrBuilder &AddDefaultPred(const MachineInstrBuilder &MIB) {
  return MIB.addImm((int64_t)ARMCC::AL).addReg(0);
}

static inline
const MachineInstrBuilder &AddDefaultCC(const MachineInstrBuilder &MIB) {
  return MIB.addReg(0);
}

static inline
const MachineInstrBuilder &AddDefaultT1CC(const MachineInstrBuilder &MIB,
                                          bool isDead = false) {
  return MIB.addReg(ARM::CPSR, getDefRegState(true) | getDeadRegState(isDead));
}

static inline
const MachineInstrBuilder &AddNoT1CC(const MachineInstrBuilder &MIB) {
  return MIB.addReg(0);
}

static inline
bool isUncondBranchOpcode(int Opc) {
  return Opc == ARM::B || Opc == ARM::tB || Opc == ARM::t2B;
}

static inline
bool isCondBranchOpcode(int Opc) {
  return Opc == ARM::Bcc || Opc == ARM::tBcc || Opc == ARM::t2Bcc;
}

static inline
bool isJumpTableBranchOpcode(int Opc) {
  return Opc == ARM::BR_JTr || Opc == ARM::BR_JTm || Opc == ARM::BR_JTadd ||
    Opc == ARM::tBR_JTr || Opc == ARM::t2BR_JT;
}

static inline
bool isIndirectBranchOpcode(int Opc) {
  return Opc == ARM::BX || Opc == ARM::MOVPCRX || Opc == ARM::tBRIND;
}

/// getInstrPredicate - If the instruction is predicated, returns its predicate
/// condition, otherwise returns AL. It also returns the condition code
/// register by reference.
ARMCC::CondCodes getInstrPredicate(const MachineInstr *MI, unsigned &PredReg);

int getMatchingCondBranchOpcode(int Opc);


/// Map pseudo instructions that imply an 'S' bit onto real opcodes. Whether
/// the instruction is encoded with an 'S' bit is determined by the optional
/// CPSR def operand.
unsigned convertAddSubFlagsOpcode(unsigned OldOpc);

/// emitARMRegPlusImmediate / emitT2RegPlusImmediate - Emits a series of
/// instructions to materialize a destreg = basereg + immediate in ARM / Thumb2
/// code.
void emitARMRegPlusImmediate(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                             unsigned DestReg, unsigned BaseReg, int NumBytes,
                             ARMCC::CondCodes Pred, unsigned PredReg,
                             const ARMBaseInstrInfo &TII, unsigned MIFlags = 0);

void emitT2RegPlusImmediate(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                            unsigned DestReg, unsigned BaseReg, int NumBytes,
                            ARMCC::CondCodes Pred, unsigned PredReg,
                            const ARMBaseInstrInfo &TII, unsigned MIFlags = 0);
void emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
                               MachineBasicBlock::iterator &MBBI, DebugLoc dl,
                               unsigned DestReg, unsigned BaseReg,
                               int NumBytes, const TargetInstrInfo &TII,
                               const ARMBaseRegisterInfo& MRI,
                               unsigned MIFlags = 0);


/// rewriteARMFrameIndex / rewriteT2FrameIndex -
/// Rewrite MI to access 'Offset' bytes from the FP. Return false if the
/// offset could not be handled directly in MI, and return the left-over
/// portion by reference.
bool rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                          unsigned FrameReg, int &Offset,
                          const ARMBaseInstrInfo &TII);

bool rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                         unsigned FrameReg, int &Offset,
                         const ARMBaseInstrInfo &TII);

} // End llvm namespace

#endif