//===- llvm/CodeGen/TargetSchedule.h - Sched Machine Model ------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines a wrapper around MCSchedModel that allows the interface to
// benefit from information currently only available in TargetInstrInfo.
// Ideally, the scheduling interface would be fully defined in the MC layer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_TARGETSCHEDULE_H
#define LLVM_CODEGEN_TARGETSCHEDULE_H

#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/MC/MCSchedule.h"
#include "llvm/Target/TargetSubtargetInfo.h"

namespace llvm {

class MachineInstr;
class TargetInstrInfo;

/// Provide an instruction scheduling machine model to CodeGen passes.
class TargetSchedModel {
  // For efficiency, hold a copy of the statically defined MCSchedModel for
  // this processor.
  MCSchedModel SchedModel;
  // Per-cycle pipeline-stage itinerary data, if the subtarget provides any.
  InstrItineraryData InstrItins;
  // Non-owning back-pointers into the subtarget; set by init(), may stay null
  // until then.
  const TargetSubtargetInfo *STI = nullptr;
  const TargetInstrInfo *TII = nullptr;

  // One normalization factor per processor-resource kind; indexed by resource
  // ID (see getResourceFactor).
  SmallVector<unsigned, 16> ResourceFactors;
  unsigned MicroOpFactor; // Multiply to normalize microops to resource units.
  unsigned ResourceLCM;   // Resource units per cycle. Latency normalization factor.

  // Shared latency computation used by the public computeInstrLatency
  // overloads once a scheduling class has been resolved.
  unsigned computeInstrLatency(const MCSchedClassDesc &SCDesc) const;

public:
  TargetSchedModel() : SchedModel(MCSchedModel::GetDefaultSchedModel()) {}

  /// \brief Initialize the machine model for instruction scheduling.
  ///
  /// The machine model API keeps a copy of the top-level MCSchedModel table
  /// indices and may query TargetSubtargetInfo and TargetInstrInfo to resolve
  /// dynamic properties.
  void init(const MCSchedModel &sm, const TargetSubtargetInfo *sti,
            const TargetInstrInfo *tii);

  /// Return the MCSchedClassDesc for this instruction.
  const MCSchedClassDesc *resolveSchedClass(const MachineInstr *MI) const;

  /// \brief TargetSubtargetInfo getter.
  const TargetSubtargetInfo *getSubtargetInfo() const { return STI; }

  /// \brief TargetInstrInfo getter.
  const TargetInstrInfo *getInstrInfo() const { return TII; }

  /// \brief Return true if this machine model includes an instruction-level
  /// scheduling model.
  ///
  /// This is more detailed than the coarse-grained IssueWidth and default
  /// latency properties, but separate from the per-cycle itinerary data.
  bool hasInstrSchedModel() const;

  /// \brief Direct access to the underlying MCSchedModel copy.
  const MCSchedModel *getMCSchedModel() const { return &SchedModel; }

  /// \brief Return true if this machine model includes cycle-to-cycle itinerary
  /// data.
  ///
  /// This models scheduling at each stage in the processor pipeline.
  bool hasInstrItineraries() const;

  /// \brief Return the itinerary data, or null when the subtarget has none.
  const InstrItineraryData *getInstrItineraries() const {
    if (hasInstrItineraries())
      return &InstrItins;
    return nullptr;
  }

  /// \brief Return true if this machine model includes an instruction-level
  /// scheduling model or cycle-to-cycle itinerary data.
  bool hasInstrSchedModelOrItineraries() const {
    return hasInstrSchedModel() || hasInstrItineraries();
  }

  /// \brief Identify the processor corresponding to the current subtarget.
  unsigned getProcessorID() const { return SchedModel.getProcessorID(); }

  /// \brief Maximum number of micro-ops that may be scheduled per cycle.
  unsigned getIssueWidth() const { return SchedModel.IssueWidth; }

  /// \brief Return true if new group must begin.
  bool mustBeginGroup(const MachineInstr *MI,
                      const MCSchedClassDesc *SC = nullptr) const;

  /// \brief Return true if current group must end.
  bool mustEndGroup(const MachineInstr *MI,
                    const MCSchedClassDesc *SC = nullptr) const;

  /// \brief Return the number of issue slots required for this MI.
  unsigned getNumMicroOps(const MachineInstr *MI,
                          const MCSchedClassDesc *SC = nullptr) const;

  /// \brief Get the number of kinds of resources for this target.
  unsigned getNumProcResourceKinds() const {
    return SchedModel.getNumProcResourceKinds();
  }

  /// \brief Get a processor resource by ID for convenience.
  const MCProcResourceDesc *getProcResource(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx);
  }

#ifndef NDEBUG
  /// \brief Debug-only name for a processor resource; index 0 is the implicit
  /// micro-op resource ("MOps").
  const char *getResourceName(unsigned PIdx) const {
    if (!PIdx)
      return "MOps";
    return SchedModel.getProcResource(PIdx)->Name;
  }
#endif

  using ProcResIter = const MCWriteProcResEntry *;

  // \brief Get an iterator into the processor resources consumed by this
  // scheduling class.
  ProcResIter getWriteProcResBegin(const MCSchedClassDesc *SC) const {
    // The subtarget holds a single resource table for all processors.
    return STI->getWriteProcResBegin(SC);
  }
  ProcResIter getWriteProcResEnd(const MCSchedClassDesc *SC) const {
    return STI->getWriteProcResEnd(SC);
  }

  /// \brief Multiply the number of units consumed for a resource by this factor
  /// to normalize it relative to other resources.
  unsigned getResourceFactor(unsigned ResIdx) const {
    return ResourceFactors[ResIdx];
  }

  /// \brief Multiply number of micro-ops by this factor to normalize it
  /// relative to other resources.
  unsigned getMicroOpFactor() const {
    return MicroOpFactor;
  }

  /// \brief Multiply cycle count by this factor to normalize it relative to
  /// other resources. This is the number of resource units per cycle.
  unsigned getLatencyFactor() const {
    return ResourceLCM;
  }

  /// \brief Number of micro-ops that may be buffered for OOO execution.
  unsigned getMicroOpBufferSize() const { return SchedModel.MicroOpBufferSize; }

  /// \brief Number of resource units that may be buffered for OOO execution.
  /// \return The buffer size in resource units or -1 for unlimited.
  int getResourceBufferSize(unsigned PIdx) const {
    return SchedModel.getProcResource(PIdx)->BufferSize;
  }

  /// \brief Compute operand latency based on the available machine model.
  ///
  /// Compute and return the latency of the given data dependent def and use
  /// when the operand indices are already known. UseMI may be NULL for an
  /// unknown user.
  unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx,
                                 const MachineInstr *UseMI, unsigned UseOperIdx)
    const;

  /// \brief Compute the instruction latency based on the available machine
  /// model.
  ///
  /// Compute and return the expected latency of this instruction independent of
  /// a particular use. computeOperandLatency is the preferred API, but this is
  /// occasionally useful to help estimate instruction cost.
  ///
  /// If UseDefaultDefLatency is false and no new machine sched model is
  /// present this method falls back to TII->getInstrLatency with an empty
  /// instruction itinerary (this is so we preserve the previous behavior of the
  /// if converter after moving it to TargetSchedModel).
  unsigned computeInstrLatency(const MachineInstr *MI,
                               bool UseDefaultDefLatency = true) const;
  /// \brief Opcode-only variant; resolves the scheduling class from the opcode
  /// rather than a concrete MachineInstr.
  unsigned computeInstrLatency(unsigned Opcode) const;

  /// \brief Output dependency latency of a pair of defs of the same register.
  ///
  /// This is typically one cycle.
  unsigned computeOutputLatency(const MachineInstr *DefMI, unsigned DefIdx,
                                const MachineInstr *DepMI) const;

  /// \brief Compute the reciprocal throughput of the given instruction.
  /// Returns None when the model lacks the data needed to compute it.
  Optional<double> computeInstrRThroughput(const MachineInstr *MI) const;
  Optional<double> computeInstrRThroughput(unsigned Opcode) const;
};

} // end namespace llvm

#endif // LLVM_CODEGEN_TARGETSCHEDULE_H