//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the TargetInstrInfo class that is
// common to all AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "AMDIL.h"
#include "AMDILUtilityFunctions.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

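// GET_INSTRINFO_CTOR selects the TableGen-generated constructor definitions
// from AMDGPUGenInstrInfo.inc.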
#define GET_INSTRINFO_CTOR
#include "AMDGPUGenInstrInfo.inc"

using namespace llvm;

AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
  : AMDGPUGenInstrInfo(0, 0), RI(tm, *this), TM(tm) { }

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

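// The hooks below are TargetInstrInfo callbacks that have not been
// implemented for AMD GPUs yet.  Each one returns the conservative
// "don't know" answer so the generic code-gen passes fall back to safe
// behavior.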
bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                             int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
                                                   int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return NULL;
}

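// Advance iter to the next branch instruction in MBB, if any.  Returns true
// with iter pointing at the branch when one is found, and false when the end
// of the block is reached without seeing a branch.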
bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                         MachineBasicBlock &MBB) const {
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    // ExpandCaseToAllScalarTypes expands to a case label for each scalar type
    // variant of BRANCH_COND; all of them fall through to the return below.
    ExpandCaseToAllScalarTypes(AMDGPU::BRANCH_COND);
    case AMDGPU::BRANCH:
      return true;
    default:
      break;
    }
    ++iter;
  }
  return false;
}

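// Find the insertion point that precedes any trailing control-flow pseudo
// instructions (ENDLOOP/ENDIF/ELSE) at the bottom of MBB, so that new
// instructions can be placed before them.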
MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) {
  if (MBB->empty()) {
    return MBB->end();
  }
  // Walk backwards over any trailing flow control instructions.
  MachineBasicBlock::iterator tmp = MBB->end();
  while (tmp != MBB->begin()) {
    --tmp;
    switch (tmp->getOpcode()) {
    case AMDGPU::ENDLOOP:
    case AMDGPU::ENDIF:
    case AMDGPU::ELSE:
      break;  // Keep scanning backwards.
    default:
      // Insert after the last instruction that is not flow control.
      return ++tmp;
    }
  }
  // The entire block is flow control; insert at the very beginning.
  return tmp;
}

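// Spilling registers to the stack is not supported for AMD GPUs yet, so
// reaching either of the following hooks indicates a bug in an earlier pass.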
void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  assert(!"Not Implemented");
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return 0;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops)
  const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                              int64_t Offset1, int64_t Offset2,
                                              unsigned NumLoads) const {
  assert(Offset2 > Offset1
         && "Second offset should be larger than first offset!");
  // Schedule the loads together if there are fewer than 16 of them in a row
  // and their offsets are within 16 bytes of each other.
  // TODO: Schedule loads near each other if they fit in the same cache line.
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}

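// Returning true from ReverseBranchCondition reports that the condition
// cannot be reversed, which is the conservative answer until this is
// implemented.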
bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
  const {
  // TODO: Implement this function
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                   const SmallVectorImpl<MachineOperand> &Pred2)
  const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

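// Rewrite the register classes of the virtual registers defined by MI to the
// equivalent classes that are representable in the target ISA, as reported
// by AMDGPURegisterInfo::getISARegClass.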
void AMDGPUInstrInfo::convertToISA(MachineInstr &MI, MachineFunction &MF,
                                   DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo &RI = getRegisterInfo();

  for (unsigned i = 0; i < MI.getNumOperands(); ++i) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert dst regclass to one that is supported by the ISA
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass *oldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass *newRegClass = RI.getISARegClass(oldRegClass);

        assert(newRegClass);

        MRI.setRegClass(MO.getReg(), newRegClass);
      }
    }
  }
}