//===-- AMDGPUInstrInfo.cpp - Base class for AMD GPU InstrInfo ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Implementation of the TargetInstrInfo class that is common to all
/// AMD GPUs.
//
//===----------------------------------------------------------------------===//

#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUTargetMachine.h"
#include "AMDIL.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/ErrorHandling.h"

#define GET_INSTRINFO_CTOR
#define GET_INSTRMAP_INFO
#include "AMDGPUGenInstrInfo.inc"

using namespace llvm;

AMDGPUInstrInfo::AMDGPUInstrInfo(TargetMachine &tm)
  : AMDGPUGenInstrInfo(0, 0), RI(tm, *this), TM(tm) { }

const AMDGPURegisterInfo &AMDGPUInstrInfo::getRegisterInfo() const {
  return RI;
}

bool AMDGPUInstrInfo::isCoalescableExtInstr(const MachineInstr &MI,
                                            unsigned &SrcReg, unsigned &DstReg,
                                            unsigned &SubIdx) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlot(const MachineInstr *MI,
                                              int &FrameIndex) const {
  // TODO: Implement this function
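  // A minimal sketch of what this hook could do, assuming a REGISTER_LOAD
  // style pseudo whose destination is operand 0 and whose frame-index
  // address is operand 1 (a hypothetical operand layout; verify against the
  // real instruction definitions before relying on it):
  //   if (isRegisterLoad(*MI) && MI->getOperand(1).isFI()) {
  //     FrameIndex = MI->getOperand(1).getIndex();
  //     return MI->getOperand(0).getReg();
  //   }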
  return 0;
}

unsigned AMDGPUInstrInfo::isLoadFromStackSlotPostFE(const MachineInstr *MI,
                                                    int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

unsigned AMDGPUInstrInfo::isStoreToStackSlot(const MachineInstr *MI,
                                             int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

unsigned AMDGPUInstrInfo::isStoreToStackSlotPostFE(const MachineInstr *MI,
                                                   int &FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

bool AMDGPUInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  // TODO: Implement this function
  return false;
}

MachineInstr *
AMDGPUInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                       MachineBasicBlock::iterator &MBBI,
                                       LiveVariables *LV) const {
  // TODO: Implement this function
  return NULL;
}
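
/// Advance \p iter to the next branch instruction in \p MBB.
/// \returns true if a branch was found, false if the end of the block was
/// reached first.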
bool AMDGPUInstrInfo::getNextBranchInstr(MachineBasicBlock::iterator &iter,
                                         MachineBasicBlock &MBB) const {
  while (iter != MBB.end()) {
    switch (iter->getOpcode()) {
    default:
      break;
    case AMDGPU::BRANCH_COND_i32:
    case AMDGPU::BRANCH_COND_f32:
    case AMDGPU::BRANCH:
      return true;
    }
    ++iter;
  }
  return false;
}

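/// Scan backwards from the end of \p MBB and return an iterator just past
/// the last instruction that is not trailing flow control (ENDLOOP, ENDIF,
/// or ELSE), i.e. the point where new instructions can be inserted ahead of
/// the block's closing flow control.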
static MachineBasicBlock::iterator skipFlowControl(MachineBasicBlock *MBB) {
  MachineBasicBlock::iterator tmp = MBB->end();
  if (MBB->empty()) {
    return MBB->end();
  }
  while (--tmp) {
    if (tmp->getOpcode() == AMDGPU::ENDLOOP
        || tmp->getOpcode() == AMDGPU::ENDIF
        || tmp->getOpcode() == AMDGPU::ELSE) {
      if (tmp == MBB->begin()) {
        return tmp;
      }
      continue;
    }
    return ++tmp;
  }
  return MBB->end();
}

void
AMDGPUInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MI,
                                     unsigned SrcReg, bool isKill,
                                     int FrameIndex,
                                     const TargetRegisterClass *RC,
                                     const TargetRegisterInfo *TRI) const {
  llvm_unreachable("Not Implemented");
}

void
AMDGPUInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned DestReg, int FrameIndex,
                                      const TargetRegisterClass *RC,
                                      const TargetRegisterInfo *TRI) const {
  llvm_unreachable("Not Implemented");
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       int FrameIndex) const {
  // TODO: Implement this function
  return 0;
}

MachineInstr *
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
                                       MachineInstr *MI,
                                       const SmallVectorImpl<unsigned> &Ops,
                                       MachineInstr *LoadMI) const {
  // TODO: Implement this function
  return 0;
}

bool
AMDGPUInstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
                                      const SmallVectorImpl<unsigned> &Ops) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(MachineFunction &MF, MachineInstr *MI,
                                     unsigned Reg, bool UnfoldLoad,
                                     bool UnfoldStore,
                                     SmallVectorImpl<MachineInstr*> &NewMIs) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::unfoldMemoryOperand(SelectionDAG &DAG, SDNode *N,
                                     SmallVectorImpl<SDNode*> &NewNodes) const {
  // TODO: Implement this function
  return false;
}

unsigned
AMDGPUInstrInfo::getOpcodeAfterMemoryUnfold(unsigned Opc,
                                            bool UnfoldLoad, bool UnfoldStore,
                                            unsigned *LoadRegIndex) const {
  // TODO: Implement this function
  return 0;
}

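/// Scheduler hook: decide whether two loads should be scheduled next to each
/// other.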
bool AMDGPUInstrInfo::shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2,
                                              int64_t Offset1, int64_t Offset2,
                                              unsigned NumLoads) const {
  assert(Offset2 > Offset1
         && "Second offset should be larger than first offset!");
  // If there are fewer than 16 loads in a row and the offsets are within
  // 16 bytes of each other, schedule them together.
  // TODO: Schedule loads near each other whenever they fit in a cache line.
  return (NumLoads < 16 && (Offset2 - Offset1) < 16);
}

bool
AMDGPUInstrInfo::ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond)
  const {
  // TODO: Implement this function. Returning true reports that the
  // condition cannot be reversed, which is the conservatively safe answer.
  return true;
}

void AMDGPUInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  // TODO: Implement this function
}

bool AMDGPUInstrInfo::isPredicated(const MachineInstr *MI) const {
  // TODO: Implement this function
  return false;
}

bool
AMDGPUInstrInfo::SubsumesPredicate(const SmallVectorImpl<MachineOperand> &Pred1,
                                   const SmallVectorImpl<MachineOperand> &Pred2)
  const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::DefinesPredicate(MachineInstr *MI,
                                       std::vector<MachineOperand> &Pred) const {
  // TODO: Implement this function
  return false;
}

bool AMDGPUInstrInfo::isPredicable(MachineInstr *MI) const {
  // TODO: Implement this function
  return MI->getDesc().isPredicable();
}

bool
AMDGPUInstrInfo::isSafeToMoveRegClassDefs(const TargetRegisterClass *RC) const {
  // TODO: Implement this function
  return true;
}

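/// \returns true if \p MI has the AMDGPU_FLAG_REGISTER_STORE bit set in its
/// target-specific flags (TSFlags).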
bool AMDGPUInstrInfo::isRegisterStore(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_STORE;
}

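/// \returns true if \p MI has the AMDGPU_FLAG_REGISTER_LOAD bit set in its
/// target-specific flags (TSFlags).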
bool AMDGPUInstrInfo::isRegisterLoad(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & AMDGPU_FLAG_REGISTER_LOAD;
}

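/// Convert \p MI into a form supported by the target ISA by rewriting the
/// register classes of its virtual register defs.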
void AMDGPUInstrInfo::convertToISA(MachineInstr &MI, MachineFunction &MF,
                                   DebugLoc DL) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const AMDGPURegisterInfo &RI = getRegisterInfo();

  for (unsigned i = 0; i < MI.getNumOperands(); ++i) {
    MachineOperand &MO = MI.getOperand(i);
    // Convert the dst register class to one that is supported by the ISA.
    if (MO.isReg() && MO.isDef()) {
      if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
        const TargetRegisterClass *oldRegClass = MRI.getRegClass(MO.getReg());
        const TargetRegisterClass *newRegClass = RI.getISARegClass(oldRegClass);

        assert(newRegClass);

        MRI.setRegClass(MO.getReg(), newRegClass);
      }
    }
  }
}