R600ExpandSpecialInstrs.cpp revision e67a4afb5da59c02338622eea68e096ba143113f
//===-- R600ExpandSpecialInstrs.cpp - Expand special instructions ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// Vector, Reduction, and Cube instructions need to fill the entire instruction
/// group to work correctly.  This pass expands these individual instructions
/// into several instructions that will completely fill the instruction group.
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "R600Defines.h"
#include "R600InstrInfo.h"
#include "R600MachineFunctionInfo.h"
#include "R600RegisterInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;

namespace {

class R600ExpandSpecialInstrsPass : public MachineFunctionPass {

private:
  static char ID;
  const R600InstrInfo *TII;

  bool ExpandInputPerspective(MachineInstr &MI);
  bool ExpandInputConstant(MachineInstr &MI);

public:
  R600ExpandSpecialInstrsPass(TargetMachine &tm) : MachineFunctionPass(ID),
    TII(static_cast<const R600InstrInfo *>(tm.getInstrInfo())) { }

  virtual bool runOnMachineFunction(MachineFunction &MF);

  const char *getPassName() const {
    return "R600 Expand special instructions pass";
  }
};

} // End anonymous namespace

char R600ExpandSpecialInstrsPass::ID = 0;

FunctionPass *llvm::createR600ExpandSpecialInstrsPass(TargetMachine &TM) {
  return new R600ExpandSpecialInstrsPass(TM);
}

bool R600ExpandSpecialInstrsPass::runOnMachineFunction(MachineFunction &MF) {

  const R600RegisterInfo &TRI = TII->getRegisterInfo();

  for (MachineFunction::iterator BB = MF.begin(), BB_E = MF.end();
                                                  BB != BB_E; ++BB) {
    MachineBasicBlock &MBB = *BB;
    MachineBasicBlock::iterator I = MBB.begin();
    while (I != MBB.end()) {
      MachineInstr &MI = *I;
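      // Advance the iterator now; MI may be erased from the block below.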
      I = llvm::next(I);

      switch (MI.getOpcode()) {
      default: break;
      // Expand PRED_X to one of the PRED_SET instructions.
      case AMDGPU::PRED_X: {
        uint64_t Flags = MI.getOperand(3).getImm();
        // The native opcode used by PRED_X is stored as an immediate in the
        // third operand.
        MachineInstr *PredSet = TII->buildDefaultInstruction(MBB, I,
                                            MI.getOperand(2).getImm(), // opcode
                                            MI.getOperand(0).getReg(), // dst
                                            MI.getOperand(1).getReg(), // src0
                                            AMDGPU::ZERO);             // src1
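        // Mask the GPR write; the PRED_SET is only needed for its effect on
        // the predicate (or exec mask) state, selected below.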
        TII->addFlag(PredSet, 0, MO_FLAG_MASK);
        if (Flags & MO_FLAG_PUSH) {
          TII->setImmOperand(PredSet, R600Operands::UPDATE_EXEC_MASK, 1);
        } else {
          TII->setImmOperand(PredSet, R600Operands::UPDATE_PREDICATE, 1);
        }
        MI.eraseFromParent();
        continue;
      }
      case AMDGPU::BREAK: {
        MachineInstr *PredSet = TII->buildDefaultInstruction(MBB, I,
                                          AMDGPU::PRED_SETE_INT,
                                          AMDGPU::PREDICATE_BIT,
                                          AMDGPU::ZERO,
                                          AMDGPU::ZERO);
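        // PRED_SETE_INT with two ZERO operands compares 0 == 0, so the
        // predicate bit is always set and the PREDICATED_BREAK emitted below
        // is effectively unconditional.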
        TII->addFlag(PredSet, 0, MO_FLAG_MASK);
        TII->setImmOperand(PredSet, R600Operands::UPDATE_EXEC_MASK, 1);

        BuildMI(MBB, I, MBB.findDebugLoc(I),
                TII->get(AMDGPU::PREDICATED_BREAK))
                .addReg(AMDGPU::PREDICATE_BIT);
        MI.eraseFromParent();
        continue;
      }

      case AMDGPU::INTERP_PAIR_XY: {
        MachineInstr *BMI;
        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
                MI.getOperand(2).getImm());

        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          unsigned DstReg;

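          // Only the X and Y results are real; channels 2 and 3 write dummy
          // T0 registers and are masked below, purely to fill out the
          // four-slot instruction group.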
          if (Chan < 2)
            DstReg = MI.getOperand(Chan).getReg();
          else
            DstReg = Chan == 2 ? AMDGPU::T0_Z : AMDGPU::T0_W;

          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_XY,
              DstReg, MI.getOperand(3 + (Chan % 2)).getReg(), PReg);

          if (Chan > 0) {
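            // Bundle this slot with the previous one so all four INTERP
            // instructions are emitted as a single instruction group.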
            BMI->bundleWithPred();
          }
          if (Chan >= 2)
            TII->addFlag(BMI, 0, MO_FLAG_MASK);
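          // Clear the "last" bit on all but the fourth slot; only the final
          // instruction terminates the group.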
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
        }

        MI.eraseFromParent();
        continue;
      }

      case AMDGPU::INTERP_PAIR_ZW: {
        MachineInstr *BMI;
        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
                MI.getOperand(2).getImm());

        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          unsigned DstReg;

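          // Mirror image of INTERP_PAIR_XY: channels 0 and 1 are the dummy
          // (masked) slots, channels 2 and 3 carry the real Z and W results.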
          if (Chan < 2)
            DstReg = Chan == 0 ? AMDGPU::T0_X : AMDGPU::T0_Y;
          else
            DstReg = MI.getOperand(Chan - 2).getReg();

          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_ZW,
              DstReg, MI.getOperand(3 + (Chan % 2)).getReg(), PReg);

          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Chan < 2)
            TII->addFlag(BMI, 0, MO_FLAG_MASK);
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
        }

        MI.eraseFromParent();
        continue;
      }

      case AMDGPU::INTERP_VEC_LOAD: {
        const R600RegisterInfo &TRI = TII->getRegisterInfo();
        MachineInstr *BMI;
        unsigned PReg = AMDGPU::R600_ArrayBaseRegClass.getRegister(
                MI.getOperand(1).getImm());
        unsigned DstReg = MI.getOperand(0).getReg();

        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          BMI = TII->buildDefaultInstruction(MBB, I, AMDGPU::INTERP_LOAD_P0,
              TRI.getSubReg(DstReg, TRI.getSubRegFromChannel(Chan)), PReg);
          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
        }

        MI.eraseFromParent();
        continue;
      }
      case AMDGPU::DOT_4: {

        const R600RegisterInfo &TRI = TII->getRegisterInfo();

        unsigned DstReg = MI.getOperand(0).getReg();
        unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;

        for (unsigned Chan = 0; Chan < 4; ++Chan) {
          bool Mask = (Chan != TRI.getHWRegChan(DstReg));
          unsigned SubDstReg =
              AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
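          // R600_TReg32 interleaves channels (T0_X, T0_Y, T0_Z, T0_W,
          // T1_X, ...), so (DstBase * 4) + Chan addresses channel Chan of
          // the destination register.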
          MachineInstr *BMI =
              TII->buildSlotOfVectorInstruction(MBB, &MI, Chan, SubDstReg);
          if (Chan > 0) {
            BMI->bundleWithPred();
          }
          if (Mask) {
            TII->addFlag(BMI, 0, MO_FLAG_MASK);
          }
          if (Chan != 3)
            TII->addFlag(BMI, 0, MO_FLAG_NOT_LAST);
          unsigned Opcode = BMI->getOpcode();
          // While not strictly necessary from a hardware point of view, we
          // force all source operands of a DOT4 instruction to belong to the
          // same slot.
          unsigned Src0 = BMI->getOperand(
              TII->getOperandIdx(Opcode, R600Operands::SRC0))
              .getReg();
          unsigned Src1 = BMI->getOperand(
              TII->getOperandIdx(Opcode, R600Operands::SRC1))
              .getReg();
          (void) Src0;
          (void) Src1;
          if ((TRI.getEncodingValue(Src0) & 0xff) < 127 &&
              (TRI.getEncodingValue(Src1) & 0xff) < 127)
            assert(TRI.getHWRegChan(Src0) == TRI.getHWRegChan(Src1));
        }
        MI.eraseFromParent();
        continue;
      }
      }

      bool IsReduction = TII->isReductionOp(MI.getOpcode());
      bool IsVector = TII->isVector(MI);
      bool IsCube = TII->isCubeOp(MI.getOpcode());
      if (!IsReduction && !IsVector && !IsCube) {
        continue;
      }

      // Expand the instruction
      //
      // Reduction instructions:
      // T0_X = DP4 T1_XYZW, T2_XYZW
      // becomes:
      // T0_X = DP4 T1_X, T2_X
      // T0_Y (write masked) = DP4 T1_Y, T2_Y
      // T0_Z (write masked) = DP4 T1_Z, T2_Z
      // T0_W (write masked) = DP4 T1_W, T2_W
      //
      // Vector instructions:
      // T0_X = MULLO_INT T1_X, T2_X
      // becomes:
      // T0_X = MULLO_INT T1_X, T2_X
      // T0_Y (write masked) = MULLO_INT T1_X, T2_X
      // T0_Z (write masked) = MULLO_INT T1_X, T2_X
      // T0_W (write masked) = MULLO_INT T1_X, T2_X
      //
      // Cube instructions:
      // T0_XYZW = CUBE T1_XYZW
      // becomes:
      // T0_X = CUBE T1_Z, T1_Y
      // T0_Y = CUBE T1_Z, T1_X
      // T0_Z = CUBE T1_X, T1_Z
      // T0_W = CUBE T1_Y, T1_Z
      for (unsigned Chan = 0; Chan < 4; Chan++) {
        unsigned DstReg = MI.getOperand(
                            TII->getOperandIdx(MI, R600Operands::DST)).getReg();
        unsigned Src0 = MI.getOperand(
                           TII->getOperandIdx(MI, R600Operands::SRC0)).getReg();
        unsigned Src1 = 0;

        // Determine the correct source registers
        if (!IsCube) {
          int Src1Idx = TII->getOperandIdx(MI, R600Operands::SRC1);
          if (Src1Idx != -1) {
            Src1 = MI.getOperand(Src1Idx).getReg();
          }
        }
        if (IsReduction) {
          unsigned SubRegIndex = TRI.getSubRegFromChannel(Chan);
          Src0 = TRI.getSubReg(Src0, SubRegIndex);
          Src1 = TRI.getSubReg(Src1, SubRegIndex);
        } else if (IsCube) {
          static const int CubeSrcSwz[] = {2, 2, 0, 1};
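          // Indexing the table forward for src0 and backward for src1
          // produces the operand pairs (Z,Y), (Z,X), (X,Z), (Y,Z) shown in
          // the expansion comment above.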
          unsigned SubRegIndex0 = TRI.getSubRegFromChannel(CubeSrcSwz[Chan]);
          unsigned SubRegIndex1 = TRI.getSubRegFromChannel(CubeSrcSwz[3 - Chan]);
          Src1 = TRI.getSubReg(Src0, SubRegIndex1);
          Src0 = TRI.getSubReg(Src0, SubRegIndex0);
        }

        // Determine the correct destination registers.
        bool Mask = false;
        bool NotLast = true;
        if (IsCube) {
          unsigned SubRegIndex = TRI.getSubRegFromChannel(Chan);
          DstReg = TRI.getSubReg(DstReg, SubRegIndex);
        } else {
          // Mask the write if the original instruction does not write to
          // the current channel.
          Mask = (Chan != TRI.getHWRegChan(DstReg));
          unsigned DstBase = TRI.getEncodingValue(DstReg) & HW_REG_MASK;
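          // Same interleaved-channel mapping as in the DOT_4 expansion
          // above: (DstBase * 4) + Chan is the per-channel register for
          // this slot.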
          DstReg = AMDGPU::R600_TReg32RegClass.getRegister((DstBase * 4) + Chan);
        }

        // The fourth channel is the last instruction of the group.
        NotLast = (Chan != 3);

        // Add the new instruction
        unsigned Opcode = MI.getOpcode();
        switch (Opcode) {
        case AMDGPU::CUBE_r600_pseudo:
          Opcode = AMDGPU::CUBE_r600_real;
          break;
        case AMDGPU::CUBE_eg_pseudo:
          Opcode = AMDGPU::CUBE_eg_real;
          break;
        default:
          break;
        }

        MachineInstr *NewMI =
          TII->buildDefaultInstruction(MBB, I, Opcode, DstReg, Src0, Src1);

        if (Chan != 0)
          NewMI->bundleWithPred();
        if (Mask) {
          TII->addFlag(NewMI, 0, MO_FLAG_MASK);
        }
        if (NotLast) {
          TII->addFlag(NewMI, 0, MO_FLAG_NOT_LAST);
        }
      }
      MI.eraseFromParent();
    }
  }
  return false;
}