SIISelLowering.cpp revision 73a2c4b9db638cad83e412097ed3433649aab47b
//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Most of the DAG lowering is handled in AMDGPUISelLowering.cpp.  This file is
// mostly EmitInstrWithCustomInserter().
//
//===----------------------------------------------------------------------===//

#include "SIISelLowering.h"
#include "AMDIL.h"
#include "AMDILIntrinsicInfo.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"

using namespace llvm;

SITargetLowering::SITargetLowering(TargetMachine &TM) :
    AMDGPUTargetLowering(TM),
    TII(static_cast<const SIInstrInfo*>(TM.getInstrInfo()))
{
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VReg_32RegClass);
  addRegisterClass(MVT::i32, &AMDGPU::VReg_32RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::VReg_64RegClass);
  addRegisterClass(MVT::i1, &AMDGPU::SCCRegRegClass);
  addRegisterClass(MVT::i1, &AMDGPU::VCCRegRegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::SReg_128RegClass);
  addRegisterClass(MVT::v8i32, &AMDGPU::SReg_256RegClass);

  computeRegisterProperties();

  setOperationAction(ISD::AND, MVT::i1, Custom);

  setOperationAction(ISD::ADD, MVT::i64, Legal);
  setOperationAction(ISD::ADD, MVT::i32, Legal);

  setOperationAction(ISD::BR_CC, MVT::i32, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // We need to custom lower loads from the USER_SGPR address space, so we can
  // add the SGPRs as livein registers.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::LOAD, MVT::i64, Custom);
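  // The actual mapping from a USER_SGPR address to an SGPR is done in
  // LowerLOAD() below.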

  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
  setTargetDAGCombine(ISD::SELECT_CC);

  setTargetDAGCombine(ISD::SETCC);
}

MachineBasicBlock * SITargetLowering::EmitInstrWithCustomInserter(
    MachineInstr * MI, MachineBasicBlock * BB) const
{
  const TargetInstrInfo * TII = getTargetMachine().getInstrInfo();
  MachineRegisterInfo & MRI = BB->getParent()->getRegInfo();
  MachineBasicBlock::iterator I = MI;

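  // Instructions flagged with SIInstrFlags::NEED_WAIT get an S_WAITCNT
  // appended directly after them, presumably so that their results are
  // complete before any later use (see AppendS_WAITCNT() below).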
  if (TII->get(MI->getOpcode()).TSFlags & SIInstrFlags::NEED_WAIT) {
    AppendS_WAITCNT(MI, *BB, llvm::next(I));
    return BB;
  }

  switch (MI->getOpcode()) {
  default:
    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);

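  // CLAMP_SI, FABS_SI and FNEG_SI are each expanded to a V_MOV_B32_e64 of
  // the source operand with the corresponding modifier bit (CLAMP, ABS or
  // NEG) set.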
  case AMDGPU::CLAMP_SI:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::V_MOV_B32_e64))
           .addOperand(MI->getOperand(0))
           .addOperand(MI->getOperand(1))
           // VSRC1-2 are unused, but we still need to fill all the
           // operand slots, so we just reuse the VSRC0 operand
           .addOperand(MI->getOperand(1))
           .addOperand(MI->getOperand(1))
           .addImm(0) // ABS
           .addImm(1) // CLAMP
           .addImm(0) // OMOD
           .addImm(0); // NEG
    MI->eraseFromParent();
    break;

  case AMDGPU::FABS_SI:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::V_MOV_B32_e64))
           .addOperand(MI->getOperand(0))
           .addOperand(MI->getOperand(1))
           // VSRC1-2 are unused, but we still need to fill all the
           // operand slots, so we just reuse the VSRC0 operand
           .addOperand(MI->getOperand(1))
           .addOperand(MI->getOperand(1))
           .addImm(1) // ABS
           .addImm(0) // CLAMP
           .addImm(0) // OMOD
           .addImm(0); // NEG
    MI->eraseFromParent();
    break;

  case AMDGPU::FNEG_SI:
    BuildMI(*BB, I, BB->findDebugLoc(I), TII->get(AMDGPU::V_MOV_B32_e64))
           .addOperand(MI->getOperand(0))
           .addOperand(MI->getOperand(1))
           // VSRC1-2 are unused, but we still need to fill all the
           // operand slots, so we just reuse the VSRC0 operand
           .addOperand(MI->getOperand(1))
           .addOperand(MI->getOperand(1))
           .addImm(0) // ABS
           .addImm(0) // CLAMP
           .addImm(0) // OMOD
           .addImm(1); // NEG
    MI->eraseFromParent();
    break;

  case AMDGPU::SI_INTERP:
    LowerSI_INTERP(MI, *BB, I, MRI);
    break;
  case AMDGPU::SI_INTERP_CONST:
    LowerSI_INTERP_CONST(MI, *BB, I);
    break;
  case AMDGPU::SI_KIL:
    LowerSI_KIL(MI, *BB, I, MRI);
    break;
  case AMDGPU::SI_V_CNDLT:
    LowerSI_V_CNDLT(MI, *BB, I, MRI);
    break;
  }
  return BB;
}

void SITargetLowering::AppendS_WAITCNT(MachineInstr *MI, MachineBasicBlock &BB,
    MachineBasicBlock::iterator I) const
{
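  // An S_WAITCNT with a zero immediate waits for all outstanding counters
  // (vmcnt, expcnt and lgkmcnt) to reach zero; the individual counter fields
  // are not modelled separately here.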
  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_WAITCNT))
          .addImm(0);
}

void SITargetLowering::LowerSI_INTERP(MachineInstr *MI, MachineBasicBlock &BB,
    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const
{
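  // Two-pass attribute interpolation, sketched from the SI ISA docs: M0 holds
  // the parameter base, V_INTERP_P1_F32 computes tmp = p10 * i + p0, and
  // V_INTERP_P2_F32 computes dst = p20 * j + tmp.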
  unsigned tmp = MRI.createVirtualRegister(&AMDGPU::VReg_32RegClass);
  MachineOperand dst = MI->getOperand(0);
  MachineOperand iReg = MI->getOperand(1);
  MachineOperand jReg = MI->getOperand(2);
  MachineOperand attr_chan = MI->getOperand(3);
  MachineOperand attr = MI->getOperand(4);
  MachineOperand params = MI->getOperand(5);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
          .addOperand(params);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_P1_F32), tmp)
          .addOperand(iReg)
          .addOperand(attr_chan)
          .addOperand(attr);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_P2_F32))
          .addOperand(dst)
          .addReg(tmp)
          .addOperand(jReg)
          .addOperand(attr_chan)
          .addOperand(attr);

  MI->eraseFromParent();
}

void SITargetLowering::LowerSI_INTERP_CONST(MachineInstr *MI,
    MachineBasicBlock &BB, MachineBasicBlock::iterator I) const
{
  MachineOperand dst = MI->getOperand(0);
  MachineOperand attr_chan = MI->getOperand(1);
  MachineOperand attr = MI->getOperand(2);
  MachineOperand params = MI->getOperand(3);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_MOV_B32), AMDGPU::M0)
          .addOperand(params);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_INTERP_MOV_F32))
          .addOperand(dst)
          .addOperand(attr_chan)
          .addOperand(attr);

  MI->eraseFromParent();
}

void SITargetLowering::LowerSI_KIL(MachineInstr *MI, MachineBasicBlock &BB,
    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const
{
  // Clear this pixel from the exec mask if the operand is negative
  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_CMPX_LE_F32_e32),
          AMDGPU::VCC)
          .addReg(AMDGPU::SREG_LIT_0)
          .addOperand(MI->getOperand(0));

  // If the exec mask is non-zero, skip the next two instructions
  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_CBRANCH_EXECNZ))
          .addImm(3)
          .addReg(AMDGPU::EXEC);

  // Exec mask is zero: Export to NULL target...
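  // The immediate operands below appear to be the component enable mask (0),
  // the export target (0x09 = NULL), and the COMPR, DONE and VM bits; the
  // exact field order is assumed from the EXP instruction definition.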
  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::EXP))
          .addImm(0)
          .addImm(0x09) // V_008DFC_SQ_EXP_NULL
          .addImm(0)
          .addImm(1)
          .addImm(1)
          .addReg(AMDGPU::SREG_LIT_0)
          .addReg(AMDGPU::SREG_LIT_0)
          .addReg(AMDGPU::SREG_LIT_0)
          .addReg(AMDGPU::SREG_LIT_0);

  // ... and terminate wavefront
  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::S_ENDPGM));

  MI->eraseFromParent();
}

void SITargetLowering::LowerSI_V_CNDLT(MachineInstr *MI, MachineBasicBlock &BB,
    MachineBasicBlock::iterator I, MachineRegisterInfo & MRI) const
{
  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_CMP_LT_F32_e32),
          AMDGPU::VCC)
          .addOperand(MI->getOperand(1))
          .addReg(AMDGPU::SREG_LIT_0);

  BuildMI(BB, I, BB.findDebugLoc(I), TII->get(AMDGPU::V_CNDMASK_B32))
          .addOperand(MI->getOperand(0))
          .addReg(AMDGPU::VCC)
          .addOperand(MI->getOperand(2))
          .addOperand(MI->getOperand(3));

  MI->eraseFromParent();
}

EVT SITargetLowering::getSetCCResultType(EVT VT) const
{
  return MVT::i1;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const
{
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::BR_CC: return LowerBR_CC(Op, DAG);
  case ISD::LOAD: return LowerLOAD(Op, DAG);
  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
  case ISD::AND: return Loweri1ContextSwitch(Op, DAG, ISD::AND);
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrinsicID =
                         cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    EVT VT = Op.getValueType();
    switch (IntrinsicID) {
    case AMDGPUIntrinsic::SI_vs_load_buffer_index:
      return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
                                  AMDGPU::VGPR0, VT);
    default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
    }
    break;
  }
  }
  return SDValue();
}

/// Loweri1ContextSwitch - Lower i1 operations that live in the VCC register.
/// In the VALU context, VCC is a one-bit register, but in the SALU context VCC
/// is a 64-bit register (one bit per thread).  Since only the SALU can perform
/// operations on the VCC register, we need to promote the operand types from
/// i1 to i64 so that tablegen can match this operation to the correct SALU
/// instruction.  We do this promotion by wrapping the operands in
/// SIISD::VCC_BITCAST nodes.
///
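/// For example, with ISD::AND as VCCNode, (i1 and a, b) becomes:
///   (i1 VCC_BITCAST (i64 and (i64 VCC_BITCAST a), (i64 VCC_BITCAST b)))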
SDValue SITargetLowering::Loweri1ContextSwitch(SDValue Op,
                                               SelectionDAG &DAG,
                                               unsigned VCCNode) const
{
  DebugLoc DL = Op.getDebugLoc();

  SDValue OpNode = DAG.getNode(VCCNode, DL, MVT::i64,
                               DAG.getNode(SIISD::VCC_BITCAST, DL, MVT::i64,
                                           Op.getOperand(0)),
                               DAG.getNode(SIISD::VCC_BITCAST, DL, MVT::i64,
                                           Op.getOperand(1)));

  return DAG.getNode(SIISD::VCC_BITCAST, DL, MVT::i1, OpNode);
}

SDValue SITargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const
{
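  // BR_CC is split into an i1 SETCC on the operands followed by an
  // AMDGPUISD::BRANCH_COND that takes the chain, the jump target and the
  // compare result.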
  SDValue Chain = Op.getOperand(0);
  SDValue CC = Op.getOperand(1);
  SDValue LHS   = Op.getOperand(2);
  SDValue RHS   = Op.getOperand(3);
  SDValue JumpT  = Op.getOperand(4);
  SDValue CmpValue;
  SDValue Result;
  CmpValue = DAG.getNode(
      ISD::SETCC,
      Op.getDebugLoc(),
      MVT::i1,
      LHS, RHS,
      CC);

  Result = DAG.getNode(
      AMDGPUISD::BRANCH_COND,
      CmpValue.getDebugLoc(),
      MVT::Other, Chain,
      JumpT, CmpValue);
  return Result;
}

SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const
{
  EVT VT = Op.getValueType();
  LoadSDNode *Ptr = dyn_cast<LoadSDNode>(Op);

  assert(Ptr);

  unsigned AddrSpace = Ptr->getPointerInfo().getAddrSpace();

  // We only need to lower USER_SGPR address space loads
  if (AddrSpace != AMDGPUAS::USER_SGPR_ADDRESS) {
    return SDValue();
  }

  // Loads from the USER_SGPR address space can only have constant value
  // pointers.
  ConstantSDNode *BasePtr = dyn_cast<ConstantSDNode>(Ptr->getBasePtr());
  assert(BasePtr);

  unsigned TypeDwordWidth = VT.getSizeInBits() / 32;
  const TargetRegisterClass * dstClass;
  switch (TypeDwordWidth) {
    default:
      assert(!"USER_SGPR value size not implemented");
      return SDValue();
    case 1:
      dstClass = &AMDGPU::SReg_32RegClass;
      break;
    case 2:
      dstClass = &AMDGPU::SReg_64RegClass;
      break;
  }
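  // The USER_SGPR pointer value is effectively a dword index: for example, an
  // i64 load (TypeDwordWidth == 2) from constant address 4 gives SGPRIndex 2,
  // i.e. the third register in the SReg_64 class.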
  uint64_t Index = BasePtr->getZExtValue();
  assert(Index % TypeDwordWidth == 0 && "USER_SGPR not properly aligned");
  unsigned SGPRIndex = Index / TypeDwordWidth;
  unsigned Reg = dstClass->getRegister(SGPRIndex);

  DAG.ReplaceAllUsesOfValueWith(Op, CreateLiveInRegister(DAG, dstClass, Reg,
                                                         VT));
  return SDValue();
}

SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
{
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);
  EVT VT = Op.getValueType();
  DebugLoc DL = Op.getDebugLoc();

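  // SELECT_CC is decomposed into an i1 SETCC feeding a plain ISD::SELECT.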
  SDValue Cond = DAG.getNode(ISD::SETCC, DL, MVT::i1, LHS, RHS, CC);
  return DAG.getNode(ISD::SELECT, DL, VT, Cond, True, False);
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  DebugLoc DL = N->getDebugLoc();
  EVT VT = N->getValueType(0);

  switch (N->getOpcode()) {
    default: break;
    case ISD::SELECT_CC: {
      ConstantSDNode *True, *False;
      // i1 selectcc(l, r, -1, 0, cc) -> i1 setcc(l, r, cc)
      if ((True = dyn_cast<ConstantSDNode>(N->getOperand(2)))
          && (False = dyn_cast<ConstantSDNode>(N->getOperand(3)))
          && True->isAllOnesValue()
          && False->isNullValue()
          && VT == MVT::i1) {
        return DAG.getNode(ISD::SETCC, DL, VT, N->getOperand(0),
                           N->getOperand(1), N->getOperand(4));

      }
      break;
    }
    case ISD::SETCC: {
      SDValue Arg0 = N->getOperand(0);
      SDValue Arg1 = N->getOperand(1);
      SDValue CC = N->getOperand(2);
      ConstantSDNode * C = NULL;
      ISD::CondCode CCOp = cast<CondCodeSDNode>(CC)->get();

      // i1 setcc (sext(i1), 0, setne) -> i1 setcc(i1, 0, setne)
      if (VT == MVT::i1
          && Arg0.getOpcode() == ISD::SIGN_EXTEND
          && Arg0.getOperand(0).getValueType() == MVT::i1
          && (C = dyn_cast<ConstantSDNode>(Arg1))
          && C->isNullValue()
          && CCOp == ISD::SETNE) {
        return SimplifySetCC(VT, Arg0.getOperand(0),
                             DAG.getConstant(0, MVT::i1), CCOp, true, DCI, DL);
      }
      break;
    }
  }
  return SDValue();
}

#define NODE_NAME_CASE(node) case SIISD::node: return #node;

const char* SITargetLowering::getTargetNodeName(unsigned Opcode) const
{
  switch (Opcode) {
  default: return AMDGPUTargetLowering::getTargetNodeName(Opcode);
  NODE_NAME_CASE(VCC_AND)
  NODE_NAME_CASE(VCC_BITCAST)
  }
}