SIISelLowering.cpp revision 9242b73286f050c53a26225b2a9acd14aeaa91da
//===-- SIISelLowering.cpp - SI DAG Lowering Implementation ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// \brief Custom DAG lowering for SI
//
//===----------------------------------------------------------------------===//

#include "SIISelLowering.h"
#include "AMDGPU.h"
#include "AMDILIntrinsicInfo.h"
#include "SIInstrInfo.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/IR/Function.h"

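// Default data-format bits for buffer resource descriptors; the high half of
// this value ends up in the last dword of the descriptor built by the
// SI_ADDR64_RSRC expansion below.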
const uint64_t RSRC_DATA_FORMAT = 0xf00000000000LL;

using namespace llvm;

SITargetLowering::SITargetLowering(TargetMachine &TM) :
    AMDGPUTargetLowering(TM) {

  addRegisterClass(MVT::i1, &AMDGPU::SReg_64RegClass);
  addRegisterClass(MVT::i64, &AMDGPU::VSrc_64RegClass);

  addRegisterClass(MVT::v32i8, &AMDGPU::SReg_256RegClass);
  addRegisterClass(MVT::v64i8, &AMDGPU::SReg_512RegClass);

  addRegisterClass(MVT::i32, &AMDGPU::VSrc_32RegClass);
  addRegisterClass(MVT::f32, &AMDGPU::VSrc_32RegClass);

  addRegisterClass(MVT::f64, &AMDGPU::VSrc_64RegClass);
  addRegisterClass(MVT::v2i32, &AMDGPU::VSrc_64RegClass);
  addRegisterClass(MVT::v2f32, &AMDGPU::VSrc_64RegClass);

  addRegisterClass(MVT::v4i32, &AMDGPU::VReg_128RegClass);
  addRegisterClass(MVT::v4f32, &AMDGPU::VReg_128RegClass);
  addRegisterClass(MVT::i128, &AMDGPU::SReg_128RegClass);

  addRegisterClass(MVT::v8i32, &AMDGPU::VReg_256RegClass);
  addRegisterClass(MVT::v8f32, &AMDGPU::VReg_256RegClass);

  addRegisterClass(MVT::v16i32, &AMDGPU::VReg_512RegClass);
  addRegisterClass(MVT::v16f32, &AMDGPU::VReg_512RegClass);

  computeRegisterProperties();

  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v8f32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16i32, Expand);
  setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v16f32, Expand);

  setOperationAction(ISD::ADD, MVT::i64, Legal);
  setOperationAction(ISD::ADD, MVT::i32, Legal);

  setOperationAction(ISD::BITCAST, MVT::i128, Legal);

  // We need to custom lower vector loads from local memory
  setOperationAction(ISD::LOAD, MVT::v2i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v4i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v8i32, Custom);
  setOperationAction(ISD::LOAD, MVT::v16i32, Custom);

  setOperationAction(ISD::STORE, MVT::v8i32, Custom);
  setOperationAction(ISD::STORE, MVT::v16i32, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);

  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  setOperationAction(ISD::SETCC, MVT::v2i1, Expand);
  setOperationAction(ISD::SETCC, MVT::v4i1, Expand);

  setOperationAction(ISD::ANY_EXTEND, MVT::i64, Custom);
  setOperationAction(ISD::SIGN_EXTEND, MVT::i64, Custom);
  setOperationAction(ISD::ZERO_EXTEND, MVT::i64, Custom);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::f32, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v16i8, Custom);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::v4f32, Custom);

  setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i32, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v8i16, Expand);
  setLoadExtAction(ISD::SEXTLOAD, MVT::v16i16, Expand);

  setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::i64, MVT::i32, Expand);
  setTruncStoreAction(MVT::i128, MVT::i64, Expand);
  setTruncStoreAction(MVT::v8i32, MVT::v8i16, Expand);
  setTruncStoreAction(MVT::v16i32, MVT::v16i16, Expand);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);

  setTargetDAGCombine(ISD::SELECT_CC);

  setTargetDAGCombine(ISD::SETCC);

  setSchedulingPreference(Sched::RegPressure);
}

//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//

bool SITargetLowering::allowsUnalignedMemoryAccesses(EVT VT,
                                                     bool *IsFast) const {
  // XXX: This depends on the address space, and we may also want to revisit
  // the alignment values we specify in the DataLayout.
  return VT.bitsGT(MVT::i32);
}

bool SITargetLowering::shouldSplitVectorElementType(EVT VT) const {
  return VT.bitsLE(MVT::i16);
}

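/// \brief Load a kernel argument of type \p MemVT from the argument buffer.
///
/// The base pointer of the argument buffer is live-in in SGPR0_SGPR1; the
/// value at byte offset \p Offset is sign-extend loaded from the constant
/// address space.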
SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                         SDLoc DL, SDValue Chain,
                                         unsigned Offset) const {
  MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
  PointerType *PtrTy = PointerType::get(VT.getTypeForEVT(*DAG.getContext()),
                                        AMDGPUAS::CONSTANT_ADDRESS);
  SDValue BasePtr = DAG.getCopyFromReg(Chain, DL,
                          MRI.getLiveInVirtReg(AMDGPU::SGPR0_SGPR1), MVT::i64);
  SDValue Ptr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
                            DAG.getConstant(Offset, MVT::i64));
  return DAG.getExtLoad(ISD::SEXTLOAD, DL, VT, Chain, Ptr,
                        MachinePointerInfo(UndefValue::get(PtrTy)), MemVT,
                        false, false, MemVT.getSizeInBits() >> 3);
}

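/// \brief Lower the incoming arguments of SI shaders and compute kernels.
///
/// Unused pixel-shader inputs are skipped, vector arguments of graphics
/// shaders are split into their scalar elements, and compute-kernel arguments
/// are loaded from the argument buffer via LowerParameter().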
SDValue SITargetLowering::LowerFormalArguments(
                                      SDValue Chain,
                                      CallingConv::ID CallConv,
                                      bool isVarArg,
                                      const SmallVectorImpl<ISD::InputArg> &Ins,
                                      SDLoc DL, SelectionDAG &DAG,
                                      SmallVectorImpl<SDValue> &InVals) const {

  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();

  MachineFunction &MF = DAG.getMachineFunction();
  FunctionType *FType = MF.getFunction()->getFunctionType();
  SIMachineFunctionInfo *Info = MF.getInfo<SIMachineFunctionInfo>();

  assert(CallConv == CallingConv::C);

  SmallVector<ISD::InputArg, 16> Splits;
  uint32_t Skipped = 0;

  for (unsigned i = 0, e = Ins.size(), PSInputNum = 0; i != e; ++i) {
    const ISD::InputArg &Arg = Ins[i];

    // First check if it's a PS input addr
    if (Info->ShaderType == ShaderType::PIXEL && !Arg.Flags.isInReg() &&
        !Arg.Flags.isByVal()) {

      assert((PSInputNum <= 15) && "Too many PS inputs!");

      if (!Arg.Used) {
        // We can safely skip unused PS inputs
        Skipped |= 1 << i;
        ++PSInputNum;
        continue;
      }

      Info->PSInputAddr |= 1 << PSInputNum++;
    }

    // Second, split vertices into their elements
    if (Info->ShaderType != ShaderType::COMPUTE && Arg.VT.isVector()) {
      ISD::InputArg NewArg = Arg;
      NewArg.Flags.setSplit();
      NewArg.VT = Arg.VT.getVectorElementType();

      // We REALLY want the ORIGINAL number of vertex elements here, e.g. a
      // three or five element vertex only needs three or five registers,
      // NOT four or eight.
      Type *ParamType = FType->getParamType(Arg.OrigArgIndex);
      unsigned NumElements = ParamType->getVectorNumElements();

      for (unsigned j = 0; j != NumElements; ++j) {
        Splits.push_back(NewArg);
        NewArg.PartOffset += NewArg.VT.getStoreSize();
      }

    } else if (Info->ShaderType != ShaderType::COMPUTE) {
      Splits.push_back(Arg);
    }
  }

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  // At least one interpolation mode must be enabled or else the GPU will hang.
  if (Info->ShaderType == ShaderType::PIXEL &&
      (Info->PSInputAddr & 0x7F) == 0) {
    Info->PSInputAddr |= 1;
    CCInfo.AllocateReg(AMDGPU::VGPR0);
    CCInfo.AllocateReg(AMDGPU::VGPR1);
  }

  // The pointer to the list of arguments is stored in SGPR0, SGPR1
  if (Info->ShaderType == ShaderType::COMPUTE) {
    CCInfo.AllocateReg(AMDGPU::SGPR0);
    CCInfo.AllocateReg(AMDGPU::SGPR1);
    MF.addLiveIn(AMDGPU::SGPR0_SGPR1, &AMDGPU::SReg_64RegClass);
  }

  if (Info->ShaderType == ShaderType::COMPUTE) {
    getOriginalFunctionArgs(DAG, DAG.getMachineFunction().getFunction(), Ins,
                            Splits);
  }

  AnalyzeFormalArguments(CCInfo, Splits);

  for (unsigned i = 0, e = Ins.size(), ArgIdx = 0; i != e; ++i) {

    const ISD::InputArg &Arg = Ins[i];
    if (Skipped & (1 << i)) {
      InVals.push_back(DAG.getUNDEF(Arg.VT));
      continue;
    }

    CCValAssign &VA = ArgLocs[ArgIdx++];
    EVT VT = VA.getLocVT();

    if (VA.isMemLoc()) {
      VT = Ins[i].VT;
      EVT MemVT = Splits[i].VT;
      // The first 36 bytes of the input buffer contain information about
      // thread group and global sizes.
      SDValue Arg = LowerParameter(DAG, VT, MemVT, DL, DAG.getRoot(),
                                   36 + VA.getLocMemOffset());
      InVals.push_back(Arg);
      continue;
    }
    assert(VA.isRegLoc() && "Parameter must be in a register!");

    unsigned Reg = VA.getLocReg();

    if (VT == MVT::i64) {
      // For now assume it is a pointer
      Reg = TRI->getMatchingSuperReg(Reg, AMDGPU::sub0,
                                     &AMDGPU::SReg_64RegClass);
      Reg = MF.addLiveIn(Reg, &AMDGPU::SReg_64RegClass);
      InVals.push_back(DAG.getCopyFromReg(Chain, DL, Reg, VT));
      continue;
    }

    const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg, VT);

    Reg = MF.addLiveIn(Reg, RC);
    SDValue Val = DAG.getCopyFromReg(Chain, DL, Reg, VT);

    if (Arg.VT.isVector()) {

      // Build a vector from the registers
      Type *ParamType = FType->getParamType(Arg.OrigArgIndex);
      unsigned NumElements = ParamType->getVectorNumElements();

      SmallVector<SDValue, 4> Regs;
      Regs.push_back(Val);
      for (unsigned j = 1; j != NumElements; ++j) {
        Reg = ArgLocs[ArgIdx++].getLocReg();
        Reg = MF.addLiveIn(Reg, RC);
        Regs.push_back(DAG.getCopyFromReg(Chain, DL, Reg, VT));
      }

      // Fill up the missing vector elements
      NumElements = Arg.VT.getVectorNumElements() - NumElements;
      for (unsigned j = 0; j != NumElements; ++j)
        Regs.push_back(DAG.getUNDEF(VT));

      InVals.push_back(DAG.getNode(ISD::BUILD_VECTOR, DL, Arg.VT,
                                   Regs.data(), Regs.size()));
      continue;
    }

    InVals.push_back(Val);
  }
  return Chain;
}
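/// \brief Expand pseudo instructions that need a custom inserter.
///
/// SI_ADDR64_RSRC builds a 128-bit buffer resource descriptor out of a 64-bit
/// pointer and the default RSRC_DATA_FORMAT bits; V_SUB_F64 is expanded to
/// V_ADD_F64 with the NEG modifier set on the second source operand.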
MachineBasicBlock *SITargetLowering::EmitInstrWithCustomInserter(
    MachineInstr *MI, MachineBasicBlock *BB) const {

  MachineBasicBlock::iterator I = *MI;

  switch (MI->getOpcode()) {
  default:
    return AMDGPUTargetLowering::EmitInstrWithCustomInserter(MI, BB);
  case AMDGPU::BRANCH: return BB;
  case AMDGPU::SI_ADDR64_RSRC: {
    const SIInstrInfo *TII =
      static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
    MachineRegisterInfo &MRI = BB->getParent()->getRegInfo();
    unsigned SuperReg = MI->getOperand(0).getReg();
    unsigned SubRegLo = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    unsigned SubRegHi = MRI.createVirtualRegister(&AMDGPU::SReg_64RegClass);
    unsigned SubRegHiHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
    unsigned SubRegHiLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
    BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B64), SubRegLo)
            .addOperand(MI->getOperand(1));
    BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), SubRegHiLo)
            .addImm(0);
    BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::S_MOV_B32), SubRegHiHi)
            .addImm(RSRC_DATA_FORMAT >> 32);
    BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE), SubRegHi)
            .addReg(SubRegHiLo)
            .addImm(AMDGPU::sub0)
            .addReg(SubRegHiHi)
            .addImm(AMDGPU::sub1);
    BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::REG_SEQUENCE), SuperReg)
            .addReg(SubRegLo)
            .addImm(AMDGPU::sub0_sub1)
            .addReg(SubRegHi)
            .addImm(AMDGPU::sub2_sub3);
    MI->eraseFromParent();
    break;
  }
  case AMDGPU::V_SUB_F64: {
    const SIInstrInfo *TII =
      static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
    BuildMI(*BB, I, MI->getDebugLoc(), TII->get(AMDGPU::V_ADD_F64),
            MI->getOperand(0).getReg())
            .addReg(MI->getOperand(1).getReg())
            .addReg(MI->getOperand(2).getReg())
            .addImm(0)  /* src2 */
            .addImm(0)  /* ABS */
            .addImm(0)  /* CLAMP */
            .addImm(0)  /* OMOD */
            .addImm(2); /* NEG */
    MI->eraseFromParent();
    break;
  }
  }
  return BB;
}

EVT SITargetLowering::getSetCCResultType(LLVMContext &, EVT VT) const {
  if (!VT.isVector()) {
    return MVT::i1;
  }
  return MVT::getVectorVT(MVT::i1, VT.getVectorNumElements());
}

MVT SITargetLowering::getScalarShiftAmountTy(EVT VT) const {
  return MVT::i32;
}

bool SITargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f32:
    return false; /* There is V_MAD_F32 for f32 */
  case MVT::f64:
    return true;
  default:
    break;
  }

  return false;
}

//===----------------------------------------------------------------------===//
// Custom DAG Lowering Operations
//===----------------------------------------------------------------------===//

SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  SIMachineFunctionInfo *MFI = MF.getInfo<SIMachineFunctionInfo>();
  switch (Op.getOpcode()) {
  default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  case ISD::BRCOND: return LowerBRCOND(Op, DAG);
  case ISD::LOAD: {
    LoadSDNode *Load = cast<LoadSDNode>(Op);
    if (Load->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS &&
        Op.getValueType().isVector()) {
      SDValue MergedValues[2] = {
        SplitVectorLoad(Op, DAG),
        Load->getChain()
      };
      return DAG.getMergeValues(MergedValues, 2, SDLoc(Op));
    } else {
      return SDValue();
    }
  }
  case ISD::STORE: {
    StoreSDNode *Store = cast<StoreSDNode>(Op);
    if (Store->getValue().getValueType().isVector() &&
        Store->getValue().getValueType().getVectorNumElements() >= 8)
      return SplitVectorStore(Op, DAG);
    else
      return AMDGPUTargetLowering::LowerOperation(Op, DAG);
  }

  case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
  case ISD::SIGN_EXTEND: return LowerSIGN_EXTEND(Op, DAG);
  case ISD::ANY_EXTEND: // Fall-through
  case ISD::ZERO_EXTEND: return LowerZERO_EXTEND(Op, DAG);
  case ISD::GlobalAddress: return LowerGlobalAddress(MFI, Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IntrinsicID =
                         cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    EVT VT = Op.getValueType();
    SDLoc DL(Op);
    // XXX: Hard-coded; we only use two user SGPRs to store the pointer to
    // the parameters.
    unsigned NumUserSGPRs = 2;
    switch (IntrinsicID) {
    default: return AMDGPUTargetLowering::LowerOperation(Op, DAG);
    case Intrinsic::r600_read_ngroups_x:
      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 0);
    case Intrinsic::r600_read_ngroups_y:
      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 4);
    case Intrinsic::r600_read_ngroups_z:
      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 8);
    case Intrinsic::r600_read_global_size_x:
      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 12);
    case Intrinsic::r600_read_global_size_y:
      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 16);
    case Intrinsic::r600_read_global_size_z:
      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 20);
    case Intrinsic::r600_read_local_size_x:
      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 24);
    case Intrinsic::r600_read_local_size_y:
      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 28);
    case Intrinsic::r600_read_local_size_z:
      return LowerParameter(DAG, VT, VT, DL, DAG.getEntryNode(), 32);
    case Intrinsic::r600_read_tgid_x:
      return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
                     AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 0), VT);
    case Intrinsic::r600_read_tgid_y:
      return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
                     AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 1), VT);
    case Intrinsic::r600_read_tgid_z:
      return CreateLiveInRegister(DAG, &AMDGPU::SReg_32RegClass,
                     AMDGPU::SReg_32RegClass.getRegister(NumUserSGPRs + 2), VT);
    case Intrinsic::r600_read_tidig_x:
      return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
                                  AMDGPU::VGPR0, VT);
    case Intrinsic::r600_read_tidig_y:
      return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
                                  AMDGPU::VGPR1, VT);
    case Intrinsic::r600_read_tidig_z:
      return CreateLiveInRegister(DAG, &AMDGPU::VReg_32RegClass,
                                  AMDGPU::VGPR2, VT);
    case AMDGPUIntrinsic::SI_load_const: {
      SDValue Ops[] = {
        ResourceDescriptorToi128(Op.getOperand(1), DAG),
        Op.getOperand(2)
      };

      MachineMemOperand *MMO = MF.getMachineMemOperand(
          MachinePointerInfo(),
          MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant,
          VT.getSizeInBits() / 8, 4);
      return DAG.getMemIntrinsicNode(AMDGPUISD::LOAD_CONSTANT, DL,
                                     Op->getVTList(), Ops, 2, VT, MMO);
    }
    case AMDGPUIntrinsic::SI_sample:
      return LowerSampleIntrinsic(AMDGPUISD::SAMPLE, Op, DAG);
    case AMDGPUIntrinsic::SI_sampleb:
      return LowerSampleIntrinsic(AMDGPUISD::SAMPLEB, Op, DAG);
    case AMDGPUIntrinsic::SI_sampled:
      return LowerSampleIntrinsic(AMDGPUISD::SAMPLED, Op, DAG);
    case AMDGPUIntrinsic::SI_samplel:
      return LowerSampleIntrinsic(AMDGPUISD::SAMPLEL, Op, DAG);
    case AMDGPUIntrinsic::SI_vs_load_input:
      return DAG.getNode(AMDGPUISD::LOAD_INPUT, DL, VT,
                         ResourceDescriptorToi128(Op.getOperand(1), DAG),
                         Op.getOperand(2),
                         Op.getOperand(3));
    }
  }

  case ISD::INTRINSIC_VOID:
    SDValue Chain = Op.getOperand(0);
    unsigned IntrinsicID =
                         cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();

    switch (IntrinsicID) {
      case AMDGPUIntrinsic::SI_tbuffer_store: {
        SDLoc DL(Op);
        SDValue Ops[] = {
          Chain,
          ResourceDescriptorToi128(Op.getOperand(2), DAG),
          Op.getOperand(3),
          Op.getOperand(4),
          Op.getOperand(5),
          Op.getOperand(6),
          Op.getOperand(7),
          Op.getOperand(8),
          Op.getOperand(9),
          Op.getOperand(10),
          Op.getOperand(11),
          Op.getOperand(12),
          Op.getOperand(13),
          Op.getOperand(14)
        };
        EVT VT = Op.getOperand(3).getValueType();

        MachineMemOperand *MMO = MF.getMachineMemOperand(
            MachinePointerInfo(),
            MachineMemOperand::MOStore,
            VT.getSizeInBits() / 8, 4);
        return DAG.getMemIntrinsicNode(AMDGPUISD::TBUFFER_STORE_FORMAT, DL,
                                       Op->getVTList(), Ops,
                                       sizeof(Ops)/sizeof(Ops[0]), VT, MMO);
      }
      default:
        break;
    }
  }
  return SDValue();
}

/// \brief Helper function for LowerBRCOND
static SDNode *findUser(SDValue Value, unsigned Opcode) {

  SDNode *Parent = Value.getNode();
  for (SDNode::use_iterator I = Parent->use_begin(), E = Parent->use_end();
       I != E; ++I) {

    if (I.getUse().get() != Value)
      continue;

    if (I->getOpcode() == Opcode)
      return *I;
  }
  return 0;
}

/// This transforms the control-flow intrinsics to get the branch destination
/// as their last parameter, and also switches the branch target with BR if
/// the need arises.
SDValue SITargetLowering::LowerBRCOND(SDValue BRCOND,
                                      SelectionDAG &DAG) const {

  SDLoc DL(BRCOND);

  SDNode *Intr = BRCOND.getOperand(1).getNode();
  SDValue Target = BRCOND.getOperand(2);
  SDNode *BR = 0;

  if (Intr->getOpcode() == ISD::SETCC) {
    // As long as we negate the condition everything is fine
    SDNode *SetCC = Intr;
    assert(SetCC->getConstantOperandVal(1) == 1);
    assert(cast<CondCodeSDNode>(SetCC->getOperand(2).getNode())->get() ==
           ISD::SETNE);
    Intr = SetCC->getOperand(0).getNode();

  } else {
    // Get the target from BR if we don't negate the condition
    BR = findUser(BRCOND, ISD::BR);
    Target = BR->getOperand(1);
  }

  assert(Intr->getOpcode() == ISD::INTRINSIC_W_CHAIN);

  // Build the result value types of the new intrinsic call
  SmallVector<EVT, 4> Res;
  for (unsigned i = 1, e = Intr->getNumValues(); i != e; ++i)
    Res.push_back(Intr->getValueType(i));

  // Operands of the new intrinsic call
  SmallVector<SDValue, 4> Ops;
  Ops.push_back(BRCOND.getOperand(0));
  for (unsigned i = 1, e = Intr->getNumOperands(); i != e; ++i)
    Ops.push_back(Intr->getOperand(i));
  Ops.push_back(Target);

  // Build the new intrinsic call
  SDNode *Result = DAG.getNode(
    Res.size() > 1 ? ISD::INTRINSIC_W_CHAIN : ISD::INTRINSIC_VOID, DL,
    DAG.getVTList(Res.data(), Res.size()), Ops.data(), Ops.size()).getNode();

  if (BR) {
    // Give the branch instruction our target
    SDValue Ops[] = {
      BR->getOperand(0),
      BRCOND.getOperand(2)
    };
    DAG.MorphNodeTo(BR, ISD::BR, BR->getVTList(), Ops, 2);
  }

  SDValue Chain = SDValue(Result, Result->getNumValues() - 1);

  // Copy the intrinsic results to registers
  for (unsigned i = 1, e = Intr->getNumValues() - 1; i != e; ++i) {
    SDNode *CopyToReg = findUser(SDValue(Intr, i), ISD::CopyToReg);
    if (!CopyToReg)
      continue;

    Chain = DAG.getCopyToReg(
      Chain, DL,
      CopyToReg->getOperand(1),
      SDValue(Result, i - 1),
      SDValue());

    DAG.ReplaceAllUsesWith(SDValue(CopyToReg, 0), CopyToReg->getOperand(0));
  }

  // Remove the old intrinsic from the chain
  DAG.ReplaceAllUsesOfValueWith(
    SDValue(Intr, Intr->getNumValues() - 1),
    Intr->getOperand(0));

  return Chain;
}

SDValue SITargetLowering::ResourceDescriptorToi128(SDValue Op,
                                                   SelectionDAG &DAG) const {

  if (Op.getValueType() == MVT::i128) {
    return Op;
  }

  assert(Op.getOpcode() == ISD::UNDEF);

  return DAG.getNode(ISD::BUILD_PAIR, SDLoc(Op), MVT::i128,
                     DAG.getConstant(0, MVT::i64),
                     DAG.getConstant(0, MVT::i64));
}

SDValue SITargetLowering::LowerSampleIntrinsic(unsigned Opcode,
                                               const SDValue &Op,
                                               SelectionDAG &DAG) const {
  return DAG.getNode(Opcode, SDLoc(Op), Op.getValueType(), Op.getOperand(1),
                     Op.getOperand(2),
                     ResourceDescriptorToi128(Op.getOperand(3), DAG),
                     Op.getOperand(4));
}

SDValue SITargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue True = Op.getOperand(2);
  SDValue False = Op.getOperand(3);
  SDValue CC = Op.getOperand(4);
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  // Possible Min/Max pattern
  SDValue MinMax = LowerMinMax(Op, DAG);
  if (MinMax.getNode()) {
    return MinMax;
  }

  SDValue Cond = DAG.getNode(ISD::SETCC, DL, MVT::i1, LHS, RHS, CC);
  return DAG.getNode(ISD::SELECT, DL, VT, Cond, True, False);
}

SDValue SITargetLowering::LowerSIGN_EXTEND(SDValue Op,
                                           SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  if (VT != MVT::i64) {
    return SDValue();
  }

  SDValue Hi = DAG.getNode(ISD::SRA, DL, MVT::i32, Op.getOperand(0),
                           DAG.getConstant(31, MVT::i32));

  return DAG.getNode(ISD::BUILD_PAIR, DL, VT, Op.getOperand(0), Hi);
}

SDValue SITargetLowering::LowerZERO_EXTEND(SDValue Op,
                                           SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  if (VT != MVT::i64) {
    return SDValue();
  }

  return DAG.getNode(ISD::BUILD_PAIR, DL, VT, Op.getOperand(0),
                     DAG.getConstant(0, MVT::i32));
}

//===----------------------------------------------------------------------===//
// Custom DAG optimizations
//===----------------------------------------------------------------------===//

SDValue SITargetLowering::PerformDAGCombine(SDNode *N,
                                            DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc DL(N);
  EVT VT = N->getValueType(0);

  switch (N->getOpcode()) {
    default: break;
    case ISD::SELECT_CC: {
      ConstantSDNode *True, *False;
      // i1 selectcc(l, r, -1, 0, cc) -> i1 setcc(l, r, cc)
      if ((True = dyn_cast<ConstantSDNode>(N->getOperand(2)))
          && (False = dyn_cast<ConstantSDNode>(N->getOperand(3)))
          && True->isAllOnesValue()
          && False->isNullValue()
          && VT == MVT::i1) {
        return DAG.getNode(ISD::SETCC, DL, VT, N->getOperand(0),
                           N->getOperand(1), N->getOperand(4));

      }
      break;
    }
    case ISD::SETCC: {
      SDValue Arg0 = N->getOperand(0);
      SDValue Arg1 = N->getOperand(1);
      SDValue CC = N->getOperand(2);
      ConstantSDNode *C = NULL;
      ISD::CondCode CCOp = cast<CondCodeSDNode>(CC)->get();

      // i1 setcc (sext(i1), 0, setne) -> i1 setcc(i1, 0, setne)
      if (VT == MVT::i1
          && Arg0.getOpcode() == ISD::SIGN_EXTEND
          && Arg0.getOperand(0).getValueType() == MVT::i1
          && (C = dyn_cast<ConstantSDNode>(Arg1))
          && C->isNullValue()
          && CCOp == ISD::SETNE) {
        return SimplifySetCC(VT, Arg0.getOperand(0),
                             DAG.getConstant(0, MVT::i1), CCOp, true, DCI, DL);
      }
      break;
    }
  }
  return SDValue();
}

/// \brief Test if RegClass is one of the VSrc classes
static bool isVSrc(unsigned RegClass) {
  return AMDGPU::VSrc_32RegClassID == RegClass ||
         AMDGPU::VSrc_64RegClassID == RegClass;
}

/// \brief Test if RegClass is one of the SSrc classes
static bool isSSrc(unsigned RegClass) {
  return AMDGPU::SSrc_32RegClassID == RegClass ||
         AMDGPU::SSrc_64RegClassID == RegClass;
}

/// \brief Analyze the possible immediate value \p N
///
/// Returns -1 if it isn't an immediate, 0 if it's an inline immediate,
/// and the immediate value if it's a literal immediate.
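///
/// For example, 7 or -1.0f can be encoded inline (result 0), 1234 has to be
/// encoded as the literal 1234, and anything wider than 32 bits yields -1.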
int32_t SITargetLowering::analyzeImmediate(const SDNode *N) const {

  union {
    int32_t I;
    float F;
  } Imm;

  if (const ConstantSDNode *Node = dyn_cast<ConstantSDNode>(N)) {
    if (Node->getZExtValue() >> 32) {
      return -1;
    }
    Imm.I = Node->getSExtValue();
  } else if (const ConstantFPSDNode *Node = dyn_cast<ConstantFPSDNode>(N))
    Imm.F = Node->getValueAPF().convertToFloat();
  else
    return -1; // It isn't an immediate

  if ((Imm.I >= -16 && Imm.I <= 64) ||
      Imm.F == 0.5f || Imm.F == -0.5f ||
      Imm.F == 1.0f || Imm.F == -1.0f ||
      Imm.F == 2.0f || Imm.F == -2.0f ||
      Imm.F == 4.0f || Imm.F == -4.0f)
    return 0; // It's an inline immediate

  return Imm.I; // It's a literal immediate
}

/// \brief Try to fold an immediate directly into an instruction
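///
/// \p Immediate tracks the literal constant the instruction already carries,
/// and \p ScalarSlotUsed records whether the single scalar source slot has
/// been consumed; both limit what can still be folded.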
bool SITargetLowering::foldImm(SDValue &Operand, int32_t &Immediate,
                               bool &ScalarSlotUsed) const {

  MachineSDNode *Mov = dyn_cast<MachineSDNode>(Operand);
  const SIInstrInfo *TII =
    static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
  if (Mov == 0 || !TII->isMov(Mov->getMachineOpcode()))
    return false;

  const SDValue &Op = Mov->getOperand(0);
  int32_t Value = analyzeImmediate(Op.getNode());
  if (Value == -1) {
    // Not an immediate at all
    return false;

  } else if (Value == 0) {
    // Inline immediates can always be folded
    Operand = Op;
    return true;

  } else if (Value == Immediate) {
    // The literal matches the immediate that was already folded
    Operand = Op;
    return true;

  } else if (!ScalarSlotUsed && !Immediate) {
    // Fold this literal immediate
    ScalarSlotUsed = true;
    Immediate = Value;
    Operand = Op;
    return true;

  }

  return false;
}

const TargetRegisterClass *SITargetLowering::getRegClassForNode(
                                   SelectionDAG &DAG, const SDValue &Op) const {
  const SIInstrInfo *TII =
    static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
  const SIRegisterInfo &TRI = TII->getRegisterInfo();

  if (!Op->isMachineOpcode()) {
    switch (Op->getOpcode()) {
    case ISD::CopyFromReg: {
      MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
      unsigned Reg = cast<RegisterSDNode>(Op->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        return MRI.getRegClass(Reg);
      }
      return TRI.getPhysRegClass(Reg);
    }
    default: return NULL;
    }
  }
  const MCInstrDesc &Desc = TII->get(Op->getMachineOpcode());
  int OpClassID = Desc.OpInfo[Op.getResNo()].RegClass;
  if (OpClassID != -1) {
    return TRI.getRegClass(OpClassID);
  }
  switch (Op.getMachineOpcode()) {
  case AMDGPU::COPY_TO_REGCLASS:
    // Operand 1 is the register class id for COPY_TO_REGCLASS instructions.
    OpClassID = cast<ConstantSDNode>(Op->getOperand(1))->getZExtValue();

    // If the COPY_TO_REGCLASS instruction is copying to a VSrc register
    // class, then the register class for the value could be either a
    // VReg or an SReg.  In order to get a more accurate class, recurse
    // on the operand being copied.
    if (OpClassID == AMDGPU::VSrc_32RegClassID ||
        OpClassID == AMDGPU::VSrc_64RegClassID) {
      return getRegClassForNode(DAG, Op.getOperand(0));
    }
    return TRI.getRegClass(OpClassID);
  case AMDGPU::EXTRACT_SUBREG: {
    int SubIdx = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
    const TargetRegisterClass *SuperClass =
      getRegClassForNode(DAG, Op.getOperand(0));
    return TRI.getSubClassWithSubReg(SuperClass, SubIdx);
  }
  case AMDGPU::REG_SEQUENCE:
    // Operand 0 is the register class id for REG_SEQUENCE instructions.
    return TRI.getRegClass(
      cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue());
  default:
    return getRegClassFor(Op.getSimpleValueType());
  }
}

/// \brief Does \p Op fit into register class \p RegClass?
bool SITargetLowering::fitsRegClass(SelectionDAG &DAG, const SDValue &Op,
                                    unsigned RegClass) const {
  const TargetRegisterInfo *TRI = getTargetMachine().getRegisterInfo();
  const TargetRegisterClass *RC = getRegClassForNode(DAG, Op);
  if (!RC) {
    return false;
  }
  return TRI->getRegClass(RegClass)->hasSubClassEq(RC);
}

/// \brief Make sure that we don't exceed the number of allowed scalars
void SITargetLowering::ensureSRegLimit(SelectionDAG &DAG, SDValue &Operand,
                                       unsigned RegClass,
                                       bool &ScalarSlotUsed) const {

  // First map the operand's register class to a destination class
  if (RegClass == AMDGPU::VSrc_32RegClassID)
    RegClass = AMDGPU::VReg_32RegClassID;
  else if (RegClass == AMDGPU::VSrc_64RegClassID)
    RegClass = AMDGPU::VReg_64RegClassID;
  else
    return;

  // Nothing to do if it fits naturally
  if (fitsRegClass(DAG, Operand, RegClass))
    return;

  // If the scalar slot isn't used yet, use it now
  if (!ScalarSlotUsed) {
    ScalarSlotUsed = true;
    return;
  }

  // This is a conservative approach. It is possible that we can't determine
  // the correct register class and copy too often, but better safe than
  // sorry.
  SDValue RC = DAG.getTargetConstant(RegClass, MVT::i32);
  SDNode *Node = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS, SDLoc(),
                                    Operand.getValueType(), Operand, RC);
  Operand = SDValue(Node, 0);
}

/// \returns true if \p Node's operands are different from the SDValue list
/// \p Ops
static bool isNodeChanged(const SDNode *Node, const std::vector<SDValue> &Ops) {
  for (unsigned i = 0, e = Node->getNumOperands(); i < e; ++i) {
    if (Ops[i].getNode() != Node->getOperand(i).getNode()) {
      return true;
    }
  }
  return false;
}

/// \brief Try to fold the node's operands into the node itself
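///
/// VOP instructions come in a 32-bit (e32) and a 64-bit (e64) encoding; e32
/// can carry a single literal constant, while e64 trades the literal for
/// source-modifier operands.  This routine folds immediates, swaps commutable
/// operands, and promotes to the e64 encoding where that enables a fold.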
SDNode *SITargetLowering::foldOperands(MachineSDNode *Node,
                                       SelectionDAG &DAG) const {

  // Original encoding (either e32 or e64)
  int Opcode = Node->getMachineOpcode();
  const SIInstrInfo *TII =
    static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
  const MCInstrDesc *Desc = &TII->get(Opcode);

  unsigned NumDefs = Desc->getNumDefs();
  unsigned NumOps = Desc->getNumOperands();

  // Commuted opcode if available
  int OpcodeRev = Desc->isCommutable() ? TII->commuteOpcode(Opcode) : -1;
  const MCInstrDesc *DescRev = OpcodeRev == -1 ? 0 : &TII->get(OpcodeRev);

  assert(!DescRev || DescRev->getNumDefs() == NumDefs);
  assert(!DescRev || DescRev->getNumOperands() == NumOps);

  // e64 version if available, -1 otherwise
  int OpcodeE64 = AMDGPU::getVOPe64(Opcode);
  const MCInstrDesc *DescE64 = OpcodeE64 == -1 ? 0 : &TII->get(OpcodeE64);

  assert(!DescE64 || DescE64->getNumDefs() == NumDefs);
  assert(!DescE64 || DescE64->getNumOperands() == (NumOps + 4));

  int32_t Immediate = Desc->getSize() == 4 ? 0 : -1;
  bool HaveVSrc = false, HaveSSrc = false;

  // First figure out what we already have in this instruction
  for (unsigned i = 0, e = Node->getNumOperands(), Op = NumDefs;
       i != e && Op < NumOps; ++i, ++Op) {

    unsigned RegClass = Desc->OpInfo[Op].RegClass;
    if (isVSrc(RegClass))
      HaveVSrc = true;
    else if (isSSrc(RegClass))
      HaveSSrc = true;
    else
      continue;

    int32_t Imm = analyzeImmediate(Node->getOperand(i).getNode());
    if (Imm != -1 && Imm != 0) {
      // Literal immediate
      Immediate = Imm;
    }
  }

  // If we have neither VSrc nor SSrc, it makes no sense to continue
  if (!HaveVSrc && !HaveSSrc)
    return Node;

  // No scalar allowed when we have both VSrc and SSrc
  bool ScalarSlotUsed = HaveVSrc && HaveSSrc;

  // Second, go over the operands and try to fold them
  std::vector<SDValue> Ops;
  bool Promote2e64 = false;
  for (unsigned i = 0, e = Node->getNumOperands(), Op = NumDefs;
       i != e && Op < NumOps; ++i, ++Op) {

    const SDValue &Operand = Node->getOperand(i);
    Ops.push_back(Operand);

    // Already-folded immediate?
    if (isa<ConstantSDNode>(Operand.getNode()) ||
        isa<ConstantFPSDNode>(Operand.getNode()))
      continue;

    // Is this a VSrc or SSrc operand?
    unsigned RegClass = Desc->OpInfo[Op].RegClass;
    if (isVSrc(RegClass) || isSSrc(RegClass)) {
      // Try to fold the immediates
      if (!foldImm(Ops[i], Immediate, ScalarSlotUsed)) {
        // Folding didn't work; make sure we don't hit the SReg limit
        ensureSRegLimit(DAG, Ops[i], RegClass, ScalarSlotUsed);
      }
      continue;
    }

    if (i == 1 && DescRev && fitsRegClass(DAG, Ops[0], RegClass)) {

      unsigned OtherRegClass = Desc->OpInfo[NumDefs].RegClass;
      assert(isVSrc(OtherRegClass) || isSSrc(OtherRegClass));

      // Test if it makes sense to swap operands
      if (foldImm(Ops[1], Immediate, ScalarSlotUsed) ||
          (!fitsRegClass(DAG, Ops[1], RegClass) &&
           fitsRegClass(DAG, Ops[1], OtherRegClass))) {

        // Swap commutable operands
        SDValue Tmp = Ops[1];
        Ops[1] = Ops[0];
        Ops[0] = Tmp;

        Desc = DescRev;
        DescRev = 0;
        continue;
      }
    }

    if (DescE64 && !Immediate) {

      // Test if it makes sense to switch to e64 encoding
      unsigned OtherRegClass = DescE64->OpInfo[Op].RegClass;
      if (!isVSrc(OtherRegClass) && !isSSrc(OtherRegClass))
        continue;

      int32_t TmpImm = -1;
      if (foldImm(Ops[i], TmpImm, ScalarSlotUsed) ||
          (!fitsRegClass(DAG, Ops[i], RegClass) &&
           fitsRegClass(DAG, Ops[i], OtherRegClass))) {

        // Switch to e64 encoding
        Immediate = -1;
        Promote2e64 = true;
        Desc = DescE64;
        DescE64 = 0;
      }
    }
  }

  if (Promote2e64) {
    // Add the modifier flags while promoting
    for (unsigned i = 0; i < 4; ++i)
      Ops.push_back(DAG.getTargetConstant(0, MVT::i32));
  }

  // Add optional chain and glue
  for (unsigned i = NumOps - NumDefs, e = Node->getNumOperands(); i < e; ++i)
    Ops.push_back(Node->getOperand(i));

  // Nodes that have a glue result are not CSE'd by getMachineNode(), so in
  // this case a brand new node is always created, even if the operands
  // are the same as before.  So, manually check if anything has been changed.
  if (Desc->Opcode == Opcode && !isNodeChanged(Node, Ops)) {
    return Node;
  }

  // Create a completely new instruction
  return DAG.getMachineNode(Desc->Opcode, SDLoc(Node), Node->getVTList(), Ops);
}

/// \brief Helper function for adjustWritemask
static unsigned SubIdx2Lane(unsigned Idx) {
  switch (Idx) {
  default: return 0;
  case AMDGPU::sub0: return 0;
  case AMDGPU::sub1: return 1;
  case AMDGPU::sub2: return 2;
  case AMDGPU::sub3: return 3;
  }
}

/// \brief Adjust the writemask of MIMG instructions
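///
/// If only some components of the result are actually read (through
/// EXTRACT_SUBREG), shrink the dmask accordingly; e.g. if just the first and
/// third enabled components are used, dmask 0xf becomes 0x5 and the
/// instruction writes two registers instead of four.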
void SITargetLowering::adjustWritemask(MachineSDNode *&Node,
                                       SelectionDAG &DAG) const {
  SDNode *Users[4] = { };
  unsigned Lane = 0;
  unsigned OldDmask = Node->getConstantOperandVal(0);
  unsigned NewDmask = 0;

  // Try to figure out the used register components
  for (SDNode::use_iterator I = Node->use_begin(), E = Node->use_end();
       I != E; ++I) {

    // Abort if we can't understand the usage
    if (!I->isMachineOpcode() ||
        I->getMachineOpcode() != TargetOpcode::EXTRACT_SUBREG)
      return;

    // Lane means which subreg of %VGPRa_VGPRb_VGPRc_VGPRd is used.
    // Note that subregs are packed, i.e. Lane==0 is the first bit set
    // in OldDmask, so it can be any of X,Y,Z,W; Lane==1 is the second bit
    // set, etc.
    Lane = SubIdx2Lane(I->getConstantOperandVal(1));

    // Set which texture component corresponds to the lane.
    unsigned Comp;
    for (unsigned i = 0, Dmask = OldDmask; i <= Lane; i++) {
      assert(Dmask);
      Comp = countTrailingZeros(Dmask);
      Dmask &= ~(1 << Comp);
    }

    // Abort if we have more than one user per component
    if (Users[Lane])
      return;

    Users[Lane] = *I;
    NewDmask |= 1 << Comp;
  }

  // Abort if there's no change
  if (NewDmask == OldDmask)
    return;

  // Adjust the writemask in the node
  std::vector<SDValue> Ops;
  Ops.push_back(DAG.getTargetConstant(NewDmask, MVT::i32));
  for (unsigned i = 1, e = Node->getNumOperands(); i != e; ++i)
    Ops.push_back(Node->getOperand(i));
  Node = (MachineSDNode*)DAG.UpdateNodeOperands(Node, Ops.data(), Ops.size());

  // If we only got one lane, replace it with a copy
  // (if NewDmask has only one bit set...)
  if (NewDmask && (NewDmask & (NewDmask-1)) == 0) {
    SDValue RC = DAG.getTargetConstant(AMDGPU::VReg_32RegClassID, MVT::i32);
    SDNode *Copy = DAG.getMachineNode(TargetOpcode::COPY_TO_REGCLASS,
                                      SDLoc(), Users[Lane]->getValueType(0),
                                      SDValue(Node, 0), RC);
    DAG.ReplaceAllUsesWith(Users[Lane], Copy);
    return;
  }

  // Update the users of the node with the new indices
  for (unsigned i = 0, Idx = AMDGPU::sub0; i < 4; ++i) {

    SDNode *User = Users[i];
    if (!User)
      continue;

    SDValue Op = DAG.getTargetConstant(Idx, MVT::i32);
    DAG.UpdateNodeOperands(User, User->getOperand(0), Op);

    switch (Idx) {
    default: break;
    case AMDGPU::sub0: Idx = AMDGPU::sub1; break;
    case AMDGPU::sub1: Idx = AMDGPU::sub2; break;
    case AMDGPU::sub2: Idx = AMDGPU::sub3; break;
    }
  }
}

/// \brief Fold the instructions after selecting them
SDNode *SITargetLowering::PostISelFolding(MachineSDNode *Node,
                                          SelectionDAG &DAG) const {
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
  Node = AdjustRegClass(Node, DAG);

  if (TII->isMIMG(Node->getMachineOpcode()))
    adjustWritemask(Node, DAG);

  return foldOperands(Node, DAG);
}

/// \brief Assign the register class depending on the number of
/// bits set in the writemask
void SITargetLowering::AdjustInstrPostInstrSelection(MachineInstr *MI,
                                                     SDNode *Node) const {
  const SIInstrInfo *TII =
      static_cast<const SIInstrInfo*>(getTargetMachine().getInstrInfo());
  if (!TII->isMIMG(MI->getOpcode()))
    return;

  unsigned VReg = MI->getOperand(0).getReg();
  unsigned Writemask = MI->getOperand(1).getImm();
  unsigned BitsSet = 0;
  for (unsigned i = 0; i < 4; ++i)
    BitsSet += Writemask & (1 << i) ? 1 : 0;

  const TargetRegisterClass *RC;
  switch (BitsSet) {
  default: return;
  case 1:  RC = &AMDGPU::VReg_32RegClass; break;
  case 2:  RC = &AMDGPU::VReg_64RegClass; break;
  case 3:  RC = &AMDGPU::VReg_96RegClass; break;
  }

  unsigned NewOpcode = TII->getMaskedMIMGOp(MI->getOpcode(), BitsSet);
  MI->setDesc(TII->get(NewOpcode));
  MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  MRI.setRegClass(VReg, RC);
}
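/// \brief Convert scalar (SMRD) loads with a non-scalar pointer into the
/// equivalent BUFFER_LOAD_*_ADDR64 instructions.
///
/// If the pointer operand already fits in scalar registers the node is left
/// alone; otherwise a resource descriptor with a null base is materialized
/// via SI_ADDR64_RSRC and the offset is rescaled from dwords to bytes.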
MachineSDNode *SITargetLowering::AdjustRegClass(MachineSDNode *N,
                                                SelectionDAG &DAG) const {

  SDLoc DL(N);
  unsigned NewOpcode = N->getMachineOpcode();

  switch (N->getMachineOpcode()) {
  default: return N;
  case AMDGPU::S_LOAD_DWORD_IMM:
    NewOpcode = AMDGPU::BUFFER_LOAD_DWORD_ADDR64;
    // Fall-through
  case AMDGPU::S_LOAD_DWORDX2_SGPR:
    if (NewOpcode == N->getMachineOpcode()) {
      NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX2_ADDR64;
    }
    // Fall-through
  case AMDGPU::S_LOAD_DWORDX4_IMM:
  case AMDGPU::S_LOAD_DWORDX4_SGPR: {
    if (NewOpcode == N->getMachineOpcode()) {
      NewOpcode = AMDGPU::BUFFER_LOAD_DWORDX4_ADDR64;
    }
    if (fitsRegClass(DAG, N->getOperand(0), AMDGPU::SReg_64RegClassID)) {
      return N;
    }
    ConstantSDNode *Offset = cast<ConstantSDNode>(N->getOperand(1));
    SDValue Ops[] = {
      SDValue(DAG.getMachineNode(AMDGPU::SI_ADDR64_RSRC, DL, MVT::i128,
                                 DAG.getConstant(0, MVT::i64)), 0),
      N->getOperand(0),
      DAG.getConstant(Offset->getSExtValue() << 2, MVT::i32)
    };
    return DAG.getMachineNode(NewOpcode, DL, N->getVTList(), Ops);
  }
  }
}
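/// \brief Wrapper around the AMDGPU implementation that returns the actual
/// CopyFromReg value of the live-in rather than the plain register node.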
SDValue SITargetLowering::CreateLiveInRegister(SelectionDAG &DAG,
                                               const TargetRegisterClass *RC,
                                               unsigned Reg, EVT VT) const {
  SDValue VReg = AMDGPUTargetLowering::CreateLiveInRegister(DAG, RC, Reg, VT);

  return DAG.getCopyFromReg(DAG.getEntryNode(), SDLoc(DAG.getEntryNode()),
                            cast<RegisterSDNode>(VReg)->getReg(), VT);
}
1257