1//===-- SIFixSGPRCopies.cpp - Remove potential VGPR => SGPR copies --------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10/// \file
11/// Copies from VGPR to SGPR registers are illegal and the register coalescer
12/// will sometimes generate these illegal copies in situations like this:
13///
14///  Register Class <vsrc> is the union of <vgpr> and <sgpr>
15///
16/// BB0:
17///   %vreg0 <sgpr> = SCALAR_INST
18///   %vreg1 <vsrc> = COPY %vreg0 <sgpr>
19///    ...
20///    BRANCH %cond BB1, BB2
21///  BB1:
22///    %vreg2 <vgpr> = VECTOR_INST
23///    %vreg3 <vsrc> = COPY %vreg2 <vgpr>
24///  BB2:
///    %vreg4 <vsrc> = PHI %vreg1 <vsrc>, <BB#0>, %vreg3 <vsrc>, <BB#1>
26///    %vreg5 <vgpr> = VECTOR_INST %vreg4 <vsrc>
27///
28///
29/// The coalescer will begin at BB0 and eliminate its copy, then the resulting
30/// code will look like this:
31///
32/// BB0:
33///   %vreg0 <sgpr> = SCALAR_INST
34///    ...
35///    BRANCH %cond BB1, BB2
36/// BB1:
37///   %vreg2 <vgpr> = VECTOR_INST
38///   %vreg3 <vsrc> = COPY %vreg2 <vgpr>
39/// BB2:
40///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <vsrc>, <BB#1>
41///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
42///
43/// Now that the result of the PHI instruction is an SGPR, the register
44/// allocator is now forced to constrain the register class of %vreg3 to
45/// <sgpr> so we end up with final code like this:
46///
47/// BB0:
48///   %vreg0 <sgpr> = SCALAR_INST
49///    ...
50///    BRANCH %cond BB1, BB2
51/// BB1:
52///   %vreg2 <vgpr> = VECTOR_INST
53///   %vreg3 <sgpr> = COPY %vreg2 <vgpr>
54/// BB2:
55///   %vreg4 <sgpr> = PHI %vreg0 <sgpr>, <BB#0>, %vreg3 <sgpr>, <BB#1>
56///   %vreg5 <vgpr> = VECTOR_INST %vreg4 <sgpr>
57///
58/// Now this code contains an illegal copy from a VGPR to an SGPR.
59///
60/// In order to avoid this problem, this pass searches for PHI instructions
61/// which define a <vsrc> register and constrains its definition class to
62/// <vgpr> if the user of the PHI's definition register is a vector instruction.
63/// If the PHI's definition class is constrained to <vgpr> then the coalescer
/// will be unable to perform the COPY removal from the above example, which
65/// ultimately led to the creation of an illegal COPY.
66//===----------------------------------------------------------------------===//
67
68#include "AMDGPU.h"
69#include "SIInstrInfo.h"
70#include "llvm/CodeGen/MachineFunctionPass.h"
71#include "llvm/CodeGen/MachineInstrBuilder.h"
72#include "llvm/CodeGen/MachineRegisterInfo.h"
73#include "llvm/Support/Debug.h"
74#include "llvm/Support/raw_ostream.h"
75#include "llvm/Target/TargetMachine.h"
76
77using namespace llvm;
78
79#define DEBUG_TYPE "sgpr-copies"
80
81namespace {
82
/// Machine function pass that prevents / repairs illegal VGPR -> SGPR
/// value flow introduced by the register coalescer (see the file comment).
class SIFixSGPRCopies : public MachineFunctionPass {

private:
  static char ID;
  // Walks the COPY users of (Reg, SubReg) and returns the common register
  // class implied by Reg's own class and all of its COPY destinations.
  const TargetRegisterClass *inferRegClassFromUses(const SIRegisterInfo *TRI,
                                           const MachineRegisterInfo &MRI,
                                           unsigned Reg,
                                           unsigned SubReg) const;
  // Follows the COPY chain defining (Reg, SubReg) back to the first
  // non-COPY definition and returns that definition's register class.
  const TargetRegisterClass *inferRegClassFromDef(const SIRegisterInfo *TRI,
                                                 const MachineRegisterInfo &MRI,
                                                 unsigned Reg,
                                                 unsigned SubReg) const;
  // True if Copy moves a value from a VGPR-capable class into an SGPR
  // class (the illegal pattern this pass exists to fix).
  bool isVGPRToSGPRCopy(const MachineInstr &Copy, const SIRegisterInfo *TRI,
                        const MachineRegisterInfo &MRI) const;

public:
  // The TargetMachine argument is currently unused; it is kept for
  // consistency with the other SI pass constructors.
  SIFixSGPRCopies(TargetMachine &tm) : MachineFunctionPass(ID) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Fix SGPR copies";
  }

};
108
109} // End anonymous namespace
110
111char SIFixSGPRCopies::ID = 0;
112
113FunctionPass *llvm::createSIFixSGPRCopiesPass(TargetMachine &tm) {
114  return new SIFixSGPRCopies(tm);
115}
116
117static bool hasVGPROperands(const MachineInstr &MI, const SIRegisterInfo *TRI) {
118  const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
119  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
120    if (!MI.getOperand(i).isReg() ||
121        !TargetRegisterInfo::isVirtualRegister(MI.getOperand(i).getReg()))
122      continue;
123
124    if (TRI->hasVGPRs(MRI.getRegClass(MI.getOperand(i).getReg())))
125      return true;
126  }
127  return false;
128}
129
130/// This functions walks the use list of Reg until it finds an Instruction
131/// that isn't a COPY returns the register class of that instruction.
132/// \return The register defined by the first non-COPY instruction.
133const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromUses(
134                                                 const SIRegisterInfo *TRI,
135                                                 const MachineRegisterInfo &MRI,
136                                                 unsigned Reg,
137                                                 unsigned SubReg) const {
138  // The Reg parameter to the function must always be defined by either a PHI
139  // or a COPY, therefore it cannot be a physical register.
140  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
141         "Reg cannot be a physical register");
142
143  const TargetRegisterClass *RC = MRI.getRegClass(Reg);
144  RC = TRI->getSubRegClass(RC, SubReg);
145  for (MachineRegisterInfo::use_instr_iterator
146       I = MRI.use_instr_begin(Reg), E = MRI.use_instr_end(); I != E; ++I) {
147    switch (I->getOpcode()) {
148    case AMDGPU::COPY:
149      RC = TRI->getCommonSubClass(RC, inferRegClassFromUses(TRI, MRI,
150                                  I->getOperand(0).getReg(),
151                                  I->getOperand(0).getSubReg()));
152      break;
153    }
154  }
155
156  return RC;
157}
158
159const TargetRegisterClass *SIFixSGPRCopies::inferRegClassFromDef(
160                                                 const SIRegisterInfo *TRI,
161                                                 const MachineRegisterInfo &MRI,
162                                                 unsigned Reg,
163                                                 unsigned SubReg) const {
164  if (!TargetRegisterInfo::isVirtualRegister(Reg)) {
165    const TargetRegisterClass *RC = TRI->getPhysRegClass(Reg);
166    return TRI->getSubRegClass(RC, SubReg);
167  }
168  MachineInstr *Def = MRI.getVRegDef(Reg);
169  if (Def->getOpcode() != AMDGPU::COPY) {
170    return TRI->getSubRegClass(MRI.getRegClass(Reg), SubReg);
171  }
172
173  return inferRegClassFromDef(TRI, MRI, Def->getOperand(1).getReg(),
174                                   Def->getOperand(1).getSubReg());
175}
176
177bool SIFixSGPRCopies::isVGPRToSGPRCopy(const MachineInstr &Copy,
178                                      const SIRegisterInfo *TRI,
179                                      const MachineRegisterInfo &MRI) const {
180
181  unsigned DstReg = Copy.getOperand(0).getReg();
182  unsigned SrcReg = Copy.getOperand(1).getReg();
183  unsigned SrcSubReg = Copy.getOperand(1).getSubReg();
184  const TargetRegisterClass *DstRC = MRI.getRegClass(DstReg);
185  const TargetRegisterClass *SrcRC;
186
187  if (!TargetRegisterInfo::isVirtualRegister(SrcReg) ||
188      DstRC == &AMDGPU::M0RegRegClass ||
189      MRI.getRegClass(SrcReg) == &AMDGPU::VReg_1RegClass)
190    return false;
191
192  SrcRC = TRI->getSubRegClass(MRI.getRegClass(SrcReg), SrcSubReg);
193  return TRI->isSGPRClass(DstRC) && TRI->hasVGPRs(SrcRC);
194}
195
/// Scan every instruction in \p MF and repair illegal VGPR -> SGPR value
/// flow: VGPR->SGPR COPYs, PHIs, REG_SEQUENCEs and INSERT_SUBREGs whose
/// result class is scalar but whose inputs are vector are either
/// constrained to VGPR classes or rewritten with SIInstrInfo::moveToVALU().
/// \return false — NOTE(review): the pass mutates the function but always
///         reports "no change" to the pass manager; confirm whether this
///         is intentional.
bool SIFixSGPRCopies::runOnMachineFunction(MachineFunction &MF) {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  const SIRegisterInfo *TRI = static_cast<const SIRegisterInfo *>(
      MF.getTarget().getRegisterInfo());
  const SIInstrInfo *TII = static_cast<const SIInstrInfo *>(
      MF.getTarget().getInstrInfo());
  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
                                                  BI != BE; ++BI) {

    MachineBasicBlock &MBB = *BI;
    for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
                                                      I != E; ++I) {
      MachineInstr &MI = *I;
      // Case 1: a plain COPY that moves a VGPR value into an SGPR.
      // moveToVALU rewrites MI (and transitively its users) in place;
      // control then deliberately falls through to the switch below, where
      // the rewritten opcode normally hits `default: continue`.
      if (MI.getOpcode() == AMDGPU::COPY && isVGPRToSGPRCopy(MI, TRI, MRI)) {
        DEBUG(dbgs() << "Fixing VGPR -> SGPR copy:\n");
        DEBUG(MI.print(dbgs()));
        TII->moveToVALU(MI);

      }

      switch (MI.getOpcode()) {
      default: continue;
      case AMDGPU::PHI: {
        DEBUG(dbgs() << " Fixing PHI:\n");
        DEBUG(MI.print(dbgs()));

        // First constrain each incoming value (odd operands are the
        // register operands of a PHI) to the class of its defining
        // instruction, so PHI inputs cannot be silently widened.
        for (unsigned i = 1; i < MI.getNumOperands(); i+=2) {
          unsigned Reg = MI.getOperand(i).getReg();
          const TargetRegisterClass *RC = inferRegClassFromDef(TRI, MRI, Reg,
                                                  MI.getOperand(0).getSubReg());
          MRI.constrainRegClass(Reg, RC);
        }
        // Then look at the PHI's users: if any use chain demands a VGPR,
        // pin the result to VReg_32 so the coalescer cannot merge it into
        // an SGPR (the failure mode described in the file comment).
        unsigned Reg = MI.getOperand(0).getReg();
        const TargetRegisterClass *RC = inferRegClassFromUses(TRI, MRI, Reg,
                                                  MI.getOperand(0).getSubReg());
        if (TRI->getCommonSubClass(RC, &AMDGPU::VReg_32RegClass)) {
          MRI.constrainRegClass(Reg, &AMDGPU::VReg_32RegClass);
        }

        if (!TRI->isSGPRClass(MRI.getRegClass(Reg)))
          break;

        // If a PHI node defines an SGPR and any of its operands are VGPRs,
        // then we need to move it to the VALU.
        for (unsigned i = 1; i < MI.getNumOperands(); i+=2) {
          unsigned Reg = MI.getOperand(i).getReg();
          if (TRI->hasVGPRs(MRI.getRegClass(Reg))) {
            TII->moveToVALU(MI);
            break;
          }
        }

        break;
      }
      case AMDGPU::REG_SEQUENCE: {
        // Only a REG_SEQUENCE with a scalar result but at least one VGPR
        // input is illegal; anything else is left untouched.
        if (TRI->hasVGPRs(TII->getOpRegClass(MI, 0)) ||
            !hasVGPROperands(MI, TRI))
          continue;

        DEBUG(dbgs() << "Fixing REG_SEQUENCE:\n");
        DEBUG(MI.print(dbgs()));

        TII->moveToVALU(MI);
        break;
      }
      case AMDGPU::INSERT_SUBREG: {
        // Same rule for INSERT_SUBREG: an SGPR destination fed by any VGPR
        // source (base value or inserted value) must go to the VALU.
        const TargetRegisterClass *DstRC, *Src0RC, *Src1RC;
        DstRC = MRI.getRegClass(MI.getOperand(0).getReg());
        Src0RC = MRI.getRegClass(MI.getOperand(1).getReg());
        Src1RC = MRI.getRegClass(MI.getOperand(2).getReg());
        if (TRI->isSGPRClass(DstRC) &&
            (TRI->hasVGPRs(Src0RC) || TRI->hasVGPRs(Src1RC))) {
          DEBUG(dbgs() << " Fixing INSERT_SUBREG:\n");
          DEBUG(MI.print(dbgs()));
          TII->moveToVALU(MI);
        }
        break;
      }
      }
    }
  }
  return false;
}
279