//===-- SILowerI1Copies.cpp - Lower I1 Copies -----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
/// i1 values are usually inserted by the CFG Structurize pass and they are
/// unique in that they can be copied from VALU to SALU registers.
/// This is not possible for any other value type.  Since there are no
/// MOV instructions for i1, we need to use V_CMP_* and V_CNDMASK to move
/// the i1.
///
//===----------------------------------------------------------------------===//
//
16#define DEBUG_TYPE "si-i1-copies"
17#include "AMDGPU.h"
18#include "AMDGPUSubtarget.h"
19#include "SIInstrInfo.h"
20#include "llvm/CodeGen/LiveIntervalAnalysis.h"
21#include "llvm/CodeGen/MachineDominators.h"
22#include "llvm/CodeGen/MachineFunctionPass.h"
23#include "llvm/CodeGen/MachineInstrBuilder.h"
24#include "llvm/CodeGen/MachineRegisterInfo.h"
25#include "llvm/IR/LLVMContext.h"
26#include "llvm/IR/Function.h"
27#include "llvm/Support/Debug.h"
28#include "llvm/Target/TargetMachine.h"
29
30using namespace llvm;
31
32namespace {
33
34class SILowerI1Copies : public MachineFunctionPass {
35public:
36  static char ID;
37
38public:
39  SILowerI1Copies() : MachineFunctionPass(ID) {
40    initializeSILowerI1CopiesPass(*PassRegistry::getPassRegistry());
41  }
42
43  bool runOnMachineFunction(MachineFunction &MF) override;
44
45  const char *getPassName() const override {
46    return "SI Lower i1 Copies";
47  }
48
49  void getAnalysisUsage(AnalysisUsage &AU) const override {
50    AU.addRequired<MachineDominatorTree>();
51    AU.addPreserved<MachineDominatorTree>();
52    AU.setPreservesCFG();
53    MachineFunctionPass::getAnalysisUsage(AU);
54  }
55};
56
57} // End anonymous namespace.
58
// Register the pass (and its MachineDominatorTree dependency) with the global
// PassRegistry so it can be referenced by name and scheduled by the pass
// manager.
INITIALIZE_PASS_BEGIN(SILowerI1Copies, DEBUG_TYPE,
                      "SI Lower i1 Copies", false, false)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_END(SILowerI1Copies, DEBUG_TYPE,
                    "SI Lower i1 Copies", false, false)

// Pass identification: the address of ID serves as the unique pass ID.
char SILowerI1Copies::ID = 0;

char &llvm::SILowerI1CopiesID = SILowerI1Copies::ID;
68
69FunctionPass *llvm::createSILowerI1CopiesPass() {
70  return new SILowerI1Copies();
71}
72
73bool SILowerI1Copies::runOnMachineFunction(MachineFunction &MF) {
74  MachineRegisterInfo &MRI = MF.getRegInfo();
75  const SIInstrInfo *TII =
76      static_cast<const SIInstrInfo *>(MF.getSubtarget().getInstrInfo());
77  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
78  std::vector<unsigned> I1Defs;
79
80  for (MachineFunction::iterator BI = MF.begin(), BE = MF.end();
81                                                  BI != BE; ++BI) {
82
83    MachineBasicBlock &MBB = *BI;
84    MachineBasicBlock::iterator I, Next;
85    for (I = MBB.begin(); I != MBB.end(); I = Next) {
86      Next = std::next(I);
87      MachineInstr &MI = *I;
88
89      if (MI.getOpcode() == AMDGPU::IMPLICIT_DEF) {
90        unsigned Reg = MI.getOperand(0).getReg();
91        const TargetRegisterClass *RC = MRI.getRegClass(Reg);
92        if (RC == &AMDGPU::VReg_1RegClass)
93          MRI.setRegClass(Reg, &AMDGPU::SReg_64RegClass);
94        continue;
95      }
96
97      if (MI.getOpcode() != AMDGPU::COPY)
98        continue;
99
100      const MachineOperand &Dst = MI.getOperand(0);
101      const MachineOperand &Src = MI.getOperand(1);
102
103      if (!TargetRegisterInfo::isVirtualRegister(Src.getReg()) ||
104          !TargetRegisterInfo::isVirtualRegister(Dst.getReg()))
105        continue;
106
107      const TargetRegisterClass *DstRC = MRI.getRegClass(Dst.getReg());
108      const TargetRegisterClass *SrcRC = MRI.getRegClass(Src.getReg());
109
110      if (DstRC == &AMDGPU::VReg_1RegClass &&
111          TRI->getCommonSubClass(SrcRC, &AMDGPU::SGPR_64RegClass)) {
112        I1Defs.push_back(Dst.getReg());
113        DebugLoc DL = MI.getDebugLoc();
114
115        MachineInstr *DefInst = MRI.getUniqueVRegDef(Src.getReg());
116        if (DefInst->getOpcode() == AMDGPU::S_MOV_B64) {
117          if (DefInst->getOperand(1).isImm()) {
118            I1Defs.push_back(Dst.getReg());
119
120            int64_t Val = DefInst->getOperand(1).getImm();
121            assert(Val == 0 || Val == -1);
122
123            BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_MOV_B32_e32))
124              .addOperand(Dst)
125              .addImm(Val);
126            MI.eraseFromParent();
127            continue;
128          }
129        }
130
131        BuildMI(MBB, &MI, DL, TII->get(AMDGPU::V_CNDMASK_B32_e64))
132          .addOperand(Dst)
133          .addImm(0)
134          .addImm(-1)
135          .addOperand(Src);
136        MI.eraseFromParent();
137      } else if (TRI->getCommonSubClass(DstRC, &AMDGPU::SGPR_64RegClass) &&
138                 SrcRC == &AMDGPU::VReg_1RegClass) {
139        BuildMI(MBB, &MI, MI.getDebugLoc(), TII->get(AMDGPU::V_CMP_NE_I32_e64))
140          .addOperand(Dst)
141          .addOperand(Src)
142          .addImm(0);
143        MI.eraseFromParent();
144      }
145    }
146  }
147
148  for (unsigned Reg : I1Defs)
149    MRI.setRegClass(Reg, &AMDGPU::VGPR_32RegClass);
150
151  return false;
152}
153