//===-- SILoadStoreOptimizer.cpp ------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass tries to fuse DS instructions with nearby immediate offsets.
// This will fuse operations such as
//  ds_read_b32 v0, v2 offset:16
//  ds_read_b32 v1, v2 offset:32
// ==>
//   ds_read2_b32 v[0:1], v2, offset0:4 offset1:8
//
// Future improvements:
//
// - This currently relies on the scheduler to place loads and stores next to
//   each other, and then only merges adjacent pairs of instructions. It would
//   be good to be more flexible with interleaved instructions, and possibly to
//   run before scheduling. It currently misses stores of constants because
//   loading the constant into the data register is placed between the stores,
//   although this is arguably a scheduling problem.
//
// - Recomputing live intervals seems inefficient. This currently only matches
//   one pair, recomputes live intervals, and moves on to the next pair. It
//   would be better to compute a list of all merges that need to occur.
//
// - With a list of instructions to process, we can also merge more. If a
//   cluster of loads has offsets that are too large to fit in the 8-bit
//   offset fields, but are close enough together, we can add a constant to
//   the base pointer and use the new, reduced offsets, as illustrated below.
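//   As an illustration (values are only an example): with a 4-byte element
//   size, byte offsets 4096 and 4100 give element offsets 1024 and 1025,
//   which do not fit in 8 bits; adding 4096 to the base pointer reduces them
//   to 0 and 1, which do.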
//
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "SIInstrInfo.h"
#include "SIRegisterInfo.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define DEBUG_TYPE "si-load-store-opt"

namespace {

class SILoadStoreOptimizer : public MachineFunctionPass {
private:
  const SIInstrInfo *TII;
  const SIRegisterInfo *TRI;
  MachineRegisterInfo *MRI;
  LiveIntervals *LIS;

  static bool offsetsCanBeCombined(unsigned Offset0,
                                   unsigned Offset1,
                                   unsigned EltSize);

  MachineBasicBlock::iterator findMatchingDSInst(MachineBasicBlock::iterator I,
                                                 unsigned EltSize);

  void updateRegDefsUses(unsigned SrcReg,
                         unsigned DstReg,
                         unsigned SubIdx);

  MachineBasicBlock::iterator mergeRead2Pair(
    MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator Paired,
    unsigned EltSize);

  MachineBasicBlock::iterator mergeWrite2Pair(
    MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator Paired,
    unsigned EltSize);

public:
  static char ID;

  SILoadStoreOptimizer()
      : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), MRI(nullptr),
        LIS(nullptr) {}

  SILoadStoreOptimizer(const TargetMachine &TM_)
      : MachineFunctionPass(ID), TII(nullptr), TRI(nullptr), MRI(nullptr),
        LIS(nullptr) {
    initializeSILoadStoreOptimizerPass(*PassRegistry::getPassRegistry());
  }

  bool optimizeBlock(MachineBasicBlock &MBB);

  bool runOnMachineFunction(MachineFunction &MF) override;

  const char *getPassName() const override {
    return "SI Load / Store Optimizer";
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addPreserved<SlotIndexes>();
    AU.addPreserved<LiveIntervals>();
    AU.addPreserved<LiveVariables>();
    AU.addRequired<LiveIntervals>();

    MachineFunctionPass::getAnalysisUsage(AU);
  }
};

} // End anonymous namespace.

INITIALIZE_PASS_BEGIN(SILoadStoreOptimizer, DEBUG_TYPE,
                      "SI Load / Store Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(LiveIntervals)
INITIALIZE_PASS_DEPENDENCY(LiveVariables)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(SILoadStoreOptimizer, DEBUG_TYPE,
                    "SI Load / Store Optimizer", false, false)

char SILoadStoreOptimizer::ID = 0;

char &llvm::SILoadStoreOptimizerID = SILoadStoreOptimizer::ID;

FunctionPass *llvm::createSILoadStoreOptimizerPass(TargetMachine &TM) {
  return new SILoadStoreOptimizer(TM);
}

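// Check whether two DS byte offsets can be encoded in the offset0/offset1
// fields of a single read2/write2 (or their st64 forms), given the element
// size in bytes. Each field holds an 8-bit element count; for example, with
// an element size of 4, byte offsets 16 and 32 become element offsets 4 and
// 8, which both fit.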
bool SILoadStoreOptimizer::offsetsCanBeCombined(unsigned Offset0,
                                                unsigned Offset1,
                                                unsigned EltSize) {
  // XXX - Would the same offset be OK? Is there any reason this would happen or
  // be useful?
  if (Offset0 == Offset1)
    return false;

  // This won't be valid if the offset isn't aligned.
  if ((Offset0 % EltSize != 0) || (Offset1 % EltSize != 0))
    return false;

  unsigned EltOffset0 = Offset0 / EltSize;
  unsigned EltOffset1 = Offset1 / EltSize;

  // Check if the new offsets fit in the reduced 8-bit range.
  if (isUInt<8>(EltOffset0) && isUInt<8>(EltOffset1))
    return true;

  // If the offset in elements doesn't fit in 8 bits, we might be able to use
  // the stride-64 versions.
  if ((EltOffset0 % 64 != 0) || (EltOffset1 % 64 != 0))
    return false;

  return isUInt<8>(EltOffset0 / 64) && isUInt<8>(EltOffset1 / 64);
}

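// Return the instruction following I if it is a mergeable DS access: same
// opcode, same base address register and subregister, no ordered memory
// reference, and offsets that offsetsCanBeCombined accepts. Otherwise return
// the basic block's end iterator.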
MachineBasicBlock::iterator
SILoadStoreOptimizer::findMatchingDSInst(MachineBasicBlock::iterator I,
                                         unsigned EltSize) {
  MachineBasicBlock::iterator E = I->getParent()->end();
  MachineBasicBlock::iterator MBBI = I;
  ++MBBI;

  // I may be the last instruction in the block; don't walk off the end.
  if (MBBI == E)
    return E;

  if (MBBI->getOpcode() != I->getOpcode())
    return E;

  // Don't merge volatile or otherwise ordered memory accesses.
  if (MBBI->hasOrderedMemoryRef())
    return E;

  int AddrIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(), AMDGPU::OpName::addr);
  const MachineOperand &AddrReg0 = I->getOperand(AddrIdx);
  const MachineOperand &AddrReg1 = MBBI->getOperand(AddrIdx);

  // Check same base pointer. Be careful of subregisters, which can occur with
  // vectors of pointers.
  if (AddrReg0.getReg() == AddrReg1.getReg() &&
      AddrReg0.getSubReg() == AddrReg1.getSubReg()) {
    int OffsetIdx = AMDGPU::getNamedOperandIdx(I->getOpcode(),
                                               AMDGPU::OpName::offset);
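    // The DS instructions encode the offset in a 16-bit field, so only the
    // low 16 bits of the immediate are meaningful.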
    unsigned Offset0 = I->getOperand(OffsetIdx).getImm() & 0xffff;
    unsigned Offset1 = MBBI->getOperand(OffsetIdx).getImm() & 0xffff;

    // Check both offsets fit in the reduced range.
    if (offsetsCanBeCombined(Offset0, Offset1, EltSize))
      return MBBI;
  }

  return E;
}

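// Rewrite every operand currently referring to SrcReg so that it refers to
// the given subregister of DstReg instead. The iterator is advanced before
// the operand is modified, since substVirtReg moves the operand onto
// DstReg's use list.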
void SILoadStoreOptimizer::updateRegDefsUses(unsigned SrcReg,
                                             unsigned DstReg,
                                             unsigned SubIdx) {
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(SrcReg),
         E = MRI->reg_end(); I != E; ) {
    MachineOperand &O = *I;
    ++I;
    O.substVirtReg(DstReg, SubIdx, *TRI);
  }
}

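// Fold the two single-element reads I and Paired into one ds_read2 (or
// ds_read2st64) that defines a new wide register, then rewrite all users of
// the two original destination registers to read the matching subregisters
// of the wide result. Returns an iterator to the new read2.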
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeRead2Pair(
  MachineBasicBlock::iterator I,
  MachineBasicBlock::iterator Paired,
  unsigned EltSize) {
  MachineBasicBlock *MBB = I->getParent();

  // Be careful, since the addresses could be subregisters themselves in weird
  // cases, like vectors of pointers.
  const MachineOperand *AddrReg = TII->getNamedOperand(*I, AMDGPU::OpName::addr);
  const MachineOperand *M0Reg = TII->getNamedOperand(*I, AMDGPU::OpName::m0);

  unsigned DestReg0 = TII->getNamedOperand(*I, AMDGPU::OpName::vdst)->getReg();
  unsigned DestReg1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::vdst)->getReg();

  unsigned Offset0
    = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
  unsigned Offset1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;

  unsigned NewOffset0 = Offset0 / EltSize;
  unsigned NewOffset1 = Offset1 / EltSize;
  unsigned Opc = (EltSize == 4) ? AMDGPU::DS_READ2_B32 : AMDGPU::DS_READ2_B64;

  // Prefer the st64 form if we can use it, even if we can fit the offset in the
  // non-st64 version. I'm not sure if there's any real reason to do this.
  bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
  if (UseST64) {
    NewOffset0 /= 64;
    NewOffset1 /= 64;
    Opc = (EltSize == 4) ? AMDGPU::DS_READ2ST64_B32 : AMDGPU::DS_READ2ST64_B64;
  }
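  // Worked example with illustrative values: for EltSize == 4, byte offsets
  // 1024 and 1280 give element offsets 256 and 320. Neither fits in 8 bits,
  // but both are multiples of 64, so the st64 form encodes them as 4 and 5.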

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Read2Desc = TII->get(Opc);

  const TargetRegisterClass *SuperRC
    = (EltSize == 4) ? &AMDGPU::VReg_64RegClass : &AMDGPU::VReg_128RegClass;
  unsigned DestReg = MRI->createVirtualRegister(SuperRC);

  DebugLoc DL = I->getDebugLoc();
  MachineInstrBuilder Read2
    = BuildMI(*MBB, I, DL, Read2Desc, DestReg)
    .addOperand(*AddrReg) // addr
    .addImm(NewOffset0) // offset0
    .addImm(NewOffset1) // offset1
    .addImm(0) // gds
    .addOperand(*M0Reg) // M0
    .addMemOperand(*I->memoperands_begin())
    .addMemOperand(*Paired->memoperands_begin());

  LIS->InsertMachineInstrInMaps(Read2);

  unsigned SubRegIdx0 = (EltSize == 4) ? AMDGPU::sub0 : AMDGPU::sub0_sub1;
  unsigned SubRegIdx1 = (EltSize == 4) ? AMDGPU::sub1 : AMDGPU::sub2_sub3;
  updateRegDefsUses(DestReg0, DestReg, SubRegIdx0);
  updateRegDefsUses(DestReg1, DestReg, SubRegIdx1);

  // AddrReg, M0Reg, and Paired's m0 operand point into the instructions we
  // are about to erase, so read the register numbers first.
  unsigned AddrRegNo = AddrReg->getReg();
  unsigned M0RegNo = M0Reg->getReg();
  unsigned PairedM0RegNo
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::m0)->getReg();

  LIS->RemoveMachineInstrFromMaps(I);
  LIS->RemoveMachineInstrFromMaps(Paired);
  I->eraseFromParent();
  Paired->eraseFromParent();

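  // The merged read2 has a single use of the address and m0 registers where
  // there were two, so shrink their live ranges to the uses that remain.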
  LiveInterval &AddrRegLI = LIS->getInterval(AddrRegNo);
  LIS->shrinkToUses(&AddrRegLI);

  LiveInterval &M0RegLI = LIS->getInterval(M0RegNo);
  LIS->shrinkToUses(&M0RegLI);

  // Currently m0 is treated as a register class with one member instead of an
  // implicit physical register. We are using the virtual register for the first
  // one, but we still need to update the live range of the now unused second m0
  // virtual register to avoid verifier errors.
  LiveInterval &PairedM0RegLI = LIS->getInterval(PairedM0RegNo);
  LIS->shrinkToUses(&PairedM0RegLI);

  LIS->getInterval(DestReg); // Create new LI

  DEBUG(dbgs() << "Inserted read2: " << *Read2 << '\n');
  return Read2.getInstr();
}

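// Fold the two single-element writes I and Paired into one ds_write2 (or
// ds_write2st64) carrying both data operands, then repair the live intervals
// of the registers the new instruction uses. Returns an iterator to the new
// write2.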
MachineBasicBlock::iterator SILoadStoreOptimizer::mergeWrite2Pair(
  MachineBasicBlock::iterator I,
  MachineBasicBlock::iterator Paired,
  unsigned EltSize) {
  MachineBasicBlock *MBB = I->getParent();

  // Be sure to use .addOperand(), and not .addReg() with these. We want to be
  // sure we preserve the subregister index and any register flags set on them.
  const MachineOperand *Addr = TII->getNamedOperand(*I, AMDGPU::OpName::addr);
  const MachineOperand *M0Reg = TII->getNamedOperand(*I, AMDGPU::OpName::m0);
  const MachineOperand *Data0 = TII->getNamedOperand(*I, AMDGPU::OpName::data0);
  const MachineOperand *Data1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::data0);

  unsigned Offset0
    = TII->getNamedOperand(*I, AMDGPU::OpName::offset)->getImm() & 0xffff;
  unsigned Offset1
    = TII->getNamedOperand(*Paired, AMDGPU::OpName::offset)->getImm() & 0xffff;

  unsigned NewOffset0 = Offset0 / EltSize;
  unsigned NewOffset1 = Offset1 / EltSize;
  unsigned Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2_B32 : AMDGPU::DS_WRITE2_B64;

  // Prefer the st64 form if we can use it, even if we can fit the offset in the
  // non-st64 version. I'm not sure if there's any real reason to do this.
  bool UseST64 = (NewOffset0 % 64 == 0) && (NewOffset1 % 64 == 0);
  if (UseST64) {
    NewOffset0 /= 64;
    NewOffset1 /= 64;
    Opc = (EltSize == 4) ? AMDGPU::DS_WRITE2ST64_B32 : AMDGPU::DS_WRITE2ST64_B64;
  }

  assert((isUInt<8>(NewOffset0) && isUInt<8>(NewOffset1)) &&
         (NewOffset0 != NewOffset1) &&
         "Computed offset doesn't fit");

  const MCInstrDesc &Write2Desc = TII->get(Opc);
  DebugLoc DL = I->getDebugLoc();

  MachineInstrBuilder Write2
    = BuildMI(*MBB, I, DL, Write2Desc)
    .addOperand(*Addr) // addr
    .addOperand(*Data0) // data0
    .addOperand(*Data1) // data1
    .addImm(NewOffset0) // offset0
    .addImm(NewOffset1) // offset1
    .addImm(0) // gds
    .addOperand(*M0Reg) // m0
    .addMemOperand(*I->memoperands_begin())
    .addMemOperand(*Paired->memoperands_begin());

  // XXX - How do we express subregisters here?
  unsigned OrigRegs[] = { Data0->getReg(), Data1->getReg(), Addr->getReg(),
                          M0Reg->getReg() };

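  // Take the original stores out of the slot index maps before erasing them,
  // then recompute the intervals of the affected registers over the range now
  // occupied by the new write2.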
  LIS->RemoveMachineInstrFromMaps(I);
  LIS->RemoveMachineInstrFromMaps(Paired);
  I->eraseFromParent();
  Paired->eraseFromParent();

  LIS->repairIntervalsInRange(MBB, Write2, Write2, OrigRegs);

  DEBUG(dbgs() << "Inserted write2 inst: " << *Write2 << '\n');
  return Write2.getInstr();
}

// Scan through looking for adjacent LDS operations with constant offsets from
// the same base register. We rely on the scheduler to do the hard work of
// clustering nearby loads, and assume these are all adjacent.
bool SILoadStoreOptimizer::optimizeBlock(MachineBasicBlock &MBB) {
  bool Modified = false;

  for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end(); I != E;) {
    MachineInstr &MI = *I;

    // Don't combine volatile or otherwise ordered memory accesses.
    if (MI.hasOrderedMemoryRef()) {
      ++I;
      continue;
    }

    unsigned Opc = MI.getOpcode();
    if (Opc == AMDGPU::DS_READ_B32 || Opc == AMDGPU::DS_READ_B64) {
      unsigned Size = (Opc == AMDGPU::DS_READ_B64) ? 8 : 4;
      MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size);
      if (Match != E) {
        Modified = true;
        I = mergeRead2Pair(I, Match, Size);
      } else {
        ++I;
      }

      continue;
    } else if (Opc == AMDGPU::DS_WRITE_B32 || Opc == AMDGPU::DS_WRITE_B64) {
      unsigned Size = (Opc == AMDGPU::DS_WRITE_B64) ? 8 : 4;
      MachineBasicBlock::iterator Match = findMatchingDSInst(I, Size);
      if (Match != E) {
        Modified = true;
        I = mergeWrite2Pair(I, Match, Size);
      } else {
        ++I;
      }

      continue;
    }

    ++I;
  }

  return Modified;
}

bool SILoadStoreOptimizer::runOnMachineFunction(MachineFunction &MF) {
  const TargetSubtargetInfo &STM = MF.getSubtarget();
  TRI = static_cast<const SIRegisterInfo *>(STM.getRegisterInfo());
  TII = static_cast<const SIInstrInfo *>(STM.getInstrInfo());
  MRI = &MF.getRegInfo();

  LIS = &getAnalysis<LiveIntervals>();

  DEBUG(dbgs() << "Running SILoadStoreOptimizer\n");

  assert(!MRI->isSSA());

  bool Modified = false;

  for (MachineBasicBlock &MBB : MF)
    Modified |= optimizeBlock(MBB);

  return Modified;
}