// InlineSpiller.cpp revision 8de3b1eb868fc5e9b6acb334ee487d943863f810
1//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// The inline spiller modifies the machine function directly instead of
11// inserting spills and restores in VirtRegMap.
12//
13//===----------------------------------------------------------------------===//
14
15#define DEBUG_TYPE "spiller"
16#include "Spiller.h"
17#include "VirtRegMap.h"
18#include "llvm/CodeGen/LiveIntervalAnalysis.h"
19#include "llvm/CodeGen/MachineFrameInfo.h"
20#include "llvm/CodeGen/MachineFunction.h"
21#include "llvm/CodeGen/MachineRegisterInfo.h"
22#include "llvm/Target/TargetMachine.h"
23#include "llvm/Target/TargetInstrInfo.h"
24#include "llvm/Support/Debug.h"
25#include "llvm/Support/raw_ostream.h"
26
27using namespace llvm;
28
namespace {
// InlineSpiller rewrites a LiveInterval's uses and defs in place: it tries to
// rematerialize values, fold stack slot references into instructions, and
// otherwise inserts reloads/spills around each remaining use/def.
class InlineSpiller : public Spiller {
  MachineFunction &mf_;
  LiveIntervals &lis_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  // Registers reserved by the target, computed once at construction.
  const BitVector reserved_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveInterval *li_;                         // Interval currently being spilled.
  std::vector<LiveInterval*> *newIntervals_; // Receives intervals created here.
  const TargetRegisterClass *rc_;            // Register class of li_->reg.
  int stackSlot_;                            // Stack slot assigned to li_->reg.
  const SmallVectorImpl<LiveInterval*> *spillIs_; // Other regs about to spill.

  // Values of the current interval that can potentially remat.
  SmallPtrSet<VNInfo*, 8> reMattable_;

  // Values in reMattable_ that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  // Private destructor: instances are presumably destroyed through the
  // Spiller base interface — TODO confirm against Spiller.h.
  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
    : mf_(*mf), lis_(*lis), vrm_(*vrm),
      mfi_(*mf->getFrameInfo()),
      mri_(mf->getRegInfo()),
      tii_(*mf->getTarget().getInstrInfo()),
      tri_(*mf->getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)) {}

  // Spill li, appending any newly created intervals to newIntervals.
  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs,
             SlotIndex *earliestIndex);

private:
  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
                          SlotIndex UseIdx);
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}
81
82namespace llvm {
83Spiller *createInlineSpiller(MachineFunction *mf,
84                             LiveIntervals *lis,
85                             const MachineLoopInfo *mli,
86                             VirtRegMap *vrm) {
87  return new InlineSpiller(mf, lis, vrm);
88}
89}
90
91/// allUsesAvailableAt - Return true if all registers used by OrigMI at
92/// OrigIdx are also available with the same value at UseIdx.
93bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
94                                       SlotIndex OrigIdx,
95                                       SlotIndex UseIdx) {
96  OrigIdx = OrigIdx.getUseIndex();
97  UseIdx = UseIdx.getUseIndex();
98  for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
99    const MachineOperand &MO = OrigMI->getOperand(i);
100    if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
101      continue;
102    // Reserved registers are OK.
103    if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
104      continue;
105    // We don't want to move any defs.
106    if (MO.isDef())
107      return false;
108    // We cannot depend on virtual registers in spillIs_. They will be spilled.
109    for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
110      if ((*spillIs_)[si]->reg == MO.getReg())
111        return false;
112
113    LiveInterval &LI = lis_.getInterval(MO.getReg());
114    const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
115    if (!OVNI)
116      continue;
117    if (OVNI != LI.getVNInfoAt(UseIdx))
118      return false;
119  }
120  return true;
121}
122
/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
/// reloading it. Returns true if MI no longer needs a reload of li_->reg,
/// either because the value was rematted or because the use was <undef>.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
  // No live value at this use: mark the operands <undef> so no reload is
  // needed at all.
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }
  // The defining instruction was not found trivially rematerializable by
  // reMaterializeAll().
  if (!reMattable_.count(OrigVNI)) {
    DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
                 << UseIdx << '\t' << *MI);
    return false;
  }
  // The remat is only valid if every register the def reads still holds the
  // same value at this use point.
  MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
  if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes li_->reg, it had better not require the same
  // register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      // A use tied to a def (or a subreg def) would force the remat result
      // into the same register as the new def, which we cannot do here.
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = mri_.createVirtualRegister(rc_);
  vrm_.grow();
  LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
  NewLI.markNotSpillable();
  newIntervals_->push_back(&NewLI);

  // Finally we can rematerialize OrigMI before MI.
  MachineBasicBlock &MBB = *MI->getParent();
  tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
  // reMaterialize inserted right before MI, so --RematMI is the new instr.
  MachineBasicBlock::iterator RematMI = MI;
  SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
  DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t' << *RematMI);

  // Replace operands: rewrite the uses of li_->reg in MI to the remat vreg.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t        " << UseIdx << '\t' << *MI);

  // The new interval is live from the remat def to the (single) use at MI.
  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}
196
/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  reMattable_.clear();
  usedValues_.clear();
  for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
       E = li_->vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused() || !VNI->isDefAccurate())
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
      continue;
    reMattable_.insert(VNI);
  }

  // Often, no defs are remattable.
  if (reMattable_.empty())
    return;

  // Try to remat before all uses of li_->reg. skipInstruction() advances the
  // iterator past all operands of the current instruction, so it is safe even
  // though reMaterializeFor rewrites operands.
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();)
     anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted. Values that failed at
  // some use (usedValues_) or live out of the def's block (hasPHIKill) must
  // keep their defining instruction.
  bool anyRemoved = false;
  for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
       E = reMattable_.end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->hasPHIKill() || usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    li_->removeValNo(VNI);
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where li_ is not live.
  for (MachineRegisterInfo::use_iterator
       RI = mri_.use_begin(li_->reg), RE = mri_.use_end(); RI != RE;) {
    MachineOperand &MO = RI.getOperand();
    MachineInstr *MI = MO.getParent();
    // Advance before possibly rewriting MO, which would unlink it from the
    // use list we are walking.
    ++RI;
    SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
    DEBUG(dbgs() << "\tremaining use: " << UseIdx << '\t' << *MI);
    if (li_->liveAt(UseIdx))
      continue;
    assert(MI->isDebugValue() && "Remaining non-debug use after remat dead.");
    // Keep the DBG_VALUE well-formed: mark <undef> if the whole interval is
    // gone, otherwise drop the register reference.
    if (li_->empty())
      MO.setIsUndef();
    else
      MO.setReg(0);
  }
}
264
265/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
266/// Return true on success, and MI will be erased.
267bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
268                                      const SmallVectorImpl<unsigned> &Ops) {
269  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
270  // operands.
271  SmallVector<unsigned, 8> FoldOps;
272  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
273    unsigned Idx = Ops[i];
274    MachineOperand &MO = MI->getOperand(Idx);
275    if (MO.isImplicit())
276      continue;
277    // FIXME: Teach targets to deal with subregs.
278    if (MO.getSubReg())
279      return false;
280    // Tied use operands should not be passed to foldMemoryOperand.
281    if (!MI->isRegTiedToDefOperand(Idx))
282      FoldOps.push_back(Idx);
283  }
284
285  MachineInstr *FoldMI = tii_.foldMemoryOperand(mf_, MI, FoldOps, stackSlot_);
286  if (!FoldMI)
287    return false;
288  MachineBasicBlock &MBB = *MI->getParent();
289  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
290  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
291  MBB.insert(MBB.erase(MI), FoldMI);
292  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
293  return true;
294}
295
296/// insertReload - Insert a reload of NewLI.reg before MI.
297void InlineSpiller::insertReload(LiveInterval &NewLI,
298                                 MachineBasicBlock::iterator MI) {
299  MachineBasicBlock &MBB = *MI->getParent();
300  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
301  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
302  --MI; // Point to load instruction.
303  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
304  vrm_.addSpillSlotUse(stackSlot_, MI);
305  DEBUG(dbgs() << "\treload:  " << LoadIdx << '\t' << *MI);
306  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
307                                       lis_.getVNInfoAllocator());
308  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
309}
310
311/// insertSpill - Insert a spill of NewLI.reg after MI.
312void InlineSpiller::insertSpill(LiveInterval &NewLI,
313                                MachineBasicBlock::iterator MI) {
314  MachineBasicBlock &MBB = *MI->getParent();
315  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
316  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
317  --MI; // Point to store instruction.
318  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
319  vrm_.addSpillSlotUse(stackSlot_, MI);
320  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
321  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
322                                        lis_.getVNInfoAllocator());
323  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
324}
325
/// spill - Spill li by first rematerializing as many uses as possible, then
/// folding or inserting reload/spill instructions around every remaining use
/// and def. New intervals that must be allocated are appended to newIntervals.
/// Note: earliestIndex is not used by this implementation.
void InlineSpiller::spill(LiveInterval *li,
                          std::vector<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs,
                          SlotIndex *earliestIndex) {
  DEBUG(dbgs() << "Inline spilling " << *li << "\n");
  assert(li->isSpillable() && "Attempting to spill already spilled value.");
  assert(!li->isStackSlot() && "Trying to spill a stack slot.");

  // Stash per-spill state used by the helper methods.
  li_ = li;
  newIntervals_ = &newIntervals;
  rc_ = mri_.getRegClass(li->reg);
  spillIs_ = &spillIs;

  reMaterializeAll();

  // Remat may handle everything.
  if (li_->empty())
    return;

  stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);

  // Iterate over instructions using register. skipInstruction() moves past
  // all operands of the current instruction, so rewriting or erasing it below
  // does not invalidate the iteration.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);

    // Attempt to fold memory ops. On success MI has been erased.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = mri_.createVirtualRegister(rc_);
    vrm_.grow();
    LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands to use the short-lived NewVReg.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        // Tied uses stay live into the def; everything else dies here.
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
    newIntervals.push_back(&NewLI);
  }
}
392