InlineSpiller.cpp revision 20d56d957f5b3c3ae4ea6316b6eef8b5df2daebb
//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "Spiller.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class InlineSpiller : public Spiller {
  MachineFunction &mf_;
  LiveIntervals &lis_;
  MachineLoopInfo &loops_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  SplitAnalysis splitAnalysis_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveInterval *li_;
  std::vector<LiveInterval*> *newIntervals_;
  const TargetRegisterClass *rc_;
  int stackSlot_;
  const SmallVectorImpl<LiveInterval*> *spillIs_;

  // Values of the current interval that can potentially remat.
  SmallPtrSet<VNInfo*, 8> reMattable_;

  // Values in reMattable_ that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunction *mf, LiveIntervals *lis, MachineLoopInfo *mli,
                VirtRegMap *vrm)
    : mf_(*mf), lis_(*lis), loops_(*mli), vrm_(*vrm),
      mfi_(*mf->getFrameInfo()),
      mri_(mf->getRegInfo()),
      tii_(*mf->getTarget().getInstrInfo()),
      tri_(*mf->getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)),
      splitAnalysis_(mf, lis, mli) {}

  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs,
             SlotIndex *earliestIndex);

private:
  bool split();

  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
                          SlotIndex UseIdx);
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}

namespace llvm {
Spiller *createInlineSpiller(MachineFunction *mf,
                             LiveIntervals *lis,
                             MachineLoopInfo *mli,
                             VirtRegMap *vrm) {
  return new InlineSpiller(mf, lis, mli, vrm);
}
}

/// split - try splitting the current interval into pieces that may allocate
/// separately. Return true if successful.
bool InlineSpiller::split() {
  // FIXME: Add intra-MBB splitting.
  if (lis_.intervalIsInOneMBB(*li_))
    return false;

  splitAnalysis_.analyze(li_);

  if (const MachineLoop *loop = splitAnalysis_.getBestSplitLoop()) {
    if (splitAroundLoop(splitAnalysis_, loop))
      return true;
  }
  return false;
}

/// allUsesAvailableAt - Return true if all registers used by OrigMI at
/// OrigIdx are also available with the same value at UseIdx.
bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
                                       SlotIndex OrigIdx,
                                       SlotIndex UseIdx) {
  OrigIdx = OrigIdx.getUseIndex();
  UseIdx = UseIdx.getUseIndex();
  for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = OrigMI->getOperand(i);
    if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
      continue;
    // Reserved registers are OK.
    if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
      continue;
    // We don't want to move any defs.
    if (MO.isDef())
      return false;
    // We cannot depend on virtual registers in spillIs_. They will be spilled.
    for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
      if ((*spillIs_)[si]->reg == MO.getReg())
        return false;

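    // Remat is only safe if the operand carries the same value at UseIdx as it
    // did at OrigIdx; otherwise the copied instruction would read different data.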
    LiveInterval &LI = lis_.getInterval(MO.getReg());
    const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
    if (!OVNI)
      continue;
    if (OVNI != LI.getVNInfoAt(UseIdx))
      return false;
  }
  return true;
}

/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
/// reloading it.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
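  // If li_ has no value live at this use, the instruction reads undefined data.
  // Mark the operands <undef> instead of reloading from the stack slot.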
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }
  if (!reMattable_.count(OrigVNI)) {
    DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
                 << UseIdx << '\t' << *MI);
    return false;
  }
  MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
  if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes li_->reg, it had better not require the same
  // register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = mri_.createVirtualRegister(rc_);
  vrm_.grow();
  LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
  NewLI.markNotSpillable();
  newIntervals_->push_back(&NewLI);

  // Finally we can rematerialize OrigMI before MI.
  MachineBasicBlock &MBB = *MI->getParent();
  tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
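  // reMaterialize inserted the new instruction immediately before MI, so
  // backing up the iterator by one finds it for slot indexing.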
  MachineBasicBlock::iterator RematMI = MI;
  SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
  DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t' << *RematMI);

  // Replace operands
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t        " << UseIdx << '\t' << *MI);

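  // The remat register only needs a live range from its def up to this use.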
  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
                                      lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  reMattable_.clear();
  usedValues_.clear();
  for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
       E = li_->vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused() || !VNI->isDefAccurate())
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
      continue;
    reMattable_.insert(VNI);
  }

  // Often, no defs are remattable.
  if (reMattable_.empty())
    return;

  // Try to remat before all uses of li_->reg.
  bool anyRemat = false;
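  // skipInstruction() returns the current user and advances RI past all of its
  // operands, so the walk stays valid while reMaterializeFor rewrites them.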
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();)
     anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
       E = reMattable_.end(); I != E; ++I) {
    VNInfo *VNI = *I;
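    // Keep the def if the value is live out through a PHI or if some use of it
    // could not be rematerialized.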
    if (VNI->hasPHIKill() || usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    li_->removeValNo(VNI);
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where li_ is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if li_ is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex NearIdx = lis_.getInstructionIndex(NextMI);
      if (li_->liveAt(NearIdx))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// Return true on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload:  " << LoadIdx << '\t' << *MI);
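  // The reloaded value is defined by the load and only has to live until its
  // use at MI.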
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
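  // The spilled value is defined by MI at Idx and only has to live until the
  // store that follows it.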
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
                                        lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

void InlineSpiller::spill(LiveInterval *li,
                          std::vector<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs,
                          SlotIndex *earliestIndex) {
  DEBUG(dbgs() << "Inline spilling " << *li << "\n");
  assert(li->isSpillable() && "Attempting to spill already spilled value.");
  assert(!li->isStackSlot() && "Trying to spill a stack slot.");

  li_ = li;
  newIntervals_ = &newIntervals;
  rc_ = mri_.getRegClass(li->reg);
  spillIs_ = &spillIs;

  if (split())
    return;

  reMaterializeAll();

  // Remat may handle everything.
  if (li_->empty())
    return;

  stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);

  // Iterate over instructions using register.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
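      // Rebuild the DBG_VALUE from its offset (operand 1) and variable
      // metadata (operand 2), referring to the stack slot instead of the register.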
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                           Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = mri_.createVirtualRegister(rc_);
    vrm_.grow();
    LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewVReg);
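      // NewVReg is only live across this instruction: untied uses kill it, and
      // any live def will be spilled right after MI.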
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
    newIntervals.push_back(&NewLI);
  }
}