// InlineSpiller.cpp — revision ca2b7205e07ebc10ce4f60f9bd02706756bdab0a
1//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// The inline spiller modifies the machine function directly instead of
11// inserting spills and restores in VirtRegMap.
12//
13//===----------------------------------------------------------------------===//
14
15#define DEBUG_TYPE "spiller"
16#include "Spiller.h"
17#include "SplitKit.h"
18#include "VirtRegMap.h"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "llvm/CodeGen/MachineFrameInfo.h"
21#include "llvm/CodeGen/MachineFunction.h"
22#include "llvm/CodeGen/MachineLoopInfo.h"
23#include "llvm/CodeGen/MachineRegisterInfo.h"
24#include "llvm/Target/TargetMachine.h"
25#include "llvm/Target/TargetInstrInfo.h"
26#include "llvm/Support/Debug.h"
27#include "llvm/Support/raw_ostream.h"
28
29using namespace llvm;
30
31namespace {
// Spiller implementation that edits the machine function directly: it inserts
// reloads/spills, folds stack-slot references into instructions, and
// rematerializes cheap definitions, instead of deferring the rewriting to
// VirtRegMap.
class InlineSpiller : public Spiller {
  // References cached from the pass and function at construction time.
  MachineFunction &mf_;
  LiveIntervals &lis_;
  MachineLoopInfo &loops_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  // Target's reserved registers. NOTE(review): not referenced anywhere else
  // in this file in this revision.
  const BitVector reserved_;

  // Analysis used by split() to look for profitable live range splits.
  SplitAnalysis splitAnalysis_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveInterval *li_;                              // Interval currently being spilled.
  std::vector<LiveInterval*> *newIntervals_;      // Output list of created intervals.
  const TargetRegisterClass *rc_;                 // Register class of li_->reg.
  int stackSlot_;                                 // Stack slot assigned to li_->reg.
  const SmallVectorImpl<LiveInterval*> *spillIs_; // Other intervals about to be spilled.

  // Values of the current interval that can potentially remat.
  SmallPtrSet<VNInfo*, 8> reMattable_;

  // Values in reMattable_ that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  // Private destructor: instances are created by createInlineSpiller() and
  // destroyed only through the Spiller interface.
  ~InlineSpiller() {}

public:
  // Caches all required analyses from the owning pass; `pass` must therefore
  // have LiveIntervals and MachineLoopInfo available.
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : mf_(mf),
      lis_(pass.getAnalysis<LiveIntervals>()),
      loops_(pass.getAnalysis<MachineLoopInfo>()),
      vrm_(vrm),
      mfi_(*mf.getFrameInfo()),
      mri_(mf.getRegInfo()),
      tii_(*mf.getTarget().getInstrInfo()),
      tri_(*mf.getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)),
      splitAnalysis_(mf, lis_, loops_) {}

  // Spiller interface: spill `li`, appending any intervals created in the
  // process to `newIntervals`.
  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs,
             SlotIndex *earliestIndex);

private:
  // Attempt region splitting instead of spilling; returns true on success.
  bool split();

  // True if every register read by OrigMI at OrigIdx has the same value at
  // UseIdx, i.e. the instruction can be re-executed there.
  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
                          SlotIndex UseIdx);
  // Rematerialize li_'s value before one use; returns true if MI no longer
  // needs a reload.
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  // Run rematerialization over all uses of li_ and clean up dead defs.
  void reMaterializeAll();

  // Fold stackSlot_ into MI's operands Ops if the target supports it.
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  // Insert a reload before / spill after MI and extend NewLI accordingly.
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
93}
94
namespace llvm {
/// createInlineSpiller - Factory for the inline spiller. Returns a
/// heap-allocated InlineSpiller through the opaque Spiller interface; the
/// caller takes ownership.
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  return new InlineSpiller(pass, mf, vrm);
}
}
102
103/// split - try splitting the current interval into pieces that may allocate
104/// separately. Return true if successful.
105bool InlineSpiller::split() {
106  // FIXME: Add intra-MBB splitting.
107  if (lis_.intervalIsInOneMBB(*li_))
108    return false;
109
110  splitAnalysis_.analyze(li_);
111
112  if (const MachineLoop *loop = splitAnalysis_.getBestSplitLoop()) {
113    SplitEditor(splitAnalysis_, lis_, vrm_).splitAroundLoop(loop);
114    return true;
115  }
116  return false;
117}
118
119/// allUsesAvailableAt - Return true if all registers used by OrigMI at
120/// OrigIdx are also available with the same value at UseIdx.
121bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
122                                       SlotIndex OrigIdx,
123                                       SlotIndex UseIdx) {
124  OrigIdx = OrigIdx.getUseIndex();
125  UseIdx = UseIdx.getUseIndex();
126  for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
127    const MachineOperand &MO = OrigMI->getOperand(i);
128    if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
129      continue;
130    // Reserved registers are OK.
131    if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
132      continue;
133    // We don't want to move any defs.
134    if (MO.isDef())
135      return false;
136    // We cannot depend on virtual registers in spillIs_. They will be spilled.
137    for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
138      if ((*spillIs_)[si]->reg == MO.getReg())
139        return false;
140
141    LiveInterval &LI = lis_.getInterval(MO.getReg());
142    const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
143    if (!OVNI)
144      continue;
145    if (OVNI != LI.getVNInfoAt(UseIdx))
146      return false;
147  }
148  return true;
149}
150
/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
/// reloading it. Returns true if MI no longer needs a reload (either because
/// a remat was inserted or because the use reads an undefined value).
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
  if (!OrigVNI) {
    // li_ carries no value here: the use reads undefined data, so flag the
    // operands instead of reloading anything.
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }
  if (!reMattable_.count(OrigVNI)) {
    // The defining instruction is not trivially rematerializable.
    DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
                 << UseIdx << '\t' << *MI);
    return false;
  }
  MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
  if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
    // An input operand changed value between def and use. Remember that this
    // value is still needed so reMaterializeAll() keeps its def.
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes li_->reg, it had better not require the same
  // register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = mri_.createVirtualRegister(rc_);
  vrm_.grow();
  LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
  NewLI.markNotSpillable();
  newIntervals_->push_back(&NewLI);

  // Finally we can rematerialize OrigMI before MI.
  MachineBasicBlock &MBB = *MI->getParent();
  tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
  // The remat was inserted immediately before MI; step back to index it.
  MachineBasicBlock::iterator RematMI = MI;
  SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
  DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t' << *RematMI);

  // Replace operands
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t        " << UseIdx << '\t' << *MI);

  // The rematted value is live only from its def to this single use.
  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}
224
/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  reMattable_.clear();
  usedValues_.clear();
  for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
       E = li_->vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    // Skip values with no accurately-known defining instruction.
    if (VNI->isUnused() || !VNI->isDefAccurate())
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
      continue;
    reMattable_.insert(VNI);
  }

  // Often, no defs are remattable.
  if (reMattable_.empty())
    return;

  // Try to remat before all uses of li_->reg.
  // RI.skipInstruction() hands out each using instruction in turn, so the loop
  // stays valid while reMaterializeFor() rewrites that instruction's operands.
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();)
     anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
       E = reMattable_.end(); I != E; ++I) {
    VNInfo *VNI = *I;
    // Keep defs that still have un-rematted uses (usedValues_) or whose value
    // escapes through a PHI.
    if (VNI->hasPHIKill() || usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    li_->removeValNo(VNI);
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where li_ is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if li_ is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex NearIdx = lis_.getInstructionIndex(NextMI);
      if (li_->liveAt(NearIdx))
        continue;
    }
    // Otherwise the DBG_VALUE would describe a dead location; drop it.
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}
292
293/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
294/// Return true on success, and MI will be erased.
295bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
296                                      const SmallVectorImpl<unsigned> &Ops) {
297  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
298  // operands.
299  SmallVector<unsigned, 8> FoldOps;
300  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
301    unsigned Idx = Ops[i];
302    MachineOperand &MO = MI->getOperand(Idx);
303    if (MO.isImplicit())
304      continue;
305    // FIXME: Teach targets to deal with subregs.
306    if (MO.getSubReg())
307      return false;
308    // Tied use operands should not be passed to foldMemoryOperand.
309    if (!MI->isRegTiedToDefOperand(Idx))
310      FoldOps.push_back(Idx);
311  }
312
313  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
314  if (!FoldMI)
315    return false;
316  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
317  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
318  MI->eraseFromParent();
319  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
320  return true;
321}
322
/// insertReload - Insert a reload of NewLI.reg before MI, and extend NewLI
/// with a live range from the load to MI's use slot.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  // Index of the instruction that will consume the reloaded value.
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload:  " << LoadIdx << '\t' << *MI);
  // NewLI carries exactly one value: live from the load to the original
  // instruction.
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}
337
/// insertSpill - Insert a spill of NewLI.reg after MI, and extend NewLI with
/// a live range from MI's def slot to the store.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  // Index of the instruction whose result is being spilled.
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  // ++MI makes the store go after the defining instruction.
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  // The value is live from the original def until the store consumes it.
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
                                        lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}
352
/// spill - Spill the whole interval li: first try region splitting, then
/// rematerialization, and finally insert reloads/spills (or fold memory
/// operands) around every remaining use. New intervals created along the way
/// are appended to newIntervals. earliestIndex is currently unused.
void InlineSpiller::spill(LiveInterval *li,
                          std::vector<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs,
                          SlotIndex *earliestIndex) {
  DEBUG(dbgs() << "Inline spilling " << *li << "\n");
  assert(li->isSpillable() && "Attempting to spill already spilled value.");
  assert(!li->isStackSlot() && "Trying to spill a stack slot.");

  // Stash the per-spill state used by the helper methods.
  li_ = li;
  newIntervals_ = &newIntervals;
  rc_ = mri_.getRegClass(li->reg);
  spillIs_ = &spillIs;

  // A successful split makes spilling unnecessary.
  if (split())
    return;

  reMaterializeAll();

  // Remat may handle everything.
  if (li_->empty())
    return;

  stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);

  // Iterate over instructions using register.
  // RI.skipInstruction() yields each user once, so rewriting or erasing the
  // current instruction inside the loop is safe.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                           Offset, MDPtr, DL)) {
        // Swap the old DBG_VALUE for the frame-index form in place.
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        // Target can't describe a stack location; drop the debug info.
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = mri_.createVirtualRegister(rc_);
    vrm_.grow();
    LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        // The reload (or remat) value dies here unless tied to a def.
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // Only store the value back if somebody may read it later.
    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
    newIntervals.push_back(&NewLI);
  }
}
440