InlineSpiller.cpp revision e8e44e77afab78b324fd9cd504042d9a02eb6129
//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "Spiller.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
class InlineSpiller : public Spiller {
  MachineFunction &mf_;
  LiveIntervals &lis_;
  MachineLoopInfo &loops_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveInterval *li_;
  std::vector<LiveInterval*> *newIntervals_;
  const TargetRegisterClass *rc_;
  int stackSlot_;
  const SmallVectorImpl<LiveInterval*> *spillIs_;

  // Values of the current interval that can potentially remat.
  SmallPtrSet<VNInfo*, 8> reMattable_;

  // Values in reMattable_ that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunction *mf, LiveIntervals *lis, MachineLoopInfo *mli,
                VirtRegMap *vrm)
    : mf_(*mf), lis_(*lis), loops_(*mli), vrm_(*vrm),
      mfi_(*mf->getFrameInfo()),
      mri_(mf->getRegInfo()),
      tii_(*mf->getTarget().getInstrInfo()),
      tri_(*mf->getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)) {}

  void spill(LiveInterval *li,
             std::vector<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs,
             SlotIndex *earliestIndex);

private:
  bool allUsesAvailableAt(const MachineInstr *OrigMI, SlotIndex OrigIdx,
                          SlotIndex UseIdx);
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}

namespace llvm {
Spiller *createInlineSpiller(MachineFunction *mf,
                             LiveIntervals *lis,
                             MachineLoopInfo *mli,
                             VirtRegMap *vrm) {
  return new InlineSpiller(mf, lis, mli, vrm);
}
}

/// allUsesAvailableAt - Return true if all registers used by OrigMI at
/// OrigIdx are also available with the same value at UseIdx.
bool InlineSpiller::allUsesAvailableAt(const MachineInstr *OrigMI,
                                       SlotIndex OrigIdx,
                                       SlotIndex UseIdx) {
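  // Normalize both indices to the use slot so the value number queries below
  // compare the values that are live where the operands are read.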
  OrigIdx = OrigIdx.getUseIndex();
  UseIdx = UseIdx.getUseIndex();
  for (unsigned i = 0, e = OrigMI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = OrigMI->getOperand(i);
    if (!MO.isReg() || !MO.getReg() || MO.getReg() == li_->reg)
      continue;
    // Reserved registers are OK.
    if (MO.isUndef() || !lis_.hasInterval(MO.getReg()))
      continue;
    // We don't want to move any defs.
    if (MO.isDef())
      return false;
    // We cannot depend on virtual registers in spillIs_. They will be spilled.
    for (unsigned si = 0, se = spillIs_->size(); si != se; ++si)
      if ((*spillIs_)[si]->reg == MO.getReg())
        return false;

    LiveInterval &LI = lis_.getInterval(MO.getReg());
    const VNInfo *OVNI = LI.getVNInfoAt(OrigIdx);
    if (!OVNI)
      continue;
    if (OVNI != LI.getVNInfoAt(UseIdx))
      return false;
  }
  return true;
}

/// reMaterializeFor - Attempt to rematerialize li_->reg before MI instead of
/// reloading it.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = li_->getVNInfoAt(UseIdx);
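  // If no value of li_ is live at the use, the operand reads an undefined
  // value; mark it with an <undef> flag instead of reloading it.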
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg)
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }
  if (!reMattable_.count(OrigVNI)) {
    DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
                 << UseIdx << '\t' << *MI);
    return false;
  }
  MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
  if (!allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes li_->reg, it had better not require the same
  // register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(li_->reg, &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Allocate a new register for the remat.
  unsigned NewVReg = mri_.createVirtualRegister(rc_);
  vrm_.grow();
  LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
  NewLI.markNotSpillable();
  newIntervals_->push_back(&NewLI);

  // Finally we can rematerialize OrigMI before MI.
  MachineBasicBlock &MBB = *MI->getParent();
  tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
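  // reMaterialize() inserted the clone immediately before MI, so backing the
  // iterator up by one lands on the new instruction.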
  MachineBasicBlock::iterator RematMI = MI;
  SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
  DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t' << *RematMI);

  // Replace operands
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == li_->reg) {
      MO.setReg(NewVReg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t        " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, true,
                                      lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses of li_ as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  reMattable_.clear();
  usedValues_.clear();
  for (LiveInterval::const_vni_iterator I = li_->vni_begin(),
       E = li_->vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused() || !VNI->isDefAccurate())
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
      continue;
    reMattable_.insert(VNI);
  }

  // Often, no defs are remattable.
  if (reMattable_.empty())
    return;

  // Try to remat before all uses of li_->reg.
  bool anyRemat = false;
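  // skipInstruction() returns the current user and advances the iterator past
  // all of its operands, so it is safe for reMaterializeFor() to rewrite them.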
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();)
    anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
       E = reMattable_.end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->hasPHIKill() || usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    li_->removeValNo(VNI);
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where li_ is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(li_->reg);
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if li_ is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex NearIdx = lis_.getInstructionIndex(NextMI);
      if (li_->liveAt(NearIdx))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// Return true on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

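  // foldMemoryOperand() creates a new instruction; give it the old
  // instruction's slot index before erasing the original.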
  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload:  " << LoadIdx << '\t' << *MI);
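  // The reloaded value is only live from the load until its use by MI.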
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0, true,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
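  // The spilled value is live from its def by MI until the store saves it.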
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, true,
                                        lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

void InlineSpiller::spill(LiveInterval *li,
                          std::vector<LiveInterval*> &newIntervals,
                          SmallVectorImpl<LiveInterval*> &spillIs,
                          SlotIndex *earliestIndex) {
  DEBUG(dbgs() << "Inline spilling " << *li << "\n");
  assert(li->isSpillable() && "Attempting to spill already spilled value.");
  assert(!li->isStackSlot() && "Trying to spill a stack slot.");

  li_ = li;
  newIntervals_ = &newIntervals;
  rc_ = mri_.getRegClass(li->reg);
  spillIs_ = &spillIs;

  reMaterializeAll();

  // Remat may handle everything.
  if (li_->empty())
    return;

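  // All spills and reloads of li share a single stack slot.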
  stackSlot_ = vrm_.assignVirt2StackSlot(li->reg);

  // Iterate over instructions using register.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(li->reg);
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                           Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
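        // erase() returns an iterator to the next instruction, so the new
        // DBG_VALUE takes the old one's place.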
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(li->reg, &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    unsigned NewVReg = mri_.createVirtualRegister(rc_);
    vrm_.grow();
    LiveInterval &NewLI = lis_.getOrCreateInterval(NewVReg);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
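    // Untied uses of the new register are killed here; a def that is not dead
    // will need a spill after MI.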
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewVReg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
    newIntervals.push_back(&NewLI);
  }
}