InlineSpiller.cpp revision e9157e10cd2be2b3dbaf26baa925765562819266
//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// The inline spiller modifies the machine function directly instead of
// inserting spills and restores in VirtRegMap.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "Spiller.h"
#include "LiveRangeEdit.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

static cl::opt<bool>
VerifySpills("verify-spills", cl::desc("Verify after each spill/split"));

static cl::opt<bool>
ExtraSpillerSplits("extra-spiller-splits",
                   cl::desc("Enable additional splitting during spilling"));

namespace {
class InlineSpiller : public Spiller {
  MachineFunctionPass &pass_;
  MachineFunction &mf_;
  LiveIntervals &lis_;
  LiveStacks &lss_;
  MachineDominatorTree &mdt_;
  MachineLoopInfo &loops_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  const BitVector reserved_;

  SplitAnalysis splitAnalysis_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *edit_;
  const TargetRegisterClass *rc_;
  int stackSlot_;

  // Values that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : pass_(pass),
      mf_(mf),
      lis_(pass.getAnalysis<LiveIntervals>()),
      lss_(pass.getAnalysis<LiveStacks>()),
      mdt_(pass.getAnalysis<MachineDominatorTree>()),
      loops_(pass.getAnalysis<MachineLoopInfo>()),
      vrm_(vrm),
      mfi_(*mf.getFrameInfo()),
      mri_(mf.getRegInfo()),
      tii_(*mf.getTarget().getInstrInfo()),
      tri_(*mf.getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)),
      splitAnalysis_(mf, lis_, loops_) {}

  void spill(LiveInterval *li,
             SmallVectorImpl<LiveInterval*> &newIntervals,
             const SmallVectorImpl<LiveInterval*> &spillIs);

  void spill(LiveRangeEdit &);

private:
  bool split();

  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  bool coalesceStackAccess(MachineInstr *MI);
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}

namespace llvm {
Spiller *createInlineSpiller(MachineFunctionPass &pass,
                             MachineFunction &mf,
                             VirtRegMap &vrm) {
  if (VerifySpills)
    mf.verify(&pass);
  return new InlineSpiller(pass, mf, vrm);
}
}
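
// Usage sketch (illustrative, not part of the original file): a register
// allocator that gives up on a virtual register would typically drive this
// spiller roughly as follows, where `li` and `spillIs` stand in for the
// caller's chosen interval and its in-flight spill list:
//
//   Spiller *spiller = createInlineSpiller(pass, mf, vrm);
//   SmallVector<LiveInterval*, 4> newIntervals;
//   spiller->spill(&li, newIntervals, spillIs);
//   // newIntervals now holds the short, not-spillable intervals created
//   // around each use or def; they go back on the allocation queue.
//   delete spiller;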

/// split - try splitting the current interval into pieces that may be
/// allocated separately. Return true if successful.
bool InlineSpiller::split() {
  splitAnalysis_.analyze(&edit_->getParent());

  // Try splitting around loops.
  if (ExtraSpillerSplits) {
    const MachineLoop *loop = splitAnalysis_.getBestSplitLoop();
    if (loop) {
      SplitEditor(splitAnalysis_, lis_, vrm_, mdt_, *edit_)
        .splitAroundLoop(loop);
      return true;
    }
  }

  // Try splitting into single block intervals.
  SplitAnalysis::BlockPtrSet blocks;
  if (splitAnalysis_.getMultiUseBlocks(blocks)) {
    SplitEditor(splitAnalysis_, lis_, vrm_, mdt_, *edit_)
      .splitSingleBlocks(blocks);
    return true;
  }

  // Try splitting inside a basic block.
  if (ExtraSpillerSplits) {
    const MachineBasicBlock *MBB = splitAnalysis_.getBlockForInsideSplit();
    if (MBB) {
      SplitEditor(splitAnalysis_, lis_, vrm_, mdt_, *edit_)
        .splitInsideBlock(MBB);
      return true;
    }
  }

  return false;
}

/// reMaterializeFor - Attempt to rematerialize edit_->getReg() before MI
/// instead of reloading it.
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = edit_->getParent().getVNInfoAt(UseIdx);

  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg())
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }

  LiveRangeEdit::Remat RM(OrigVNI);
  if (!edit_->canRematerializeAt(RM, UseIdx, false, lis_)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes edit_->getReg(), it had better not require
  // the same register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit_->getReg(), &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = edit_->create(mri_, lis_, vrm_);
  NewLI.markNotSpillable();

  // Finally we can rematerialize OrigMI before MI.
  SlotIndex DefIdx = edit_->rematerializeAt(*MI->getParent(), MI, NewLI.reg, RM,
                                            lis_, tii_, tri_);
  DEBUG(dbgs() << "\tremat:  " << DefIdx << '\n');

  // Replace operands
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg()) {
      MO.setReg(NewLI.reg);
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t        " << UseIdx << '\t' << *MI);

  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}

/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  if (!edit_->anyRematerializable(lis_, tii_, 0))
    return;

  usedValues_.clear();

  // Try to remat before all uses of edit_->getReg().
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();)
     anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (LiveInterval::vni_iterator I = edit_->getParent().vni_begin(),
       E = edit_->getParent().vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->hasPHIKill() || !edit_->didRematerialize(VNI) ||
        usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    VNI->def = SlotIndex();
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may leave debug uses where the parent is no longer live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if parent is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex Idx = lis_.getInstructionIndex(NextMI);
      VNInfo *VNI = edit_->getParent().getVNInfoAt(Idx);
      if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}

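// Illustrative example (not from the original file): if the spilled value was
// defined by a trivially rematerializable instruction, e.g. a constant
// materialization such as
//   %vreg0 = MOV32ri 42
// then reMaterializeFor() clones that def into a fresh register right before
// the use instead of reloading %vreg0 from the stack:
//   %vreg5 = MOV32ri 42
//   ... use of %vreg5 ...
// Values that could not be rematted at some use are recorded in usedValues_
// so their original defs are not deleted by reMaterializeAll().
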
/// If MI is a load or store of stackSlot_, it can be removed.
bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
  int FI = 0;
  unsigned reg;
  if (!(reg = tii_.isLoadFromStackSlot(MI, FI)) &&
      !(reg = tii_.isStoreToStackSlot(MI, FI)))
    return false;

  // We have a stack access. Is it the right register and slot?
  if (reg != edit_->getReg() || FI != stackSlot_)
    return false;

  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
  lis_.RemoveMachineInstrFromMaps(MI);
  MI->eraseFromParent();
  return true;
}

/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
/// Return true on success, and MI will be erased.
bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                      const SmallVectorImpl<unsigned> &Ops) {
  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
  // operands.
  SmallVector<unsigned, 8> FoldOps;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned Idx = Ops[i];
    MachineOperand &MO = MI->getOperand(Idx);
    if (MO.isImplicit())
      continue;
    // FIXME: Teach targets to deal with subregs.
    if (MO.getSubReg())
      return false;
    // Tied use operands should not be passed to foldMemoryOperand.
    if (!MI->isRegTiedToDefOperand(Idx))
      FoldOps.push_back(Idx);
  }

  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
  if (!FoldMI)
    return false;
  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
  MI->eraseFromParent();
  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
  return true;
}

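// Illustrative example (x86-flavoured, not from the original file): when the
// spilled register %vreg0 is used by
//   %vreg6 = ADD32rr %vreg6, %vreg0
// the target may fold the stack slot directly into the instruction, giving
//   %vreg6 = ADD32rm %vreg6, <fi#stackSlot_>
// so no reload instruction and no new interval are needed for this use.
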
/// insertReload - Insert a reload of NewLI.reg before MI.
void InlineSpiller::insertReload(LiveInterval &NewLI,
                                 MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
  --MI; // Point to load instruction.
  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\treload:  " << LoadIdx << '\t' << *MI);
  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
                                       lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
}

/// insertSpill - Insert a spill of NewLI.reg after MI.
void InlineSpiller::insertSpill(LiveInterval &NewLI,
                                MachineBasicBlock::iterator MI) {
  MachineBasicBlock &MBB = *MI->getParent();
  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
  --MI; // Point to store instruction.
  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
  vrm_.addSpillSlotUse(stackSlot_, MI);
  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
}

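// Illustrative shape of the code produced around a single instruction that
// both reads and writes the spilled register when folding fails (a sketch,
// not from the original file; actual opcodes are target-specific):
//   %newreg = LOAD <fi#stackSlot_>          ; insertReload()
//   ... original instruction rewritten to use/def %newreg ...
//   STORE %newreg, <fi#stackSlot_>          ; insertSpill()
// NewLI covers only this tiny range, so it is marked not spillable and is
// expected to get a register on the next allocation round.
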
void InlineSpiller::spill(LiveInterval *li,
                          SmallVectorImpl<LiveInterval*> &newIntervals,
                          const SmallVectorImpl<LiveInterval*> &spillIs) {
  LiveRangeEdit edit(*li, newIntervals, spillIs);
  spill(edit);
  if (VerifySpills)
    mf_.verify(&pass_);
}

void InlineSpiller::spill(LiveRangeEdit &edit) {
  edit_ = &edit;
  assert(!edit.getParent().isStackSlot() && "Trying to spill a stack slot.");
  DEBUG(dbgs() << "Inline spilling "
               << mri_.getRegClass(edit.getReg())->getName()
               << ':' << edit.getParent() << "\n");
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");

  if (split())
    return;

  reMaterializeAll();

  // Remat may handle everything.
  if (edit_->getParent().empty())
    return;

  rc_ = mri_.getRegClass(edit.getReg());
  stackSlot_ = vrm_.assignVirt2StackSlot(edit_->getReg());

  // Update LiveStacks now that we are committed to spilling.
  LiveInterval &stacklvr = lss_.getOrCreateInterval(stackSlot_, rc_);
  assert(stacklvr.empty() && "Just created stack slot not empty");
  stacklvr.getNextValue(SlotIndex(), 0, lss_.getVNInfoAllocator());
  stacklvr.MergeRangesInAsValue(edit_->getParent(), stacklvr.getValNumInfo(0));

  // Iterate over instructions using register.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(edit.getReg());
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                           Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit.getReg(), &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = edit.create(mri_, lis_, vrm_);
    NewLI.markNotSpillable();

    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}