InlineSpiller.cpp revision d6f5f4a0893ebdfe7067ca89de78ac7db44794aa
1//===-------- InlineSpiller.cpp - Insert spills and restores inline -------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// The inline spiller modifies the machine function directly instead of
11// inserting spills and restores in VirtRegMap.
12//
13//===----------------------------------------------------------------------===//
14
15#define DEBUG_TYPE "spiller"
16#include "Spiller.h"
17#include "LiveRangeEdit.h"
18#include "SplitKit.h"
19#include "VirtRegMap.h"
20#include "llvm/CodeGen/LiveIntervalAnalysis.h"
21#include "llvm/CodeGen/MachineFrameInfo.h"
22#include "llvm/CodeGen/MachineFunction.h"
23#include "llvm/CodeGen/MachineLoopInfo.h"
24#include "llvm/CodeGen/MachineRegisterInfo.h"
25#include "llvm/Target/TargetMachine.h"
26#include "llvm/Target/TargetInstrInfo.h"
27#include "llvm/Support/Debug.h"
28#include "llvm/Support/raw_ostream.h"
29
30using namespace llvm;
31
namespace {
/// InlineSpiller - Implements the Spiller interface by editing the machine
/// function directly: it tries live range splitting and rematerialization
/// first, then inserts reload/spill instructions around each remaining use.
class InlineSpiller : public Spiller {
  MachineFunctionPass &pass_;
  MachineFunction &mf_;
  LiveIntervals &lis_;
  MachineLoopInfo &loops_;
  VirtRegMap &vrm_;
  MachineFrameInfo &mfi_;
  MachineRegisterInfo &mri_;
  const TargetInstrInfo &tii_;
  const TargetRegisterInfo &tri_;
  // Registers reserved by the target; computed once at construction.
  const BitVector reserved_;

  // Analysis used by split() to find profitable split points.
  SplitAnalysis splitAnalysis_;

  // Variables that are valid during spill(), but used by multiple methods.
  LiveRangeEdit *edit_;
  const TargetRegisterClass *rc_;
  int stackSlot_;

  // Values of the current interval that can potentially remat.
  SmallPtrSet<VNInfo*, 8> reMattable_;

  // Values in reMattable_ that failed to remat at some point.
  SmallPtrSet<VNInfo*, 8> usedValues_;

  // Private destructor: instances are created by createInlineSpiller() and
  // destroyed through the Spiller interface.
  ~InlineSpiller() {}

public:
  InlineSpiller(MachineFunctionPass &pass,
                MachineFunction &mf,
                VirtRegMap &vrm)
    : pass_(pass),
      mf_(mf),
      lis_(pass.getAnalysis<LiveIntervals>()),
      loops_(pass.getAnalysis<MachineLoopInfo>()),
      vrm_(vrm),
      mfi_(*mf.getFrameInfo()),
      mri_(mf.getRegInfo()),
      tii_(*mf.getTarget().getInstrInfo()),
      tri_(*mf.getTarget().getRegisterInfo()),
      reserved_(tri_.getReservedRegs(mf_)),
      splitAnalysis_(mf, lis_, loops_) {}

  /// spill - Spiller interface entry point; wraps arguments in a
  /// LiveRangeEdit and forwards to spill(LiveRangeEdit&).
  void spill(LiveInterval *li,
             SmallVectorImpl<LiveInterval*> &newIntervals,
             SmallVectorImpl<LiveInterval*> &spillIs);

  /// spill - Spill the live range described by the edit.
  void spill(LiveRangeEdit &);

private:
  // Try splitting the current interval; returns true on success.
  bool split();

  // Rematerialization: per-use attempt, and the driver over all uses.
  bool reMaterializeFor(MachineBasicBlock::iterator MI);
  void reMaterializeAll();

  // Remove MI if it is a redundant access of stackSlot_.
  bool coalesceStackAccess(MachineInstr *MI);
  // Fold the stack slot into MI's operands Ops; erases MI on success.
  bool foldMemoryOperand(MachineBasicBlock::iterator MI,
                         const SmallVectorImpl<unsigned> &Ops);
  // Insert a reload before / spill after MI for NewLI.reg.
  void insertReload(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
  void insertSpill(LiveInterval &NewLI, MachineBasicBlock::iterator MI);
};
}
95
96namespace llvm {
97Spiller *createInlineSpiller(MachineFunctionPass &pass,
98                             MachineFunction &mf,
99                             VirtRegMap &vrm) {
100  return new InlineSpiller(pass, mf, vrm);
101}
102}
103
104/// split - try splitting the current interval into pieces that may allocate
105/// separately. Return true if successful.
106bool InlineSpiller::split() {
107  splitAnalysis_.analyze(&edit_->getParent());
108
109  // Try splitting around loops.
110  if (const MachineLoop *loop = splitAnalysis_.getBestSplitLoop()) {
111    SplitEditor(splitAnalysis_, lis_, vrm_, *edit_)
112      .splitAroundLoop(loop);
113    return true;
114  }
115
116  // Try splitting into single block intervals.
117  SplitAnalysis::BlockPtrSet blocks;
118  if (splitAnalysis_.getMultiUseBlocks(blocks)) {
119    SplitEditor(splitAnalysis_, lis_, vrm_, *edit_)
120      .splitSingleBlocks(blocks);
121    return true;
122  }
123
124  // Try splitting inside a basic block.
125  if (const MachineBasicBlock *MBB = splitAnalysis_.getBlockForInsideSplit()) {
126    SplitEditor(splitAnalysis_, lis_, vrm_, *edit_)
127      .splitInsideBlock(MBB);
128    return true;
129  }
130
131  return false;
132}
133
/// reMaterializeFor - Attempt to rematerialize edit_->getReg() before MI instead of
/// reloading it. Returns true if MI no longer needs a reload (either the
/// value was rematted, or the use turned out to be <undef>).
bool InlineSpiller::reMaterializeFor(MachineBasicBlock::iterator MI) {
  SlotIndex UseIdx = lis_.getInstructionIndex(MI).getUseIndex();
  VNInfo *OrigVNI = edit_->getParent().getVNInfoAt(UseIdx);
  // No live value at this use: the register is not actually live here, so
  // mark the operands <undef> and skip any reload.
  if (!OrigVNI) {
    DEBUG(dbgs() << "\tadding <undef> flags: ");
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg())
        MO.setIsUndef();
    }
    DEBUG(dbgs() << UseIdx << '\t' << *MI);
    return true;
  }
  // Value was not identified as remattable by reMaterializeAll().
  if (!reMattable_.count(OrigVNI)) {
    DEBUG(dbgs() << "\tusing non-remat valno " << OrigVNI->id << ": "
                 << UseIdx << '\t' << *MI);
    return false;
  }
  MachineInstr *OrigMI = lis_.getInstructionFromIndex(OrigVNI->def);
  // The defining instruction's operands must still hold the same values at
  // UseIdx, otherwise re-executing it here would compute something else.
  if (!edit_->allUsesAvailableAt(OrigMI, OrigVNI->def, UseIdx, lis_)) {
    usedValues_.insert(OrigVNI);
    DEBUG(dbgs() << "\tcannot remat for " << UseIdx << '\t' << *MI);
    return false;
  }

  // If the instruction also writes edit_->getReg(), it had better not require the same
  // register for uses and defs.
  bool Reads, Writes;
  SmallVector<unsigned, 8> Ops;
  tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit_->getReg(), &Ops);
  if (Writes) {
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      // Reject tied uses and subreg defs -- both would force the remat value
      // and the def into the same register.
      if (MO.isUse() ? MI->isRegTiedToDefOperand(Ops[i]) : MO.getSubReg()) {
        usedValues_.insert(OrigVNI);
        DEBUG(dbgs() << "\tcannot remat tied reg: " << UseIdx << '\t' << *MI);
        return false;
      }
    }
  }

  // Allocate a new register for the remat.
  LiveInterval &NewLI = edit_->create(mri_, lis_, vrm_);
  NewLI.markNotSpillable();

  // Finally we can rematerialize OrigMI before MI.
  MachineBasicBlock &MBB = *MI->getParent();
  tii_.reMaterialize(MBB, MI, NewLI.reg, 0, OrigMI, tri_);
  // The rematted copy was inserted just before MI; step back to index it.
  MachineBasicBlock::iterator RematMI = MI;
  SlotIndex DefIdx = lis_.InsertMachineInstrInMaps(--RematMI).getDefIndex();
  DEBUG(dbgs() << "\tremat:  " << DefIdx << '\t' << *RematMI);

  // Replace operands: rewrite MI's uses of the old register to the new one.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(Ops[i]);
    if (MO.isReg() && MO.isUse() && MO.getReg() == edit_->getReg()) {
      MO.setReg(NewLI.reg);
      // The new register's live range ends at this instruction.
      MO.setIsKill();
    }
  }
  DEBUG(dbgs() << "\t        " << UseIdx << '\t' << *MI);

  // Give the new interval a single value live from the remat def to the use.
  VNInfo *DefVNI = NewLI.getNextValue(DefIdx, 0, lis_.getVNInfoAllocator());
  NewLI.addRange(LiveRange(DefIdx, UseIdx.getDefIndex(), DefVNI));
  DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  return true;
}
203
/// reMaterializeAll - Try to rematerialize as many uses as possible,
/// and trim the live ranges after.
void InlineSpiller::reMaterializeAll() {
  // Do a quick scan of the interval values to find if any are remattable.
  reMattable_.clear();
  usedValues_.clear();
  for (LiveInterval::const_vni_iterator I = edit_->getParent().vni_begin(),
       E = edit_->getParent().vni_end(); I != E; ++I) {
    VNInfo *VNI = *I;
    if (VNI->isUnused())
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    if (!DefMI || !tii_.isTriviallyReMaterializable(DefMI))
      continue;
    reMattable_.insert(VNI);
  }

  // Often, no defs are remattable.
  if (reMattable_.empty())
    return;

  // Try to remat before all uses of edit_->getReg().
  // skipInstruction() advances past the current instruction first, so
  // reMaterializeFor() may safely modify or erase the use it is handed.
  bool anyRemat = false;
  for (MachineRegisterInfo::use_nodbg_iterator
       RI = mri_.use_nodbg_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();)
     anyRemat |= reMaterializeFor(MI);

  if (!anyRemat)
    return;

  // Remove any values that were completely rematted.
  bool anyRemoved = false;
  for (SmallPtrSet<VNInfo*, 8>::iterator I = reMattable_.begin(),
       E = reMattable_.end(); I != E; ++I) {
    VNInfo *VNI = *I;
    // Keep the def if the value is live-out through a PHI, or if some use
    // failed to remat (recorded in usedValues_).
    if (VNI->hasPHIKill() || usedValues_.count(VNI))
      continue;
    MachineInstr *DefMI = lis_.getInstructionFromIndex(VNI->def);
    DEBUG(dbgs() << "\tremoving dead def: " << VNI->def << '\t' << *DefMI);
    lis_.RemoveMachineInstrFromMaps(DefMI);
    vrm_.RemoveMachineInstrFromMaps(DefMI);
    DefMI->eraseFromParent();
    // Invalidate the value's def index; its instruction no longer exists.
    VNI->def = SlotIndex();
    anyRemoved = true;
  }

  if (!anyRemoved)
    return;

  // Removing values may cause debug uses where parent is not live.
  for (MachineRegisterInfo::use_iterator RI = mri_.use_begin(edit_->getReg());
       MachineInstr *MI = RI.skipInstruction();) {
    if (!MI->isDebugValue())
      continue;
    // Try to preserve the debug value if parent is live immediately after it.
    MachineBasicBlock::iterator NextMI = MI;
    ++NextMI;
    if (NextMI != MI->getParent()->end() && !lis_.isNotInMIMap(NextMI)) {
      SlotIndex Idx = lis_.getInstructionIndex(NextMI);
      VNInfo *VNI = edit_->getParent().getVNInfoAt(Idx);
      if (VNI && (VNI->hasPHIKill() || usedValues_.count(VNI)))
        continue;
    }
    DEBUG(dbgs() << "Removing debug info due to remat:" << "\t" << *MI);
    MI->eraseFromParent();
  }
}
272
273/// If MI is a load or store of stackSlot_, it can be removed.
274bool InlineSpiller::coalesceStackAccess(MachineInstr *MI) {
275  int FI = 0;
276  unsigned reg;
277  if (!(reg = tii_.isLoadFromStackSlot(MI, FI)) &&
278      !(reg = tii_.isStoreToStackSlot(MI, FI)))
279    return false;
280
281  // We have a stack access. Is it the right register and slot?
282  if (reg != edit_->getReg() || FI != stackSlot_)
283    return false;
284
285  DEBUG(dbgs() << "Coalescing stack access: " << *MI);
286  lis_.RemoveMachineInstrFromMaps(MI);
287  MI->eraseFromParent();
288  return true;
289}
290
291/// foldMemoryOperand - Try folding stack slot references in Ops into MI.
292/// Return true on success, and MI will be erased.
293bool InlineSpiller::foldMemoryOperand(MachineBasicBlock::iterator MI,
294                                      const SmallVectorImpl<unsigned> &Ops) {
295  // TargetInstrInfo::foldMemoryOperand only expects explicit, non-tied
296  // operands.
297  SmallVector<unsigned, 8> FoldOps;
298  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
299    unsigned Idx = Ops[i];
300    MachineOperand &MO = MI->getOperand(Idx);
301    if (MO.isImplicit())
302      continue;
303    // FIXME: Teach targets to deal with subregs.
304    if (MO.getSubReg())
305      return false;
306    // Tied use operands should not be passed to foldMemoryOperand.
307    if (!MI->isRegTiedToDefOperand(Idx))
308      FoldOps.push_back(Idx);
309  }
310
311  MachineInstr *FoldMI = tii_.foldMemoryOperand(MI, FoldOps, stackSlot_);
312  if (!FoldMI)
313    return false;
314  lis_.ReplaceMachineInstrInMaps(MI, FoldMI);
315  vrm_.addSpillSlotUse(stackSlot_, FoldMI);
316  MI->eraseFromParent();
317  DEBUG(dbgs() << "\tfolded: " << *FoldMI);
318  return true;
319}
320
321/// insertReload - Insert a reload of NewLI.reg before MI.
322void InlineSpiller::insertReload(LiveInterval &NewLI,
323                                 MachineBasicBlock::iterator MI) {
324  MachineBasicBlock &MBB = *MI->getParent();
325  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
326  tii_.loadRegFromStackSlot(MBB, MI, NewLI.reg, stackSlot_, rc_, &tri_);
327  --MI; // Point to load instruction.
328  SlotIndex LoadIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
329  vrm_.addSpillSlotUse(stackSlot_, MI);
330  DEBUG(dbgs() << "\treload:  " << LoadIdx << '\t' << *MI);
331  VNInfo *LoadVNI = NewLI.getNextValue(LoadIdx, 0,
332                                       lis_.getVNInfoAllocator());
333  NewLI.addRange(LiveRange(LoadIdx, Idx, LoadVNI));
334}
335
336/// insertSpill - Insert a spill of NewLI.reg after MI.
337void InlineSpiller::insertSpill(LiveInterval &NewLI,
338                                MachineBasicBlock::iterator MI) {
339  MachineBasicBlock &MBB = *MI->getParent();
340  SlotIndex Idx = lis_.getInstructionIndex(MI).getDefIndex();
341  tii_.storeRegToStackSlot(MBB, ++MI, NewLI.reg, true, stackSlot_, rc_, &tri_);
342  --MI; // Point to store instruction.
343  SlotIndex StoreIdx = lis_.InsertMachineInstrInMaps(MI).getDefIndex();
344  vrm_.addSpillSlotUse(stackSlot_, MI);
345  DEBUG(dbgs() << "\tspilled: " << StoreIdx << '\t' << *MI);
346  VNInfo *StoreVNI = NewLI.getNextValue(Idx, 0, lis_.getVNInfoAllocator());
347  NewLI.addRange(LiveRange(Idx, StoreIdx, StoreVNI));
348}
349
350void InlineSpiller::spill(LiveInterval *li,
351                          SmallVectorImpl<LiveInterval*> &newIntervals,
352                          SmallVectorImpl<LiveInterval*> &spillIs) {
353  LiveRangeEdit edit(*li, newIntervals, spillIs);
354  spill(edit);
355}
356
/// spill - Spill the live range described by edit. Splitting and
/// rematerialization are tried first; any remaining accesses are rewritten
/// to short-lived registers with reloads before uses and spills after defs.
void InlineSpiller::spill(LiveRangeEdit &edit) {
  // Stash the edit so helper methods (split, remat, fold, ...) can see it.
  edit_ = &edit;
  DEBUG(dbgs() << "Inline spilling " << edit.getParent() << "\n");
  assert(edit.getParent().isSpillable() &&
         "Attempting to spill already spilled value.");
  assert(!edit.getParent().isStackSlot() && "Trying to spill a stack slot.");

  // Splitting may make spilling unnecessary.
  if (split())
    return;

  reMaterializeAll();

  // Remat may handle everything.
  if (edit_->getParent().empty())
    return;

  rc_ = mri_.getRegClass(edit.getReg());
  stackSlot_ = edit.assignStackSlot(vrm_);

  // Iterate over instructions using register.
  // skipInstruction() advances before returning, so the loop body may erase
  // or rewrite MI without invalidating the iteration.
  for (MachineRegisterInfo::reg_iterator RI = mri_.reg_begin(edit.getReg());
       MachineInstr *MI = RI.skipInstruction();) {

    // Debug values are not allowed to affect codegen.
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      // NOTE(review): assumes the DBG_VALUE operand layout is
      // (reg, imm offset, metadata) -- confirm against DBG_VALUE emission.
      uint64_t Offset = MI->getOperand(1).getImm();
      const MDNode *MDPtr = MI->getOperand(2).getMetadata();
      DebugLoc DL = MI->getDebugLoc();
      if (MachineInstr *NewDV = tii_.emitFrameIndexDebugValue(mf_, stackSlot_,
                                                           Offset, MDPtr, DL)) {
        DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
        MachineBasicBlock *MBB = MI->getParent();
        // Replace the old DBG_VALUE in place with the frame-index version.
        MBB->insert(MBB->erase(MI), NewDV);
      } else {
        DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
        MI->eraseFromParent();
      }
      continue;
    }

    // Stack slot accesses may coalesce away.
    if (coalesceStackAccess(MI))
      continue;

    // Analyze instruction.
    bool Reads, Writes;
    SmallVector<unsigned, 8> Ops;
    tie(Reads, Writes) = MI->readsWritesVirtualRegister(edit.getReg(), &Ops);

    // Attempt to fold memory ops.
    if (foldMemoryOperand(MI, Ops))
      continue;

    // Allocate interval around instruction.
    // FIXME: Infer regclass from instruction alone.
    LiveInterval &NewLI = edit.create(mri_, lis_, vrm_);
    NewLI.markNotSpillable();

    // A read needs the value reloaded from the stack slot before MI.
    if (Reads)
      insertReload(NewLI, MI);

    // Rewrite instruction operands.
    bool hasLiveDef = false;
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(Ops[i]);
      MO.setReg(NewLI.reg);
      if (MO.isUse()) {
        // The short-lived register dies here unless tied to a def.
        if (!MI->isRegTiedToDefOperand(Ops[i]))
          MO.setIsKill();
      } else {
        if (!MO.isDead())
          hasLiveDef = true;
      }
    }

    // A live def must be stored back to the stack slot after MI.
    // FIXME: Use a second vreg if instruction has no tied ops.
    if (Writes && hasLiveDef)
      insertSpill(NewLI, MI);

    DEBUG(dbgs() << "\tinterval: " << NewLI << '\n');
  }
}
440