Spiller.cpp revision 0ff4e2105ba0d8e378cbd01bf9f4db935d1bf39f
//===-- llvm/CodeGen/Spiller.cpp -  Spiller -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "Spiller.h"
#include "llvm/Support/CommandLine.h" // for cl::opt used below
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"       // for DOUT / DEBUG
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumOmitted , "Number of reloads omitted");
STATISTIC(NumCopified, "Number of available reloads turned into copies");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDCE     , "Number of copies elided");

namespace {
  enum SpillerName { simple, local };
}

static cl::opt<SpillerName>
SpillerOpt("spiller",
           cl::desc("Spiller to use: (default: local)"),
           cl::Prefix,
           cl::values(clEnumVal(simple, "simple spiller"),
                      clEnumVal(local,  "local spiller"),
                      clEnumValEnd),
           cl::init(local));

// ****************************** //
// Simple Spiller Implementation  //
// ****************************** //

Spiller::~Spiller() {}

bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
  DOUT << "********** REWRITE MACHINE CODE **********\n";
  DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  const TargetRegisterInfo &TRI = *TM.getRegisterInfo();

  // LoadedRegs - Keep track of which vregs are loaded, so that we only load
  // each vreg once (in the case where a spilled vreg is used by multiple
  // operands).  This is always smaller than the number of operands to the
  // current machine instr, so it should be small.
  std::vector<unsigned> LoadedRegs;

  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    DOUT << MBBI->getBasicBlock()->getName() << ":\n";
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
         MII != E; ++MII) {
      MachineInstr &MI = *MII;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (MO.isReg() && MO.getReg()) {
          if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
            unsigned VirtReg = MO.getReg();
            unsigned SubIdx = MO.getSubReg();
            unsigned PhysReg = VRM.getPhys(VirtReg);
            unsigned RReg = SubIdx ? TRI.getSubReg(PhysReg, SubIdx) : PhysReg;
            if (!VRM.isAssignedReg(VirtReg)) {
              int StackSlot = VRM.getStackSlot(VirtReg);
              const TargetRegisterClass* RC =
                                           MF.getRegInfo().getRegClass(VirtReg);

              if (MO.isUse() &&
                  std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
                           == LoadedRegs.end()) {
                TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
                MachineInstr *LoadMI = prior(MII);
                VRM.addSpillSlotUse(StackSlot, LoadMI);
                LoadedRegs.push_back(VirtReg);
                ++NumLoads;
                DOUT << '\t' << *LoadMI;
              }

              if (MO.isDef()) {
                TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
                                        StackSlot, RC);
                MachineInstr *StoreMI = next(MII);
                VRM.addSpillSlotUse(StackSlot, StoreMI);
                ++NumStores;
              }
            }
            MF.getRegInfo().setPhysRegUsed(RReg);
            MI.getOperand(i).setReg(RReg);
          } else {
            MF.getRegInfo().setPhysRegUsed(MO.getReg());
          }
        }
      }

      DOUT << '\t' << MI;
      LoadedRegs.clear();
    }
  }
  return true;
}

// ****************** //
// Utility Functions  //
// ****************** //

/// InvalidateKill - An MI that defines the specified register is being
/// deleted; invalidate the register's kill information.
static void InvalidateKill(unsigned Reg, BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg]) {
    KillOps[Reg]->setIsKill(false);
    KillOps[Reg] = NULL;
    RegKills.reset(Reg);
  }
}

/// findSinglePredSuccessor - Return via reference a vector of machine basic
/// blocks each of which is a successor of the specified BB and has no other
/// predecessor.
static void findSinglePredSuccessor(MachineBasicBlock *MBB,
                                   SmallVectorImpl<MachineBasicBlock *> &Succs) {
  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI) {
    MachineBasicBlock *SuccMBB = *SI;
    if (SuccMBB->pred_size() == 1)
      Succs.push_back(SuccMBB);
  }
}

/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill())
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (KillRegs)
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {
      RegKills.reset(Reg);
      KillOps[Reg] = NULL;
    }
  }
}

/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction has been removed), mark it isDead. Also check
/// whether the def MI has other definition operands that are not dead; return
/// that by reference in HasLiveDef.
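/// For illustration, consider a hypothetical sequence (register names are
/// arbitrary):
///     r1 = op ...         ; the def being checked
///     ...                 ; no use of r1 in between
///     store r1, fi#1      ; spill store that has been removed
/// With the store gone and no intervening use of r1 before NewDef, the def
/// of r1 is marked isDead.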
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef) {
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (MO.isReg() && MO.isDef()) {
      if (MO.getReg() == Reg)
        DefOp = &MO;
      else if (!MO.isDead())
        HasLiveDef = true;
    }
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = &NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true; // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // Def is dead!
    DefOp->setIsDead();
    return true;
  }
  return false;
}

/// UpdateKills - Track and update kill info. If an MI reads a register that
/// is marked kill, then it must be due to register reuse. Transfer the kill
/// info over.
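/// For illustration, a hypothetical sequence (registers are arbitrary):
///     ... = op r1<kill>    ; KillOps[r1] points at this operand
///     ... = use r1         ; r1 was reused: the old kill flag is cleared
///                          ; and, unless tied, this use becomes the kill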
static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        const TargetRegisterInfo* TRI) {
  const TargetInstrDesc &TID = MI.getDesc();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->setIsKill(false);
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      if (i < TID.getNumOperands() &&
          TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
        // Unless it's a two-address operand, this is the new kill.
        MO.setIsKill();
    }
    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
    // It also defines (or partially defines) its aliases.
    for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
      RegKills.reset(*AS);
      KillOps[*AS] = NULL;
    }
  }
}

/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
///
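/// For example (hypothetical), instead of reloading from a stack slot:
///     r1 = load fi#1
/// the original cheap definition is re-emitted directly:
///     r1 = mov 42
/// Any virtual register operands on the cloned MI are then rewritten to
/// their assigned physical registers.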
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg));
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    assert(MO.isUse());
    unsigned SubIdx = MO.getSubReg();
    unsigned Phys = VRM.getPhys(VirtReg);
    assert(Phys);
    unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
    MO.setReg(RReg);
  }
  ++NumReMats;
}

/// findSuperReg - Find the super-register in the given register class whose
/// SubIdx sub-register is SubReg.
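/// For example (x86, purely for illustration): searching GR32 for the
/// register whose 16-bit sub-register is AX would return EAX.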
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}

// ******************************** //
// Available Spills Implementation  //
// ******************************** //

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register. The register is still available but is no longer
/// allowed to be modified.
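/// For example (hypothetical): after "r1 = copy R0", the value in R0 can
/// still satisfy reloads of its stack slot, but R0 must no longer be chosen
/// as the target of a modifying reuse.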
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    I++;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n";
  }
}

/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases. The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}

/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " clobbered, invalidating ";
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
    else
      DOUT << "SS#" << SlotOrReMat << "\n";
  }
}

/// ClobberPhysReg - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}

/// AddAvailableRegsToLiveIn - Availability information is being kept as
/// control flows into the specified MBB. Add available physical registers
/// as potential live-ins. If they are reused in the MBB, they will be added
/// to the live-in set so that the register scavenger and post-allocation
/// scheduler are aware of them.
void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
                                        BitVector &RegKills,
                                        std::vector<MachineOperand*> &KillOps) {
  std::set<unsigned> NotAvailable;
  for (std::multimap<unsigned, int>::iterator
         I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
       I != E; ++I) {
    unsigned Reg = I->first;
    const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg);
    // FIXME: A temporary workaround. We can't reuse an available value if
    // it's not safe to move the def of the virtual register's class, e.g.
    // the X86::RFP* register classes. Do not add it as a live-in.
    if (!TII->isSafeToMoveRegClassDefs(RC))
      // This is no longer available.
      NotAvailable.insert(Reg);
    else {
      MBB.addLiveIn(Reg);
      InvalidateKill(Reg, RegKills, KillOps);
    }

    // Skip over the same register.
    std::multimap<unsigned, int>::iterator NI = next(I);
    while (NI != E && NI->first == Reg) {
      ++I;
      ++NI;
    }
  }

  for (std::set<unsigned>::iterator I = NotAvailable.begin(),
         E = NotAvailable.end(); I != E; ++I) {
    ClobberPhysReg(*I);
    for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
       *SubRegs; ++SubRegs)
      ClobberPhysReg(*SubRegs);
  }
}

/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes.  This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
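/// For example (hypothetical): after a new store to fi#1, any record that
/// "fi#1 is available in R0" is erased, though R0 may still hold the values
/// of other slots.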
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots; only remove
  // this stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}

// ************************** //
// Reuse Info Implementation  //
// ************************** //

/// GetRegForReload - We are about to emit a reload into PhysReg.  If there
/// is some other operand that is using the specified register, either pick
/// a new register to use, or evict the previous reload and use this reg.
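/// For illustration (hypothetical registers): if a reload into R0 is
/// requested but an earlier operand already reused R0, we either switch this
/// reload to that operand's assigned register, or emit the earlier reload
/// explicitly so that R0 becomes free for this one.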
unsigned ReuseInfo::GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                         AvailableSpills &Spills,
                         std::vector<MachineInstr*> &MaybeDeadStores,
                         SmallSet<unsigned, 8> &Rejected,
                         BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps,
                         VirtRegMap &VRM) {
  const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()
                               .getInstrInfo();

  if (Reuses.empty()) return PhysReg;  // This is most often empty.

  for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
    ReusedOp &Op = Reuses[ro];
    // If we find some other reuse that was supposed to use this register
    // exactly for its reload, we can change this reload to use ITS reload
    // register. That is, unless its reload register has already been
    // considered and subsequently rejected because it has also been reused
    // by another operand.
    if (Op.PhysRegReused == PhysReg &&
        Rejected.count(Op.AssignedPhysReg) == 0) {
      // Yup, use the reload register that we didn't use before.
      unsigned NewReg = Op.AssignedPhysReg;
      Rejected.insert(PhysReg);
      return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected,
                             RegKills, KillOps, VRM);
    } else {
      // Otherwise, we might also have a problem if a previously reused
      // value aliases the new register.  If so, codegen the previous reload
      // and use this one.
      unsigned PRRU = Op.PhysRegReused;
      const TargetRegisterInfo *TRI = Spills.getRegInfo();
      if (TRI->areAliases(PRRU, PhysReg)) {
        // Okay, we found out that an alias of a reused register
        // was used.  This isn't good because it means we have
        // to undo a previous reuse.
        MachineBasicBlock *MBB = MI->getParent();
        const TargetRegisterClass *AliasRC =
          MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

        // Copy Op out of the vector and remove it; we're going to insert an
        // explicit load for it.
        ReusedOp NewOp = Op;
        Reuses.erase(Reuses.begin()+ro);

        // Ok, we're going to try to reload the assigned physreg into the
        // slot that we were supposed to in the first place.  However, that
        // register could hold a reuse.  Check to see if it conflicts or
        // would prefer us to use a different register.
        unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg,
                                              MI, Spills, MaybeDeadStores,
                                          Rejected, RegKills, KillOps, VRM);

        MachineBasicBlock::iterator MII = MI;
        if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
          ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI, VRM);
        } else {
          TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg,
                                    NewOp.StackSlotOrReMat, AliasRC);
          MachineInstr *LoadMI = prior(MII);
          VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
          // Any stores to this stack slot are not dead anymore.
          MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
          ++NumLoads;
        }
        Spills.ClobberPhysReg(NewPhysReg);
        Spills.ClobberPhysReg(NewOp.PhysRegReused);

        unsigned SubIdx = MI->getOperand(NewOp.Operand).getSubReg();
        unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg;
        MI->getOperand(NewOp.Operand).setReg(RReg);

        Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
        --MII;
        UpdateKills(*MII, RegKills, KillOps, TRI);
        DOUT << '\t' << *MII;

        DOUT << "Reuse undone!\n";
        --NumReused;

        // Finally, PhysReg is now available, go ahead and use it.
        return PhysReg;
      }
    }
  }
  return PhysReg;
}

// ***************************** //
// Local Spiller Implementation  //
// ***************************** //

bool LocalSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
  RegInfo = &MF.getRegInfo();
  TRI = MF.getTarget().getRegisterInfo();
  TII = MF.getTarget().getInstrInfo();
  DOUT << "\n**** Local spiller rewriting function '"
       << MF.getFunction()->getName() << "':\n";
  DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
          " ****\n";
  DEBUG(MF.dump());

  // Spills - Keep track of which spilled values are available in physregs
  // so that we can choose to reuse the physregs instead of emitting
  // reloads. This is usually refreshed per basic block.
  AvailableSpills Spills(TRI, TII);

  // Keep track of kill information.
  BitVector RegKills(TRI->getNumRegs());
  std::vector<MachineOperand*> KillOps;
  KillOps.resize(TRI->getNumRegs(), NULL);

  // SingleEntrySuccs - Successor blocks which have a single predecessor.
  SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
  SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;

  // Traverse the basic blocks depth first.
  MachineBasicBlock *Entry = MF.begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*,
         SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    if (!EarlyVisited.count(MBB))
      RewriteMBB(*MBB, VRM, Spills, RegKills, KillOps);

    // If this MBB is the only predecessor of a successor, keep the
    // availability information and visit it next.
    do {
      // Keep visiting single-predecessor successors as long as possible.
      SinglePredSuccs.clear();
      findSinglePredSuccessor(MBB, SinglePredSuccs);
      if (SinglePredSuccs.empty())
        MBB = 0;
      else {
        // FIXME: There may be more than one successor, each of which has
        // MBB as its only predecessor; we only visit the first one here.
        MBB = SinglePredSuccs[0];
        if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
          Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
          RewriteMBB(*MBB, VRM, Spills, RegKills, KillOps);
        }
      }
    } while (MBB);

    // Clear the availability info.
    Spills.clear();
  }

  DOUT << "**** Post Machine Instrs ****\n";
  DEBUG(MF.dump());

  // Mark unused spill slots.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  int SS = VRM.getLowSpillSlot();
  if (SS != VirtRegMap::NO_STACK_SLOT)
    for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
      if (!VRM.isSpillSlotUsed(SS)) {
        MFI->RemoveStackObject(SS);
        ++NumDSS;
      }

  return true;
}

/// PrepForUnfoldOpti - Turn a store folding instruction into a load folding
/// instruction. e.g.
///     xorl  %edi, %eax
///     movl  %eax, -32(%ebp)
///     movl  -36(%ebp), %eax
///     orl   %eax, -32(%ebp)
/// ==>
///     xorl  %edi, %eax
///     orl   -36(%ebp), %eax
///     mov   %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MII,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                    AvailableSpills &Spills,
                                    BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps,
                                    VirtRegMap &VRM) {
  MachineFunction &MF = *MBB.getParent();
  MachineInstr &MI = *MII;
  unsigned UnfoldedOpc = 0;
  unsigned UnfoldPR = 0;
  unsigned UnfoldVR = 0;
  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
    // Only transform an MI that folds a single register.
    if (UnfoldedOpc)
      return false;
    UnfoldVR = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    // MI2VirtMap can be updated, which invalidates the iterator.
    // Increment the iterator first.
    ++I;
    if (VRM.isAssignedReg(UnfoldVR))
      continue;
    // If this reference is not a use, any previous store is now dead.
    // Otherwise, the store to this stack slot is not dead anymore.
    FoldedSS = VRM.getStackSlot(UnfoldVR);
    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
    if (DeadStore && (MR & VirtRegMap::isModRef)) {
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
      if (!PhysReg || !DeadStore->readsRegister(PhysReg))
        continue;
      UnfoldPR = PhysReg;
      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                    false, true);
    }
  }

  if (!UnfoldedOpc)
    return false;

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
      continue;
    if (VRM.isAssignedReg(VirtReg)) {
      unsigned PhysReg = VRM.getPhys(VirtReg);
      if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
    } else if (VRM.isReMaterialized(VirtReg))
      continue;
    int SS = VRM.getStackSlot(VirtReg);
    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
    if (PhysReg) {
      if (TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
      continue;
    }
    if (VRM.hasPhys(VirtReg)) {
      PhysReg = VRM.getPhys(VirtReg);
      if (!TRI->regsOverlap(PhysReg, UnfoldPR))
        continue;
    }

    // Ok, we'll need to reload the value into a register which makes
    // it impossible to perform the store unfolding optimization later.
    // Let's see if it is possible to fold the load if the store is
    // unfolded. This allows us to perform the store unfolding
    // optimization.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
      assert(NewMIs.size() == 1);
      MachineInstr *NewMI = NewMIs.back();
      NewMIs.clear();
      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
      assert(Idx != -1);
      SmallVector<unsigned, 1> Ops;
      Ops.push_back(Idx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
      if (FoldedMI) {
        VRM.addSpillSlotUse(SS, FoldedMI);
        if (!VRM.hasPhys(UnfoldVR))
          VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
        VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
        MII = MBB.insert(MII, FoldedMI);
        InvalidateKills(MI, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(&MI);
        MBB.erase(&MI);
        MF.DeleteMachineInstr(NewMI);
        return true;
      }
      MF.DeleteMachineInstr(NewMI);
    }
  }
  return false;
}

/// CommuteToFoldReload -
/// Look for
/// r1 = load fi#1
/// r1 = op r1, r2<kill>
/// store r1, fi#1
///
/// If op is commutable and r2 is killed, then we can xform these to
/// r2 = op r2, fi#1
/// store r2, fi#1
bool LocalSpiller::CommuteToFoldReload(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MII,
                                    unsigned VirtReg, unsigned SrcReg, int SS,
                                    AvailableSpills &Spills,
                                    BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps,
                                    const TargetRegisterInfo *TRI,
                                    VirtRegMap &VRM) {
  if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
    return false;

  MachineFunction &MF = *MBB.getParent();
  MachineInstr &MI = *MII;
  MachineBasicBlock::iterator DefMII = prior(MII);
  MachineInstr *DefMI = DefMII;
  const TargetInstrDesc &TID = DefMI->getDesc();
  unsigned NewDstIdx;
  if (DefMII != MBB.begin() &&
      TID.isCommutable() &&
      TII->CommuteChangesDestination(DefMI, NewDstIdx)) {
    MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
    unsigned NewReg = NewDstMO.getReg();
    if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
      return false;
    MachineInstr *ReloadMI = prior(DefMII);
    int FrameIdx;
    unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
    if (DestReg != SrcReg || FrameIdx != SS)
      return false;
    int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
    if (UseIdx == -1)
      return false;
    int DefIdx = TID.getOperandConstraint(UseIdx, TOI::TIED_TO);
    if (DefIdx == -1)
      return false;
    assert(DefMI->getOperand(DefIdx).isReg() &&
           DefMI->getOperand(DefIdx).getReg() == SrcReg);

    // Now commute def instruction.
    MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
    if (!CommutedMI)
      return false;
    SmallVector<unsigned, 1> Ops;
    Ops.push_back(NewDstIdx);
    MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
    // Not needed since foldMemoryOperand returns new MI.
    MF.DeleteMachineInstr(CommutedMI);
    if (!FoldedMI)
      return false;

    VRM.addSpillSlotUse(SS, FoldedMI);
    VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
    // Insert new def MI and spill MI.
    const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
    TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
    MII = prior(MII);
    MachineInstr *StoreMI = MII;
    VRM.addSpillSlotUse(SS, StoreMI);
    VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
    MII = MBB.insert(MII, FoldedMI);  // Update MII to backtrack.

    // Delete all 3 old instructions.
    InvalidateKills(*ReloadMI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(ReloadMI);
    MBB.erase(ReloadMI);
    InvalidateKills(*DefMI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(DefMI);
    MBB.erase(DefMI);
    InvalidateKills(MI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(&MI);
    MBB.erase(&MI);

    // If NewReg was previously holding the value of some SS, it's now
    // clobbered. This has to be done now because it's a physical register.
    // When this instruction is re-visited, it's ignored.
    Spills.ClobberPhysReg(NewReg);

    ++NumCommutes;
    return true;
  }

  return false;
}

/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
/// the last store to the same slot is now dead. If so, remove the last store.
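/// For example (hypothetical):
///     store r1, fi#1     ; LastStore: fi#1 is never read before...
///     ...                ; ...the slot is overwritten here
///     store r2, fi#1     ; the new store makes the previous one dead
/// The first store is deleted and counted as dead store elimination.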
void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MII,
                                  int Idx, unsigned PhysReg, int StackSlot,
                                  const TargetRegisterClass *RC,
                                  bool isAvailable, MachineInstr *&LastStore,
                                  AvailableSpills &Spills,
                                  SmallSet<MachineInstr*, 4> &ReMatDefs,
                                  BitVector &RegKills,
                                  std::vector<MachineOperand*> &KillOps,
                                  VirtRegMap &VRM) {
  TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
  MachineInstr *StoreMI = next(MII);
  VRM.addSpillSlotUse(StackSlot, StoreMI);
  DOUT << "Store:\t" << *StoreMI;

  // If there is a dead store to this stack slot, nuke it now.
  if (LastStore) {
    DOUT << "Removed dead store:\t" << *LastStore;
    ++NumDSE;
    SmallVector<unsigned, 2> KillRegs;
    InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
    MachineBasicBlock::iterator PrevMII = LastStore;
    bool CheckDef = PrevMII != MBB.begin();
    if (CheckDef)
      --PrevMII;
    VRM.RemoveMachineInstrFromMaps(LastStore);
    MBB.erase(LastStore);
    if (CheckDef) {
      // Look at defs of killed registers on the store. Mark the defs
      // as dead since the store has been deleted and they aren't
      // being reused.
      for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
        bool HasOtherDef = false;
        if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
          MachineInstr *DeadDef = PrevMII;
          if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
            // FIXME: This assumes a remat def does not have side
            // effects.
            VRM.RemoveMachineInstrFromMaps(DeadDef);
            MBB.erase(DeadDef);
            ++NumDRM;
          }
        }
      }
    }
  }

  LastStore = next(MII);

  // If the stack slot value was previously available in some other
  // register, change it now.  Otherwise, make the value available in
  // PhysReg.
  Spills.ModifyStackSlotOrReMat(StackSlot);
  Spills.ClobberPhysReg(PhysReg);
  Spills.addAvailable(StackSlot, PhysReg, isAvailable);
  ++NumStores;
}

/// TransferDeadness - An identity copy definition is dead and it's being
/// removed. Find the last def or use and mark it as dead / kill.
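/// For example (hypothetical): when a dead "r1 = r1" copy is deleted, the
/// closest preceding use of r1 in this MBB becomes the kill; if the closest
/// reference is a def instead, it is marked dead.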
void LocalSpiller::TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                                    unsigned Reg, BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps) {
  int LastUDDist = -1;
  MachineInstr *LastUDMI = NULL;
  for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
         RE = RegInfo->reg_end(); RI != RE; ++RI) {
    MachineInstr *UDMI = &*RI;
    if (UDMI->getParent() != MBB)
      continue;
    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
    if (DI == DistanceMap.end() || DI->second > CurDist)
      continue;
    if ((int)DI->second < LastUDDist)
      continue;
    LastUDDist = DI->second;
    LastUDMI = UDMI;
  }

  if (LastUDMI) {
    const TargetInstrDesc &TID = LastUDMI->getDesc();
    MachineOperand *LastUD = NULL;
    for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = LastUDMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;
      if (!LastUD || (LastUD->isUse() && MO.isDef()))
        LastUD = &MO;
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1)
        return;
    }
    if (LastUD->isDef())
      LastUD->setIsDead();
    else {
      LastUD->setIsKill();
      RegKills.set(Reg);
      KillOps[Reg] = LastUD;
    }
  }
}

/// RewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them.  If possible, avoid reloading vregs.
void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
                              AvailableSpills &Spills, BitVector &RegKills,
                              std::vector<MachineOperand*> &KillOps) {
  DOUT << "\n**** Local spiller rewriting MBB '"
       << MBB.getBasicBlock()->getName() << "':\n";

  MachineFunction &MF = *MBB.getParent();

  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store.  If the stack slot value is never read
  // (because the value was used from some available register, for example), and
  // subsequently stored to, the original store is dead.  This map keeps track
  // of inserted stores that are not used.  If we see a subsequent store to the
  // same stack slot, the original store is deleted.
  std::vector<MachineInstr*> MaybeDeadStores;
  MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

  // ReMatDefs - These are rematerializable def MIs which are not deleted.
  SmallSet<MachineInstr*, 4> ReMatDefs;

  // Clear kill info.
  SmallSet<unsigned, 2> KilledMIRegs;
  RegKills.reset();
  KillOps.clear();
  KillOps.resize(TRI->getNumRegs(), NULL);

  unsigned Dist = 0;
  DistanceMap.clear();
  for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
       MII != E; ) {
    MachineBasicBlock::iterator NextMII = MII; ++NextMII;

    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    bool Erased = false;
    bool BackTracked = false;
    if (PrepForUnfoldOpti(MBB, MII,
                          MaybeDeadStores, Spills, RegKills, KillOps, VRM))
      NextMII = next(MII);

    MachineInstr &MI = *MII;
    const TargetInstrDesc &TID = MI.getDesc();

    if (VRM.hasEmergencySpills(&MI)) {
      // Spill physical register(s) in the rare case the allocator has run out
      // of registers to allocate.
      SmallSet<int, 4> UsedSS;
      std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
      for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
        unsigned PhysReg = EmSpills[i];
        const TargetRegisterClass *RC =
          TRI->getPhysicalRegisterRegClass(PhysReg);
        assert(RC && "Unable to determine register class!");
        int SS = VRM.getEmergencySpillSlot(RC);
        if (UsedSS.count(SS))
          assert(0 && "Need to spill more than one physical register!");
        UsedSS.insert(SS);
        TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
        MachineInstr *StoreMI = prior(MII);
        VRM.addSpillSlotUse(SS, StoreMI);
        TII->loadRegFromStackSlot(MBB, next(MII), PhysReg, SS, RC);
        MachineInstr *LoadMI = next(MII);
        VRM.addSpillSlotUse(SS, LoadMI);
        ++NumPSpills;
      }
      NextMII = next(MII);
    }

    // Insert restores here if asked to.
    if (VRM.isRestorePt(&MI)) {
      std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
      for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
        unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
        if (!VRM.getPreSplitReg(VirtReg))
          continue; // Split interval spilled again.
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);

        // Check if the value being restored is available. If so, it must be
        // from a predecessor BB that falls through into this BB. We do not
        // expect:
        // BB1:
        // r1 = load fi#1
        // ...
        //    = r1<kill>
        // ... # r1 not clobbered
        // ...
        //    = load fi#1
        bool DoReMat = VRM.isReMaterialized(VirtReg);
        int SSorRMId = DoReMat
          ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
        if (InReg == Phys) {
          // If the value is already available in the expected register, save
          // a reload / remat.
          if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << SSorRMId;
          DOUT << " from physreg "
               << TRI->getName(InReg) << " for vreg"
               << VirtReg << " instead of reloading into physreg "
               << TRI->getName(Phys) << "\n";
          ++NumOmitted;
          continue;
        } else if (InReg && InReg != Phys) {
          if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << SSorRMId;
          DOUT << " from physreg "
               << TRI->getName(InReg) << " for vreg"
               << VirtReg << " by copying it into physreg "
               << TRI->getName(Phys) << "\n";

          // If the reloaded / remat value is available in another register,
          // copy it to the desired register.
          TII->copyRegToReg(MBB, &MI, Phys, InReg, RC, RC);

          // This invalidates Phys.
          Spills.ClobberPhysReg(Phys);
          // Remember it's available.
          Spills.addAvailable(SSorRMId, Phys);

          // Mark it killed.
          MachineInstr *CopyMI = prior(MII);
          MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg);
          KillOpnd->setIsKill();
          UpdateKills(*CopyMI, RegKills, KillOps, TRI);

          DOUT << '\t' << *CopyMI;
          ++NumCopified;
          continue;
        }

        if (VRM.isReMaterialized(VirtReg)) {
          ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM);
        } else {
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          TII->loadRegFromStackSlot(MBB, &MI, Phys, SSorRMId, RC);
          MachineInstr *LoadMI = prior(MII);
          VRM.addSpillSlotUse(SSorRMId, LoadMI);
          ++NumLoads;
        }

        // This invalidates Phys.
        Spills.ClobberPhysReg(Phys);
        // Remember it's available.
        Spills.addAvailable(SSorRMId, Phys);

        UpdateKills(*prior(MII), RegKills, KillOps, TRI);
        DOUT << '\t' << *prior(MII);
      }
    }

    // Insert spills here if asked to.
    if (VRM.isSpillPt(&MI)) {
      std::vector<std::pair<unsigned,bool> > &SpillRegs =
        VRM.getSpillPtSpills(&MI);
      for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
        unsigned VirtReg = SpillRegs[i].first;
        bool isKill = SpillRegs[i].second;
        if (!VRM.getPreSplitReg(VirtReg))
          continue; // Split interval spilled again.
        const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
        unsigned Phys = VRM.getPhys(VirtReg);
        int StackSlot = VRM.getStackSlot(VirtReg);
        TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
        MachineInstr *StoreMI = next(MII);
        VRM.addSpillSlotUse(StackSlot, StoreMI);
        DOUT << "Store:\t" << *StoreMI;
        VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
      }
      NextMII = next(MII);
    }

    /// ReusedOperands - Keep track of operand reuse in case we need to undo
    /// reuse.
    ReuseInfo ReusedOperands(MI, TRI);
    SmallVector<unsigned, 4> VirtUseOps;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() || MO.getReg() == 0)
        continue;   // Ignore non-register operands.

      unsigned VirtReg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
        // Ignore physregs for spilling, but remember that it is used by this
        // function.
        RegInfo->setPhysRegUsed(VirtReg);
        continue;
      }

      // We want to process implicit virtual register uses first.
      if (MO.isImplicit())
        // If the virtual register is implicitly defined, emit an implicit_def
        // beforehand so the scavenger knows it's "defined".
        VirtUseOps.insert(VirtUseOps.begin(), i);
      else
        VirtUseOps.push_back(i);
    }

    // Process all of the spilled uses and all non-spilled reg references.
    SmallVector<int, 2> PotentialDeadStoreSlots;
    KilledMIRegs.clear();
    for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
      unsigned i = VirtUseOps[j];
      MachineOperand &MO = MI.getOperand(i);
      unsigned VirtReg = MO.getReg();
      assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
             "Not a virtual register?");

      unsigned SubIdx = MO.getSubReg();
      if (VRM.isAssignedReg(VirtReg)) {
        // This virtual register was assigned a physreg!
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);
        if (MO.isDef())
          ReusedOperands.markClobbered(Phys);
        unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
        MI.getOperand(i).setReg(RReg);
        if (VRM.isImplicitlyDefined(VirtReg))
          BuildMI(MBB, &MI, MI.getDebugLoc(),
                  TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
        continue;
      }

      // This virtual register is now known to be a spilled value.
      if (!MO.isUse())
        continue;  // Handle defs in the loop below (handle use&def here though)

      bool DoReMat = VRM.isReMaterialized(VirtReg);
      int SSorRMId = DoReMat
        ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
      int ReuseSlot = SSorRMId;

      // Check to see if this stack slot is available.
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);

      // If this is a sub-register use, make sure the reuse register is in the
      // right register class. For example, for x86 not all of the 32-bit
      // registers have accessible sub-registers.
      // Similarly so for EXTRACT_SUBREG. Consider this:
      // EDI = op
      // MOV32_mr fi#1, EDI
      // ...
      //       = EXTRACT_SUBREG fi#1
      // fi#1 is available in EDI, but it cannot be reused because it's not in
      // the right register file.
      if (PhysReg &&
          (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        if (!RC->contains(PhysReg))
          PhysReg = 0;
      }

      if (PhysReg) {
        // This spilled operand might be part of a two-address operand.  If this
        // is the case, then changing it will necessarily require changing the
        // def part of the instruction as well.  However, in some cases, we
        // aren't allowed to modify the reused register.  If none of these cases
        // apply, reuse it.
        bool CanReuse = true;
        int ti = TID.getOperandConstraint(i, TOI::TIED_TO);
        if (ti != -1) {
          // Okay, we have a two address operand.  We can reuse this physreg as
          // long as we are allowed to clobber the value and there isn't an
          // earlier def that has already clobbered the physreg.
          CanReuse = Spills.canClobberPhysReg(ReuseSlot) &&
            !ReusedOperands.isClobbered(PhysReg);
        }

        if (CanReuse) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg "
               << TRI->getName(PhysReg) << " for vreg"
               << VirtReg << " instead of reloading into physreg "
               << TRI->getName(VRM.getPhys(VirtReg)) << "\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);

          // The only technical detail we have is that we don't know that
          // PhysReg won't be clobbered by a reloaded stack slot that occurs
          // later in the instruction.  In particular, consider 'op V1, V2'.
          // If V1 is available in physreg R0, we would choose to reuse it
          // here, instead of reloading it into the register the allocator
          // indicated (say R1).  However, V2 might have to be reloaded
          // later, and it might indicate that it needs to live in R0.  When
          // this occurs, we need to have information available that
          // indicates it is safe to use R1 for the reload instead of R0.
          //
          // To further complicate matters, we might conflict with an alias,
          // or R0 and R1 might not be compatible with each other.  In this
          // case, we actually insert a reload for V1 in R1, ensuring that
          // we can get at R0 or its alias.
          ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                  VRM.getPhys(VirtReg), VirtReg);
          if (ti != -1)
            // Only mark it clobbered if this is a use&def operand.
            ReusedOperands.markClobbered(PhysReg);
          ++NumReused;

          if (MI.getOperand(i).isKill() &&
              ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {

            // The store of this spilled value is potentially dead, but we
            // won't know for certain until we've confirmed that the re-use
            // above is valid, which means waiting until the other operands
            // are processed. For now we just track the spill slot; we'll
            // remove it after the other operands are processed if valid.

            PotentialDeadStoreSlots.push_back(ReuseSlot);
          }

          // Mark it isKill if there are no other uses of the same virtual
          // register and it's not a two-address operand. IsKill will be
          // unset if the reg is reused.
          if (ti == -1 && KilledMIRegs.count(VirtReg) == 0) {
            MI.getOperand(i).setIsKill();
            KilledMIRegs.insert(VirtReg);
          }

          continue;
        }  // CanReuse

        // Otherwise we have a situation where we have a two-address instruction
        // whose mod/ref operand needs to be reloaded.  This reload is already
        // available in some register "PhysReg", but if we used PhysReg as the
        // operand to our 2-addr instruction, the instruction would modify
        // PhysReg.  This isn't cool if something later uses PhysReg and expects
        // to get its initial value.
        //
        // To avoid this problem, and to avoid doing a load right after a store,
        // we emit a copy from PhysReg into the designated register for this
        // operand.
        unsigned DesignatedReg = VRM.getPhys(VirtReg);
        assert(DesignatedReg && "Must map virtreg to physreg!");

        // Note that, if we reused a register for a previous operand, the
        // register we want to reload into might not actually be
        // available.  If this occurs, use the register indicated by the
        // reuser.
        if (ReusedOperands.hasReuses())
          DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);

        // If the mapped designated register is actually the physreg we have
        // incoming, we don't need to insert a dead copy.
        if (DesignatedReg == PhysReg) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg " << TRI->getName(PhysReg)
               << " for vreg" << VirtReg
               << " instead of reloading into same physreg.\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
          ReusedOperands.markClobbered(RReg);
          ++NumReused;
          continue;
        }

        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        RegInfo->setPhysRegUsed(DesignatedReg);
        ReusedOperands.markClobbered(DesignatedReg);
        TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);

        MachineInstr *CopyMI = prior(MII);
        UpdateKills(*CopyMI, RegKills, KillOps, TRI);

        // This invalidates DesignatedReg.
        Spills.ClobberPhysReg(DesignatedReg);

        Spills.addAvailable(ReuseSlot, DesignatedReg);
        unsigned RReg =
          SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
        MI.getOperand(i).setReg(RReg);
        DOUT << '\t' << *prior(MII);
        ++NumReused;
        continue;
      } // if (PhysReg)
1294
1295      // Otherwise, reload it and remember that we have it.
1296      PhysReg = VRM.getPhys(VirtReg);
1297      assert(PhysReg && "Must map virtreg to physreg!");
1298
1299      // Note that, if we reused a register for a previous operand, the
1300      // register we want to reload into might not actually be
1301      // available.  If this occurs, use the register indicated by the
1302      // reuser.
1303      if (ReusedOperands.hasReuses())
1304        PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
1305                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);
1306
1307      RegInfo->setPhysRegUsed(PhysReg);
1308      ReusedOperands.markClobbered(PhysReg);
      if (DoReMat) {
        ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM);
      } else {
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
        MachineInstr *LoadMI = prior(MII);
        VRM.addSpillSlotUse(SSorRMId, LoadMI);
        ++NumLoads;
      }
      // This invalidates PhysReg.
      Spills.ClobberPhysReg(PhysReg);

      // Any stores to this stack slot are not dead anymore.
      if (!DoReMat)
        MaybeDeadStores[SSorRMId] = NULL;
      Spills.addAvailable(SSorRMId, PhysReg);
      // Assume this is the last use. IsKill will be unset if the reg is
      // reused, unless it's a two-address operand.
      if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1 &&
          KilledMIRegs.count(VirtReg) == 0) {
        MI.getOperand(i).setIsKill();
        KilledMIRegs.insert(VirtReg);
      }
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      MI.getOperand(i).setReg(RReg);
      UpdateKills(*prior(MII), RegKills, KillOps, TRI);
      DOUT << '\t' << *prior(MII);
    }

    // Ok - now we can remove stores that have been confirmed dead.
    for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
      // This was the last use and the spilled value is still available
      // for reuse. That means the spill was unnecessary!
      int PDSSlot = PotentialDeadStoreSlots[j];
      MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
      if (DeadStore) {
        DOUT << "Removed dead store:\t" << *DeadStore;
        InvalidateKills(*DeadStore, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(DeadStore);
        MBB.erase(DeadStore);
        MaybeDeadStores[PDSSlot] = NULL;
        ++NumDSE;
      }
    }

    DOUT << '\t' << MI;

    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.
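    // (On x86, for example, a reload can be folded into its user, yielding
    // something like "ADD32rm %EAX, <fi#2>" in place of a separate load;
    // the instruction syntax here is illustrative only.)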
    SmallSet<int, 2> FoldedSS;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      DOUT << "Folded vreg: " << VirtReg << "  MR: " << MR;

      // MI2VirtMap can be updated below, which would invalidate the
      // iterator, so increment it first.
      ++I;
      int SS = VRM.getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
      DOUT << " - StackSlot: " << SS << "\n";

      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
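          // (A copy when the slot value lives in a different physreg than
          // DestReg; nothing at all when it already lives in DestReg.)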
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DOUT << "Promoted Load To Copy: " << MI;
            if (DestReg != InReg) {
              const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
              TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
              MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
              unsigned SubIdx = DefMO->getSubReg();
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              NextMII = &MI;
              --NextMII;  // backtrack to the copy.
              // Propagate the sub-register index over.
              if (SubIdx) {
                DefMO = NextMII->findRegisterDefOperand(DestReg);
                DefMO->setSubReg(SubIdx);
              }

              // Mark the copy's source operand as killed.
              MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg);
              KillOpnd->setIsKill();

              BackTracked = true;
            } else {
              DOUT << "Removing now-noop copy: " << MI;
              // Unset the last kill since the register is being reused.
              InvalidateKill(InReg, RegKills, KillOps);
            }

            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            goto ProcessNextInst;
          }
        } else {
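          // The folded use isn't a straight reload of this slot, but if the
          // slot value is live in a physreg we can unfold the memory operand
          // and take the value from that register directly.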
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          if (PhysReg &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
            MBB.insert(MII, NewMIs[0]);
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            goto ProcessNextInst;
          }
        }
      }

      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      if (DeadStore) {
        bool isDead = !(MR & VirtRegMap::isRef);
        MachineInstr *NewStore = NULL;
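        // Even when this reference keeps the previous store alive, unfolding
        // the instruction's store component (when legal) produces a
        // replacement store, so the old one can still be removed.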
        if (MR & VirtRegMap::isModRef) {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          // We can reuse this physreg as long as we are allowed to clobber
          // the value and there isn't an earlier def that has already clobbered
          // the physreg.
          if (PhysReg &&
              !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
            MachineOperand *KillOpnd =
              DeadStore->findRegisterUseOperand(PhysReg, true);
            // Note, if the store is storing a sub-register, it's possible the
            // super-register is needed below.
            if (KillOpnd && !KillOpnd->getSubReg() &&
                TII->unfoldMemoryOperand(MF, &MI, PhysReg,
                                         false, true, NewMIs)) {
              MBB.insert(MII, NewMIs[0]);
              NewStore = NewMIs[1];
              MBB.insert(MII, NewStore);
              VRM.addSpillSlotUse(SS, NewStore);
              InvalidateKills(MI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              --NextMII;
              --NextMII;  // backtrack to the unfolded instruction.
              BackTracked = true;
              isDead = true;
            }
          }
        }

        if (isDead) {  // Previous store is dead.
          // If we get here, the store is dead, nuke it now.
          DOUT << "Removed dead store:\t" << *DeadStore;
          InvalidateKills(*DeadStore, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(DeadStore);
          MBB.erase(DeadStore);
          if (!NewStore)
            ++NumDSE;
        }

        MaybeDeadStores[SS] = NULL;
        if (NewStore) {
          // Treat this store as a spill merged into a copy. That makes the
          // stack slot value available.
          VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
          goto ProcessNextInst;
        }
      }

      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy). If
        // so, realize that the vreg is available now, and add the store to the
        // MaybeDeadStore info.
        int StackSlot;
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");

            if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
                                    Spills, RegKills, KillOps, TRI, VRM)) {
              NextMII = next(MII);
              BackTracked = true;
              goto ProcessNextInst;
            }

            // Okay, this is certainly a store of SrcReg to [StackSlot].  Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now.  Otherwise, make the value available
            // in SrcReg.
            Spills.addAvailable(StackSlot, SrcReg, false/*!clobber*/);
          }
        }
      }
    }

    // Process all of the spilled defs.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isReg() && MO.getReg() && MO.isDef()))
        continue;

      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy.  If so, eliminate the
        // instruction before considering the dest reg to be changed.
        unsigned Src, Dst, SrcSR, DstSR;
        if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
          ++NumDCE;
          DOUT << "Removing now-noop copy: " << MI;
          SmallVector<unsigned, 2> KillRegs;
          InvalidateKills(MI, RegKills, KillOps, &KillRegs);
          if (MO.isDead() && !KillRegs.empty()) {
            // Source register or an implicit super/sub-register use is killed.
            assert(KillRegs[0] == Dst ||
                   TRI->isSubRegister(KillRegs[0], Dst) ||
                   TRI->isSuperRegister(KillRegs[0], Dst));
            // Last def is now dead.
            TransferDeadness(&MBB, Dist, Src, RegKills, KillOps);
          }
          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          Erased = true;
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }

        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register.  If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }

      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM.isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM.getStackSlot(VirtReg);
      const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
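      // (On a two-address target such as x86, the def is tied to one of the
      // uses and must live in the same physical register as that use.)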
      unsigned PhysReg;
      int TiedOp = MI.getDesc().findTiedToSrcOperand(i);
      if (TiedOp != -1) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM.getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);
        }
      }

      assert(PhysReg && "VR not assigned a physical register?");
      RegInfo->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);

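      // Spill the new value back to its stack slot unless the def is dead.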
      if (!MO.isDead()) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
                          LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
        NextMII = next(MII);

        // Check to see if this is a noop copy.  If so, eliminate the
        // instruction before considering the dest reg to be changed.
        {
          unsigned Src, Dst, SrcSR, DstSR;
          if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
            ++NumDCE;
            DOUT << "Removing now-noop copy: " << MI;
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            UpdateKills(*LastStore, RegKills, KillOps, TRI);
            goto ProcessNextInst;
          }
        }
      }
    }
  ProcessNextInst:
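    // Record this instruction's distance (used by TransferDeadness) and,
    // unless the instruction was erased or we backtracked, refresh the kill
    // bookkeeping up to the next instruction.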
    DistanceMap.insert(std::make_pair(&MI, Dist++));
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
        UpdateKills(*II, RegKills, KillOps, TRI);
    }
    MII = NextMII;
  }

}

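// Factory for the spiller selected with -spiller (defaults to local).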
llvm::Spiller* llvm::createSpiller() {
  switch (SpillerOpt) {
  default: assert(0 && "Unreachable!");
  case local:
    return new LocalSpiller();
  case simple:
    return new SimpleSpiller();
  }
}