Spiller.cpp revision fd302b7056b9fab0b54f2fcad8e942500de2a50e
//===-- llvm/CodeGen/Spiller.cpp -  Spiller -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "Spiller.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumOmitted , "Number of reloads omitted");
STATISTIC(NumCopified, "Number of available reloads turned into copies");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDCE     , "Number of copies elided");
STATISTIC(NumSUnfold , "Number of stores unfolded");

namespace {
  enum SpillerName { simple, local };
}

static cl::opt<SpillerName>
SpillerOpt("spiller",
           cl::desc("Spiller to use: (default: local)"),
           cl::Prefix,
           cl::values(clEnumVal(simple, "simple spiller"),
                      clEnumVal(local,  "local spiller"),
                      clEnumValEnd),
           cl::init(local));
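
// Two spiller implementations follow. The simple spiller rewrites each
// virtual register in place, inserting a reload before every use and a
// store after every def of a spilled vreg. The local spiller (the default)
// also tracks which spilled values are live in physical registers within a
// basic block, so it can reuse them and elide redundant loads and stores.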

// ****************************** //
// Simple Spiller Implementation  //
// ****************************** //

Spiller::~Spiller() {}

bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
  DOUT << "********** REWRITE MACHINE CODE **********\n";
  DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  const TargetRegisterInfo &TRI = *TM.getRegisterInfo();

  // LoadedRegs - Keep track of which vregs are loaded, so that we only load
  // each vreg once (in the case where a spilled vreg is used by multiple
  // operands).  This is always smaller than the number of operands to the
  // current machine instr, so it should be small.
  std::vector<unsigned> LoadedRegs;

  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    DOUT << MBBI->getBasicBlock()->getName() << ":\n";
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
         MII != E; ++MII) {
      MachineInstr &MI = *MII;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (MO.isReg() && MO.getReg()) {
          if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
            unsigned VirtReg = MO.getReg();
            unsigned SubIdx = MO.getSubReg();
            unsigned PhysReg = VRM.getPhys(VirtReg);
            unsigned RReg = SubIdx ? TRI.getSubReg(PhysReg, SubIdx) : PhysReg;
            if (!VRM.isAssignedReg(VirtReg)) {
              int StackSlot = VRM.getStackSlot(VirtReg);
              const TargetRegisterClass* RC =
                                           MF.getRegInfo().getRegClass(VirtReg);

              if (MO.isUse() &&
                  std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
                           == LoadedRegs.end()) {
                TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
                MachineInstr *LoadMI = prior(MII);
                VRM.addSpillSlotUse(StackSlot, LoadMI);
                LoadedRegs.push_back(VirtReg);
                ++NumLoads;
                DOUT << '\t' << *LoadMI;
              }

              if (MO.isDef()) {
                TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
                                        StackSlot, RC);
                MachineInstr *StoreMI = next(MII);
                VRM.addSpillSlotUse(StackSlot, StoreMI);
                ++NumStores;
              }
            }
            MF.getRegInfo().setPhysRegUsed(RReg);
            MI.getOperand(i).setReg(RReg);
          } else {
            MF.getRegInfo().setPhysRegUsed(MO.getReg());
          }
        }
      }

      DOUT << '\t' << MI;
      LoadedRegs.clear();
    }
  }
  return true;
}

// ****************** //
// Utility Functions  //
// ****************** //

/// InvalidateKill - A MI that defines the specified register is being deleted,
/// invalidate the register kill information.
static void InvalidateKill(unsigned Reg, BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg]) {
    KillOps[Reg]->setIsKill(false);
    KillOps[Reg] = NULL;
    RegKills.reset(Reg);
  }
}

/// findSinglePredSuccessor - Return via reference a vector of machine basic
/// blocks each of which is a successor of the specified BB and has no other
/// predecessor.
static void findSinglePredSuccessor(MachineBasicBlock *MBB,
                                   SmallVectorImpl<MachineBasicBlock *> &Succs) {
  for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
         SE = MBB->succ_end(); SI != SE; ++SI) {
    MachineBasicBlock *SuccMBB = *SI;
    if (SuccMBB->pred_size() == 1)
      Succs.push_back(SuccMBB);
  }
}

/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill())
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (KillRegs)
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {
      RegKills.reset(Reg);
      KillOps[Reg] = NULL;
    }
  }
}

/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead. Also checks if
/// the def MI has other definition operands that are not dead; this is
/// returned by reference in HasLiveDef.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef) {
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (MO.isReg() && MO.isDef()) {
      if (MO.getReg() == Reg)
        DefOp = &MO;
      else if (!MO.isDead())
        HasLiveDef = true;
    }
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = &NewDef;
  ++I; ++E;
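  // Scan the instructions after the old def, up to and including NewDef,
  // stopping at the first instruction that references Reg.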
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true; // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // Def is dead!
    DefOp->setIsDead();
    return true;
  }
  return false;
}

/// UpdateKills - Track and update kill info. If a MI reads a register that is
/// marked kill, then it must be due to register reuse. Transfer the kill info
/// over.
static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps,
                        const TargetRegisterInfo* TRI) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->setIsKill(false);
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      if (!MI.isRegTiedToDefOperand(i))
        // Unless it's a two-address operand, this is the new kill.
        MO.setIsKill();
    }
    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
    }
  }

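  // A def of a register ends any pending kill of it, so clear the stale
  // kill info for the defined register and all of its aliases below.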
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
    // It also defines (or partially defines) aliases.
    for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
      RegKills.reset(*AS);
      KillOps[*AS] = NULL;
    }
  }
}

/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
///
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg));
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    assert(MO.isUse());
    unsigned SubIdx = MO.getSubReg();
    unsigned Phys = VRM.getPhys(VirtReg);
    assert(Phys);
    unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
    MO.setReg(RReg);
  }
  ++NumReMats;
}

/// findSuperReg - Find the super-register of SubReg in the given register
/// class, i.e. the register whose SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}

// ******************************** //
// Available Spills Implementation  //
// ******************************** //

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register. The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    I++;
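    // The map value packs the physical register in the upper bits with a
    // CanClobber flag in bit zero; only the flag is cleared here, so the
    // entry itself stays in the map.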
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n";
  }
}

/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases. The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}

/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " clobbered, invalidating ";
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
    else
      DOUT << "SS#" << SlotOrReMat << "\n";
  }
}

/// ClobberPhysReg - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}

/// AddAvailableRegsToLiveIn - Availability information is being kept across
/// the edge into the specified MBB. Add available physical registers as
/// potential live-ins. If they are reused in the MBB, they will be added to
/// the live-in set to keep the register scavenger and post-allocation
/// scheduler consistent.
void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
                                        BitVector &RegKills,
                                        std::vector<MachineOperand*> &KillOps) {
  std::set<unsigned> NotAvailable;
  for (std::multimap<unsigned, int>::iterator
         I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
       I != E; ++I) {
    unsigned Reg = I->first;
    const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg);
    // FIXME: A temporary workaround. We can't reuse available value if it's
    // not safe to move the def of the virtual register's class. e.g.
    // X86::RFP* register classes. Do not add it as a live-in.
    if (!TII->isSafeToMoveRegClassDefs(RC))
      // This is no longer available.
      NotAvailable.insert(Reg);
    else {
      MBB.addLiveIn(Reg);
      InvalidateKill(Reg, RegKills, KillOps);
    }

    // Skip over the same register.
    std::multimap<unsigned, int>::iterator NI = next(I);
    while (NI != E && NI->first == Reg) {
      ++I;
      ++NI;
    }
  }

  for (std::set<unsigned>::iterator I = NotAvailable.begin(),
         E = NotAvailable.end(); I != E; ++I) {
    ClobberPhysReg(*I);
    for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
       *SubRegs; ++SubRegs)
      ClobberPhysReg(*SubRegs);
  }
}

/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes.  This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
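  // The low bit of the map value is the CanClobber flag; shift it off to
  // recover the physical register.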
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}

// ************************** //
// Reuse Info Implementation  //
// ************************** //

/// GetRegForReload - We are about to emit a reload into PhysReg.  If there
/// is some other operand that is using the specified register, either pick
/// a new register to use, or evict the previous reload and use this reg.
unsigned ReuseInfo::GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                         AvailableSpills &Spills,
                         std::vector<MachineInstr*> &MaybeDeadStores,
                         SmallSet<unsigned, 8> &Rejected,
                         BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps,
                         VirtRegMap &VRM) {
  const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()
                               .getInstrInfo();

  if (Reuses.empty()) return PhysReg;  // This is most often empty.

  for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
    ReusedOp &Op = Reuses[ro];
    // If we find some other reuse that was supposed to use this register
    // exactly for its reload, we can change this reload to use ITS reload
    // register. That is, unless its reload register has already been
    // considered and subsequently rejected because it has also been reused
    // by another operand.
    if (Op.PhysRegReused == PhysReg &&
        Rejected.count(Op.AssignedPhysReg) == 0) {
      // Yup, use the reload register that we didn't use before.
      unsigned NewReg = Op.AssignedPhysReg;
      Rejected.insert(PhysReg);
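      // Adding PhysReg to Rejected guarantees that the recursion terminates:
      // each call permanently removes one register from consideration.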
      return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected,
                             RegKills, KillOps, VRM);
    } else {
      // Otherwise, we might also have a problem if a previously reused
      // value aliases the new register.  If so, codegen the previous reload
      // and use this one.
      unsigned PRRU = Op.PhysRegReused;
      const TargetRegisterInfo *TRI = Spills.getRegInfo();
      if (TRI->areAliases(PRRU, PhysReg)) {
        // Okay, we found out that an alias of a reused register
        // was used.  This isn't good because it means we have
        // to undo a previous reuse.
        MachineBasicBlock *MBB = MI->getParent();
        const TargetRegisterClass *AliasRC =
          MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

        // Copy Op out of the vector and remove it, we're going to insert an
        // explicit load for it.
        ReusedOp NewOp = Op;
        Reuses.erase(Reuses.begin()+ro);

        // Ok, we're going to try to reload the assigned physreg into the
        // slot that we were supposed to in the first place.  However, that
        // register could hold a reuse.  Check to see if it conflicts or
        // would prefer us to use a different register.
        unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg,
                                              MI, Spills, MaybeDeadStores,
                                          Rejected, RegKills, KillOps, VRM);

        MachineBasicBlock::iterator MII = MI;
        if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
          ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI,VRM);
        } else {
          TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg,
                                    NewOp.StackSlotOrReMat, AliasRC);
          MachineInstr *LoadMI = prior(MII);
          VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
          // Any stores to this stack slot are not dead anymore.
          MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
          ++NumLoads;
        }
        Spills.ClobberPhysReg(NewPhysReg);
        Spills.ClobberPhysReg(NewOp.PhysRegReused);

        unsigned SubIdx = MI->getOperand(NewOp.Operand).getSubReg();
        unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg;
        MI->getOperand(NewOp.Operand).setReg(RReg);

        Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
        --MII;
        UpdateKills(*MII, RegKills, KillOps, TRI);
        DOUT << '\t' << *MII;

        DOUT << "Reuse undone!\n";
        --NumReused;

        // Finally, PhysReg is now available, go ahead and use it.
        return PhysReg;
      }
    }
  }
  return PhysReg;
}

// ***************************** //
// Local Spiller Implementation  //
// ***************************** //

bool LocalSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
  RegInfo = &MF.getRegInfo();
  TRI = MF.getTarget().getRegisterInfo();
  TII = MF.getTarget().getInstrInfo();
  DOUT << "\n**** Local spiller rewriting function '"
       << MF.getFunction()->getName() << "':\n";
  DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
          " ****\n";
  DEBUG(MF.dump());

  // Spills - Keep track of which spilled values are available in physregs
  // so that we can choose to reuse the physregs instead of emitting
  // reloads. This is usually refreshed per basic block.
  AvailableSpills Spills(TRI, TII);

  // Keep track of kill information.
  BitVector RegKills(TRI->getNumRegs());
  std::vector<MachineOperand*> KillOps;
  KillOps.resize(TRI->getNumRegs(), NULL);

  // SinglePredSuccs - Successor blocks which have a single predecessor.
  SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
  // EarlyVisited - Blocks visited ahead of the depth-first order because the
  // availability information from their single predecessor was still live.
  SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;

  // Traverse the basic blocks depth first.
  MachineBasicBlock *Entry = MF.begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;
  for (df_ext_iterator<MachineBasicBlock*,
         SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    MachineBasicBlock *MBB = *DFI;
    if (!EarlyVisited.count(MBB))
      RewriteMBB(*MBB, VRM, Spills, RegKills, KillOps);

    // If this MBB is the only predecessor of a successor, keep the
    // availability information and visit it next.
    do {
      // Keep visiting single-predecessor successors as long as possible.
      SinglePredSuccs.clear();
      findSinglePredSuccessor(MBB, SinglePredSuccs);
      if (SinglePredSuccs.empty())
        MBB = 0;
      else {
        // FIXME: There may be more than one successor that has MBB as its
        // only predecessor; we currently only follow the first one.
        MBB = SinglePredSuccs[0];
        if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
          Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
          RewriteMBB(*MBB, VRM, Spills, RegKills, KillOps);
        }
      }
    } while (MBB);

    // Clear the availability info.
    Spills.clear();
  }

  DOUT << "**** Post Machine Instrs ****\n";
  DEBUG(MF.dump());

  // Mark unused spill slots.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  int SS = VRM.getLowSpillSlot();
  if (SS != VirtRegMap::NO_STACK_SLOT)
    for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
      if (!VRM.isSpillSlotUsed(SS)) {
        MFI->RemoveStackObject(SS);
        ++NumDSS;
      }

  return true;
}

/// PrepForUnfoldOpti - Turn a store folding instruction into a load folding
/// instruction. e.g.
///     xorl  %edi, %eax
///     movl  %eax, -32(%ebp)
///     movl  -36(%ebp), %eax
///     orl   %eax, -32(%ebp)
/// ==>
///     xorl  %edi, %eax
///     orl   -36(%ebp), %eax
///     mov   %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MII,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                    AvailableSpills &Spills,
                                    BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps,
                                    VirtRegMap &VRM) {
  MachineFunction &MF = *MBB.getParent();
  MachineInstr &MI = *MII;
  unsigned UnfoldedOpc = 0;
  unsigned UnfoldPR = 0;
  unsigned UnfoldVR = 0;
  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
    // Only transform a MI that folds a single register.
    if (UnfoldedOpc)
      return false;
    UnfoldVR = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    // MI2VirtMap can be updated, which invalidates the iterator, so
    // increment the iterator first.
    ++I;
    if (VRM.isAssignedReg(UnfoldVR))
      continue;
    // If this reference is not a use, any previous store is now dead.
    // Otherwise, the store to this stack slot is not dead anymore.
    FoldedSS = VRM.getStackSlot(UnfoldVR);
    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
    if (DeadStore && (MR & VirtRegMap::isModRef)) {
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
      if (!PhysReg || !DeadStore->readsRegister(PhysReg))
        continue;
      UnfoldPR = PhysReg;
      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                    false, true);
    }
  }

  if (!UnfoldedOpc)
    return false;

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
      continue;
    if (VRM.isAssignedReg(VirtReg)) {
      unsigned PhysReg = VRM.getPhys(VirtReg);
      if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
    } else if (VRM.isReMaterialized(VirtReg))
      continue;
    int SS = VRM.getStackSlot(VirtReg);
    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
    if (PhysReg) {
      if (TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
      continue;
    }
    if (VRM.hasPhys(VirtReg)) {
      PhysReg = VRM.getPhys(VirtReg);
      if (!TRI->regsOverlap(PhysReg, UnfoldPR))
        continue;
    }

    // Ok, we'll need to reload the value into a register which makes
    // it impossible to perform the store unfolding optimization later.
    // Let's see if it is possible to fold the load if the store is
    // unfolded. This allows us to perform the store unfolding
    // optimization.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
      assert(NewMIs.size() == 1);
      MachineInstr *NewMI = NewMIs.back();
      NewMIs.clear();
      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
      assert(Idx != -1);
      SmallVector<unsigned, 1> Ops;
      Ops.push_back(Idx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
      if (FoldedMI) {
        VRM.addSpillSlotUse(SS, FoldedMI);
        if (!VRM.hasPhys(UnfoldVR))
          VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
        VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
        MII = MBB.insert(MII, FoldedMI);
        InvalidateKills(MI, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(&MI);
        MBB.erase(&MI);
        MF.DeleteMachineInstr(NewMI);
        return true;
      }
      MF.DeleteMachineInstr(NewMI);
    }
  }
  return false;
}

/// CommuteToFoldReload -
/// Look for
/// r1 = load fi#1
/// r1 = op r1, r2<kill>
/// store r1, fi#1
///
/// If op is commutable and r2 is killed, then we can xform these to
/// r2 = op r2, fi#1
/// store r2, fi#1
bool LocalSpiller::CommuteToFoldReload(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MII,
                                    unsigned VirtReg, unsigned SrcReg, int SS,
                                    AvailableSpills &Spills,
                                    BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps,
                                    const TargetRegisterInfo *TRI,
                                    VirtRegMap &VRM) {
  if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
    return false;

  MachineFunction &MF = *MBB.getParent();
  MachineInstr &MI = *MII;
  MachineBasicBlock::iterator DefMII = prior(MII);
  MachineInstr *DefMI = DefMII;
  const TargetInstrDesc &TID = DefMI->getDesc();
  unsigned NewDstIdx;
  if (DefMII != MBB.begin() &&
      TID.isCommutable() &&
      TII->CommuteChangesDestination(DefMI, NewDstIdx)) {
    MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
    unsigned NewReg = NewDstMO.getReg();
    if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
      return false;
    MachineInstr *ReloadMI = prior(DefMII);
    int FrameIdx;
    unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
    if (DestReg != SrcReg || FrameIdx != SS)
      return false;
    int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
    if (UseIdx == -1)
      return false;
    unsigned DefIdx;
    if (!MI.isRegTiedToDefOperand(UseIdx, &DefIdx))
      return false;
    assert(DefMI->getOperand(DefIdx).isReg() &&
           DefMI->getOperand(DefIdx).getReg() == SrcReg);

    // Now commute def instruction.
    MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
    if (!CommutedMI)
      return false;
    SmallVector<unsigned, 1> Ops;
    Ops.push_back(NewDstIdx);
    MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
    // CommutedMI is no longer needed since foldMemoryOperand returns a new MI.
    MF.DeleteMachineInstr(CommutedMI);
    if (!FoldedMI)
      return false;

    VRM.addSpillSlotUse(SS, FoldedMI);
    VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
    // Insert new def MI and spill MI.
    const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
    TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
    MII = prior(MII);
    MachineInstr *StoreMI = MII;
    VRM.addSpillSlotUse(SS, StoreMI);
    VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
    MII = MBB.insert(MII, FoldedMI);  // Update MII to backtrack.

    // Delete all 3 old instructions.
    InvalidateKills(*ReloadMI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(ReloadMI);
    MBB.erase(ReloadMI);
    InvalidateKills(*DefMI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(DefMI);
    MBB.erase(DefMI);
    InvalidateKills(MI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(&MI);
    MBB.erase(&MI);

    // If NewReg was previously holding value of some SS, it's now clobbered.
    // This has to be done now because it's a physical register. When this
    // instruction is re-visited, it's ignored.
    Spills.ClobberPhysReg(NewReg);

    ++NumCommutes;
    return true;
  }

  return false;
}

/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
/// the last store to the same slot is now dead. If so, remove the last store.
void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MII,
                                  int Idx, unsigned PhysReg, int StackSlot,
                                  const TargetRegisterClass *RC,
                                  bool isAvailable, MachineInstr *&LastStore,
                                  AvailableSpills &Spills,
                                  SmallSet<MachineInstr*, 4> &ReMatDefs,
                                  BitVector &RegKills,
                                  std::vector<MachineOperand*> &KillOps,
                                  VirtRegMap &VRM) {
  TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
  MachineInstr *StoreMI = next(MII);
  VRM.addSpillSlotUse(StackSlot, StoreMI);
  DOUT << "Store:\t" << *StoreMI;

  // If there is a dead store to this stack slot, nuke it now.
  if (LastStore) {
    DOUT << "Removed dead store:\t" << *LastStore;
    ++NumDSE;
    SmallVector<unsigned, 2> KillRegs;
    InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
    MachineBasicBlock::iterator PrevMII = LastStore;
    bool CheckDef = PrevMII != MBB.begin();
    if (CheckDef)
      --PrevMII;
    VRM.RemoveMachineInstrFromMaps(LastStore);
    MBB.erase(LastStore);
    if (CheckDef) {
      // Look at defs of killed registers on the store. Mark the defs
      // as dead since the store has been deleted and they aren't
      // being reused.
      for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
        bool HasOtherDef = false;
        if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
          MachineInstr *DeadDef = PrevMII;
          if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
            // FIXME: This assumes a remat def does not have side
            // effects.
            VRM.RemoveMachineInstrFromMaps(DeadDef);
            MBB.erase(DeadDef);
            ++NumDRM;
          }
        }
      }
    }
  }

  LastStore = next(MII);

  // If the stack slot value was previously available in some other
  // register, change it now.  Otherwise, make the register available,
  // in PhysReg.
  Spills.ModifyStackSlotOrReMat(StackSlot);
  Spills.ClobberPhysReg(PhysReg);
  Spills.addAvailable(StackSlot, PhysReg, isAvailable);
  ++NumStores;
}

/// TransferDeadness - An identity copy definition is dead and it's being
/// removed. Find the last def or use and mark it as dead / kill.
void LocalSpiller::TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                                    unsigned Reg, BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps) {
  int LastUDDist = -1;
  MachineInstr *LastUDMI = NULL;
  for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
         RE = RegInfo->reg_end(); RI != RE; ++RI) {
    MachineInstr *UDMI = &*RI;
    if (UDMI->getParent() != MBB)
      continue;
    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
    if (DI == DistanceMap.end() || DI->second > CurDist)
      continue;
    if ((int)DI->second < LastUDDist)
      continue;
    LastUDDist = DI->second;
    LastUDMI = UDMI;
  }

  if (LastUDMI) {
    MachineOperand *LastUD = NULL;
    for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = LastUDMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;
      if (!LastUD || (LastUD->isUse() && MO.isDef()))
        LastUD = &MO;
      if (LastUDMI->isRegTiedToDefOperand(i))
        return;
    }
    if (LastUD->isDef())
      LastUD->setIsDead();
    else {
      LastUD->setIsKill();
      RegKills.set(Reg);
      KillOps[Reg] = LastUD;
    }
  }
}

/// RewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them.  If possible, avoid reloading vregs.
void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
                              AvailableSpills &Spills, BitVector &RegKills,
                              std::vector<MachineOperand*> &KillOps) {
  DOUT << "\n**** Local spiller rewriting MBB '"
       << MBB.getBasicBlock()->getName() << "':\n";

  MachineFunction &MF = *MBB.getParent();

  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store.  If the stack slot value is never read
  // (because the value was used from some available register, for example), and
  // subsequently stored to, the original store is dead.  This map keeps track
  // of inserted stores that are not used.  If we see a subsequent store to the
  // same stack slot, the original store is deleted.
  std::vector<MachineInstr*> MaybeDeadStores;
  MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

  // ReMatDefs - These are rematerializable def MIs which are not deleted.
  SmallSet<MachineInstr*, 4> ReMatDefs;

  // Clear kill info.
  SmallSet<unsigned, 2> KilledMIRegs;
  RegKills.reset();
  KillOps.clear();
  KillOps.resize(TRI->getNumRegs(), NULL);

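  // DistanceMap records the position of each instruction within the block
  // as it is visited; TransferDeadness uses it to find the last use or def
  // of a register.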
  unsigned Dist = 0;
  DistanceMap.clear();
  for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
       MII != E; ) {
    MachineBasicBlock::iterator NextMII = MII; ++NextMII;

    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    bool Erased = false;
    bool BackTracked = false;
    if (PrepForUnfoldOpti(MBB, MII,
                          MaybeDeadStores, Spills, RegKills, KillOps, VRM))
      NextMII = next(MII);

    MachineInstr &MI = *MII;

    if (VRM.hasEmergencySpills(&MI)) {
      // Spill physical register(s) in the rare case the allocator has run out
      // of registers to allocate.
      SmallSet<int, 4> UsedSS;
      std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
      for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
        unsigned PhysReg = EmSpills[i];
        const TargetRegisterClass *RC =
          TRI->getPhysicalRegisterRegClass(PhysReg);
        assert(RC && "Unable to determine register class!");
        int SS = VRM.getEmergencySpillSlot(RC);
        if (UsedSS.count(SS))
          assert(0 && "Need to spill more than one physical register!");
        UsedSS.insert(SS);
        TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
        MachineInstr *StoreMI = prior(MII);
        VRM.addSpillSlotUse(SS, StoreMI);
        TII->loadRegFromStackSlot(MBB, next(MII), PhysReg, SS, RC);
        MachineInstr *LoadMI = next(MII);
        VRM.addSpillSlotUse(SS, LoadMI);
        ++NumPSpills;
      }
      NextMII = next(MII);
    }

    // Insert restores here if asked to.
    if (VRM.isRestorePt(&MI)) {
      std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
      for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
        unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
        if (!VRM.getPreSplitReg(VirtReg))
          continue; // Split interval spilled again.
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);

        // Check if the value being restored is available. If so, it must be
        // from a predecessor BB that fell through into this BB. We do not
        // expect:
        // BB1:
        // r1 = load fi#1
        // ...
        //    = r1<kill>
        // ... # r1 not clobbered
        // ...
        //    = load fi#1
        bool DoReMat = VRM.isReMaterialized(VirtReg);
        int SSorRMId = DoReMat
          ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
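        // SSorRMId is either a stack slot index (<= VirtRegMap::MAX_STACK_SLOT)
        // or a re-materialization id (> VirtRegMap::MAX_STACK_SLOT).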
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
        if (InReg == Phys) {
          // If the value is already available in the expected register, save
          // a reload / remat.
          if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << SSorRMId;
          DOUT << " from physreg "
               << TRI->getName(InReg) << " for vreg"
               << VirtReg <<" instead of reloading into physreg "
               << TRI->getName(Phys) << "\n";
          ++NumOmitted;
          continue;
        } else if (InReg && InReg != Phys) {
          if (SSorRMId > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << SSorRMId;
1014          DOUT << " from physreg "
1015               << TRI->getName(InReg) << " for vreg"
1016               << VirtReg <<" by copying it into physreg "
1017               << TRI->getName(Phys) << "\n";
1018
1019          // If the reloaded / remat value is available in another register,
1020          // copy it to the desired register.
1021          TII->copyRegToReg(MBB, &MI, Phys, InReg, RC, RC);
1022
1023          // This invalidates Phys.
1024          Spills.ClobberPhysReg(Phys);
1025          // Remember it's available.
1026          Spills.addAvailable(SSorRMId, Phys);
1027
1028          // Mark is killed.
1029          MachineInstr *CopyMI = prior(MII);
1030          MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg);
1031          KillOpnd->setIsKill();
1032          UpdateKills(*CopyMI, RegKills, KillOps, TRI);
1033
1034          DOUT << '\t' << *CopyMI;
1035          ++NumCopified;
1036          continue;
1037        }
1038
1039        if (VRM.isReMaterialized(VirtReg)) {
1040          ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM);
1041        } else {
1042          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
1043          TII->loadRegFromStackSlot(MBB, &MI, Phys, SSorRMId, RC);
1044          MachineInstr *LoadMI = prior(MII);
1045          VRM.addSpillSlotUse(SSorRMId, LoadMI);
1046          ++NumLoads;
1047        }
1048
1049        // This invalidates Phys.
1050        Spills.ClobberPhysReg(Phys);
1051        // Remember it's available.
1052        Spills.addAvailable(SSorRMId, Phys);
1053
1054        UpdateKills(*prior(MII), RegKills, KillOps, TRI);
1055        DOUT << '\t' << *prior(MII);
1056      }
1057    }
1058
1059    // Insert spills here if asked to.
1060    if (VRM.isSpillPt(&MI)) {
1061      std::vector<std::pair<unsigned,bool> > &SpillRegs =
1062        VRM.getSpillPtSpills(&MI);
1063      for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
1064        unsigned VirtReg = SpillRegs[i].first;
1065        bool isKill = SpillRegs[i].second;
1066        if (!VRM.getPreSplitReg(VirtReg))
1067          continue; // Split interval spilled again.
1068        const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
1069        unsigned Phys = VRM.getPhys(VirtReg);
1070        int StackSlot = VRM.getStackSlot(VirtReg);
1071        TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
1072        MachineInstr *StoreMI = next(MII);
1073        VRM.addSpillSlotUse(StackSlot, StoreMI);
1074        DOUT << "Store:\t" << *StoreMI;
1075        VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
1076      }
1077      NextMII = next(MII);
1078    }
1079
1080    /// ReusedOperands - Keep track of operand reuse in case we need to undo
1081    /// reuse.
1082    ReuseInfo ReusedOperands(MI, TRI);
1083    SmallVector<unsigned, 4> VirtUseOps;
1084    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1085      MachineOperand &MO = MI.getOperand(i);
1086      if (!MO.isReg() || MO.getReg() == 0)
1087        continue;   // Ignore non-register operands.
1088
1089      unsigned VirtReg = MO.getReg();
1090      if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
1091        // Ignore physregs for spilling, but remember that it is used by this
1092        // function.
1093        RegInfo->setPhysRegUsed(VirtReg);
1094        continue;
1095      }
1096
1097      // We want to process implicit virtual register uses first.
1098      if (MO.isImplicit())
1099        // If the virtual register is implicitly defined, emit a implicit_def
1100        // before so scavenger knows it's "defined".
1101        VirtUseOps.insert(VirtUseOps.begin(), i);
1102      else
1103        VirtUseOps.push_back(i);
1104    }
1105
1106    // Process all of the spilled uses and all non spilled reg references.
1107    SmallVector<int, 2> PotentialDeadStoreSlots;
1108    KilledMIRegs.clear();
1109    for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
1110      unsigned i = VirtUseOps[j];
1111      MachineOperand &MO = MI.getOperand(i);
1112      unsigned VirtReg = MO.getReg();
1113      assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
1114             "Not a virtual register?");
1115
1116      unsigned SubIdx = MO.getSubReg();
1117      if (VRM.isAssignedReg(VirtReg)) {
1118        // This virtual register was assigned a physreg!
1119        unsigned Phys = VRM.getPhys(VirtReg);
1120        RegInfo->setPhysRegUsed(Phys);
1121        if (MO.isDef())
1122          ReusedOperands.markClobbered(Phys);
1123        unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
1124        MI.getOperand(i).setReg(RReg);
1125        if (VRM.isImplicitlyDefined(VirtReg))
1126          BuildMI(MBB, &MI, MI.getDebugLoc(),
1127                  TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
1128        continue;
1129      }
1130
1131      // This virtual register is now known to be a spilled value.
1132      if (!MO.isUse())
1133        continue;  // Handle defs in the loop below (handle use&def here though)
1134
1135      bool DoReMat = VRM.isReMaterialized(VirtReg);
1136      int SSorRMId = DoReMat
1137        ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
1138      int ReuseSlot = SSorRMId;
1139
1140      // Check to see if this stack slot is available.
1141      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
1142
1143      // If this is a sub-register use, make sure the reuse register is in the
1144      // right register class. For example, for x86 not all of the 32-bit
1145      // registers have accessible sub-registers.
1146      // Similarly so for EXTRACT_SUBREG. Consider this:
1147      // EDI = op
1148      // MOV32_mr fi#1, EDI
1149      // ...
1150      //       = EXTRACT_SUBREG fi#1
1151      // fi#1 is available in EDI, but it cannot be reused because it's not in
1152      // the right register file.
1153      if (PhysReg &&
1154          (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
1155        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
1156        if (!RC->contains(PhysReg))
1157          PhysReg = 0;
1158      }
1159
1160      if (PhysReg) {
1161        // This spilled operand might be part of a two-address operand.  If this
1162        // is the case, then changing it will necessarily require changing the
1163        // def part of the instruction as well.  However, in some cases, we
1164        // aren't allowed to modify the reused register.  If none of these cases
1165        // apply, reuse it.
1166        bool CanReuse = true;
1167        bool isTied = MI.isRegTiedToDefOperand(i);
1168        if (isTied) {
1169          // Okay, we have a two address operand.  We can reuse this physreg as
1170          // long as we are allowed to clobber the value and there isn't an
1171          // earlier def that has already clobbered the physreg.
1172          CanReuse = !ReusedOperands.isClobbered(PhysReg) &&
1173            Spills.canClobberPhysReg(PhysReg);
1174        }
1175
1176        if (CanReuse) {
1177          // If this stack slot value is already available, reuse it!
1178          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
1179            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
1180          else
1181            DOUT << "Reusing SS#" << ReuseSlot;
1182          DOUT << " from physreg "
1183               << TRI->getName(PhysReg) << " for vreg"
1184               << VirtReg <<" instead of reloading into physreg "
1185               << TRI->getName(VRM.getPhys(VirtReg)) << "\n";
1186          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
1187          MI.getOperand(i).setReg(RReg);
1188
1189          // The only technical detail we have is that we don't know that
1190          // PhysReg won't be clobbered by a reloaded stack slot that occurs
1191          // later in the instruction.  In particular, consider 'op V1, V2'.
1192          // If V1 is available in physreg R0, we would choose to reuse it
1193          // here, instead of reloading it into the register the allocator
1194          // indicated (say R1).  However, V2 might have to be reloaded
1195          // later, and it might indicate that it needs to live in R0.  When
1196          // this occurs, we need to have information available that
1197          // indicates it is safe to use R1 for the reload instead of R0.
1198          //
1199          // To further complicate matters, we might conflict with an alias,
1200          // or R0 and R1 might not be compatible with each other.  In this
1201          // case, we actually insert a reload for V1 in R1, ensuring that
1202          // we can get at R0 or its alias.
1203          ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
1204                                  VRM.getPhys(VirtReg), VirtReg);
1205          if (isTied)
1206            // Only mark it clobbered if this is a use&def operand.
1207            ReusedOperands.markClobbered(PhysReg);
1208          ++NumReused;
1209
1210          if (MI.getOperand(i).isKill() &&
1211              ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
1212
1213            // The store of this spilled value is potentially dead, but we
1214            // won't know for certain until we've confirmed that the re-use
1215            // above is valid, which means waiting until the other operands
1216            // are processed. For now we just track the spill slot, we'll
1217            // remove it after the other operands are processed if valid.
1218
1219            PotentialDeadStoreSlots.push_back(ReuseSlot);
1220          }
1221
1222          // Mark is isKill if it's there no other uses of the same virtual
1223          // register and it's not a two-address operand. IsKill will be
1224          // unset if reg is reused.
1225          if (!isTied && KilledMIRegs.count(VirtReg) == 0) {
1226            MI.getOperand(i).setIsKill();
1227            KilledMIRegs.insert(VirtReg);
1228          }
1229
1230          continue;
1231        }  // CanReuse
1232
1233        // Otherwise we have a situation where we have a two-address instruction
1234        // whose mod/ref operand needs to be reloaded.  This reload is already
1235        // available in some register "PhysReg", but if we used PhysReg as the
1236        // operand to our 2-addr instruction, the instruction would modify
1237        // PhysReg.  This isn't cool if something later uses PhysReg and expects
1238        // to get its initial value.
1239        //
1240        // To avoid this problem, and to avoid doing a load right after a store,
1241        // we emit a copy from PhysReg into the designated register for this
1242        // operand.
1243        unsigned DesignatedReg = VRM.getPhys(VirtReg);
1244        assert(DesignatedReg && "Must map virtreg to physreg!");
1245
1246        // Note that, if we reused a register for a previous operand, the
1247        // register we want to reload into might not actually be
1248        // available.  If this occurs, use the register indicated by the
1249        // reuser.
1250        if (ReusedOperands.hasReuses())
1251          DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
1252                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);
1253
1254        // If the mapped designated register is actually the physreg we have
1255        // incoming, we don't need to inserted a dead copy.
1256        if (DesignatedReg == PhysReg) {
1257          // If this stack slot value is already available, reuse it!
1258          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
1259            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
1260          else
1261            DOUT << "Reusing SS#" << ReuseSlot;
1262          DOUT << " from physreg " << TRI->getName(PhysReg)
1263               << " for vreg" << VirtReg
1264               << " instead of reloading into same physreg.\n";
1265          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
1266          MI.getOperand(i).setReg(RReg);
1267          ReusedOperands.markClobbered(RReg);
1268          ++NumReused;
1269          continue;
1270        }
1271
1272        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
1273        RegInfo->setPhysRegUsed(DesignatedReg);
1274        ReusedOperands.markClobbered(DesignatedReg);
1275        TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);
1276
1277        MachineInstr *CopyMI = prior(MII);
1278        UpdateKills(*CopyMI, RegKills, KillOps, TRI);
1279
1280        // This invalidates DesignatedReg.
1281        Spills.ClobberPhysReg(DesignatedReg);
1282
1283        Spills.addAvailable(ReuseSlot, DesignatedReg);
1284        unsigned RReg =
1285          SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
1286        MI.getOperand(i).setReg(RReg);
1287        DOUT << '\t' << *prior(MII);
1288        ++NumReused;
1289        continue;
1290      } // if (PhysReg)
1291
1292      // Otherwise, reload it and remember that we have it.
1293      PhysReg = VRM.getPhys(VirtReg);
1294      assert(PhysReg && "Must map virtreg to physreg!");
1295
1296      // Note that, if we reused a register for a previous operand, the
1297      // register we want to reload into might not actually be
1298      // available.  If this occurs, use the register indicated by the
1299      // reuser.
1300      if (ReusedOperands.hasReuses())
1301        PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
1302                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);
1303
1304      RegInfo->setPhysRegUsed(PhysReg);
1305      ReusedOperands.markClobbered(PhysReg);
1306      if (DoReMat) {
1307        ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM);
1308      } else {
1309        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
1310        TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
1311        MachineInstr *LoadMI = prior(MII);
1312        VRM.addSpillSlotUse(SSorRMId, LoadMI);
1313        ++NumLoads;
1314      }
1315      // This invalidates PhysReg.
1316      Spills.ClobberPhysReg(PhysReg);
1317
1318      // Any stores to this stack slot are not dead anymore.
1319      if (!DoReMat)
1320        MaybeDeadStores[SSorRMId] = NULL;
1321      Spills.addAvailable(SSorRMId, PhysReg);
1322      // Assumes this is the last use. IsKill will be unset if reg is reused
1323      // unless it's a two-address operand.
      if (!MI.isRegTiedToDefOperand(i) &&
          KilledMIRegs.count(VirtReg) == 0) {
        MI.getOperand(i).setIsKill();
        KilledMIRegs.insert(VirtReg);
      }
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      MI.getOperand(i).setReg(RReg);
      UpdateKills(*prior(MII), RegKills, KillOps, TRI);
      DOUT << '\t' << *prior(MII);
    }

    // Ok - now we can remove stores that have been confirmed dead.
    for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
      // This was the last use and the spilled value is still available
      // for reuse. That means the spill was unnecessary!
      int PDSSlot = PotentialDeadStoreSlots[j];
      MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
      if (DeadStore) {
        DOUT << "Removed dead store:\t" << *DeadStore;
        InvalidateKills(*DeadStore, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(DeadStore);
        MBB.erase(DeadStore);
        MaybeDeadStores[PDSSlot] = NULL;
        ++NumDSE;
      }
    }

    DOUT << '\t' << MI;

    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.
    SmallSet<int, 2> FoldedSS;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      DOUT << "Folded vreg: " << VirtReg << "  MR: " << MR;

      // MI2VirtMap can be updated, which would invalidate the iterator, so
      // increment it first.
      ++I;
      int SS = VRM.getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
      DOUT << " - StackSlot: " << SS << "\n";

      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DOUT << "Promoted Load To Copy: " << MI;
            if (DestReg != InReg) {
              const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
              TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
              MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
              unsigned SubIdx = DefMO->getSubReg();
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              NextMII = &MI;
              --NextMII;  // backtrack to the copy.
              // Propagate the sub-register index over.
              if (SubIdx) {
                DefMO = NextMII->findRegisterDefOperand(DestReg);
                DefMO->setSubReg(SubIdx);
              }

              // Mark the copy's source register as killed.
              MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg);
              KillOpnd->setIsKill();

              BackTracked = true;
            } else {
              DOUT << "Removing now-noop copy: " << MI;
              // Unset last kill since it's being reused.
              InvalidateKill(InReg, RegKills, KillOps);
              Spills.disallowClobberPhysReg(InReg);
            }

            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            goto ProcessNextInst;
          }
        } else {
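          // The folded reference is not a plain reload. If the slot's value
          // is still available in a register, try to unfold the memory
          // operand so the instruction reads that register directly.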
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          if (PhysReg &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
            MBB.insert(MII, NewMIs[0]);
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            goto ProcessNextInst;
          }
        }
      }

      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      if (DeadStore) {
        bool isDead = !(MR & VirtRegMap::isRef);
        MachineInstr *NewStore = NULL;
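        // MI both reads and modifies the slot. If its value is still in a
        // reusable register, try to unfold MI's store half: the unfolding
        // yields an explicit store that can stand in for the earlier one.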
        if (MR & VirtRegMap::isModRef) {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          // We can reuse this physreg as long as we are allowed to clobber
          // the value and there isn't an earlier def that has already clobbered
          // the physreg.
          if (PhysReg &&
              !ReusedOperands.isClobbered(PhysReg) &&
              Spills.canClobberPhysReg(PhysReg) &&
              !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
            MachineOperand *KillOpnd =
              DeadStore->findRegisterUseOperand(PhysReg, true);
            // Note, if the store is storing a sub-register, it's possible the
            // super-register is needed below.
            if (KillOpnd && !KillOpnd->getSubReg() &&
                TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
              MBB.insert(MII, NewMIs[0]);
              NewStore = NewMIs[1];
              MBB.insert(MII, NewStore);
              VRM.addSpillSlotUse(SS, NewStore);
              InvalidateKills(MI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              --NextMII;  // Backtrack over the new store...
              --NextMII;  // ...to the unfolded instruction.
              BackTracked = true;
              isDead = true;
              ++NumSUnfold;
            }
          }
        }

        if (isDead) {  // Previous store is dead.
          // If we get here, the store is dead; nuke it now.
          DOUT << "Removed dead store:\t" << *DeadStore;
          InvalidateKills(*DeadStore, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(DeadStore);
          MBB.erase(DeadStore);
          if (!NewStore)
            ++NumDSE;
        }

        MaybeDeadStores[SS] = NULL;
        if (NewStore) {
          // Treat this store as a spill merged into a copy. That makes the
          // stack slot value available.
          VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
          goto ProcessNextInst;
        }
      }

      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy). If
        // so, realize that the vreg is available now, and add the store to the
        // MaybeDeadStore info.
        int StackSlot;
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");

            if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
                                    Spills, RegKills, KillOps, TRI, VRM)) {
              NextMII = next(MII);
              BackTracked = true;
              goto ProcessNextInst;
            }

            // Okay, this is certainly a store of SrcReg to [StackSlot].  Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now.  Otherwise, make the value available
            // in SrcReg.
            Spills.addAvailable(StackSlot, SrcReg, MI.killsRegister(SrcReg));
          }
        }
      }
    }

    // Process all of the spilled defs.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isReg() && MO.getReg() && MO.isDef()))
        continue;

      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy.  If so, eliminate the
        // instruction before considering the dest reg to be changed.
        unsigned Src, Dst, SrcSR, DstSR;
        if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
          ++NumDCE;
          DOUT << "Removing now-noop copy: " << MI;
          SmallVector<unsigned, 2> KillRegs;
          InvalidateKills(MI, RegKills, KillOps, &KillRegs);
          if (MO.isDead() && !KillRegs.empty()) {
            // Source register or an implicit super/sub-register use is killed.
            assert(KillRegs[0] == Dst ||
                   TRI->isSubRegister(KillRegs[0], Dst) ||
                   TRI->isSuperRegister(KillRegs[0], Dst));
            // Last def is now dead.
            TransferDeadness(&MBB, Dist, Src, RegKills, KillOps);
          }
          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          Erased = true;
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }

        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register.  If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }

      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM.isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM.getStackSlot(VirtReg);
      const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
      unsigned PhysReg;
      int TiedOp = MI.getDesc().findTiedToSrcOperand(i);
      if (TiedOp != -1) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM.getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);
        }
      }

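      // Rewrite the def to the chosen physical (sub-)register; unless the
      // def is dead, the value must also be stored back to the stack slot.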
      assert(PhysReg && "VR not assigned a physical register?");
      RegInfo->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);

      if (!MO.isDead()) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
                          LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
        NextMII = next(MII);

        // Check to see if this is a noop copy.  If so, eliminate the
        // instruction before considering the dest reg to be changed.
        {
          unsigned Src, Dst, SrcSR, DstSR;
          if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
            ++NumDCE;
            DOUT << "Removing now-noop copy: " << MI;
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            UpdateKills(*LastStore, RegKills, KillOps, TRI);
            goto ProcessNextInst;
          }
        }
      }
    }
  ProcessNextInst:
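    // Record this instruction's position for the deadness-transfer logic,
    // then refresh kill info unless MI was erased or we backtracked.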
    DistanceMap.insert(std::make_pair(&MI, Dist++));
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
        UpdateKills(*II, RegKills, KillOps, TRI);
    }
    MII = NextMII;
  }

}

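// Factory: instantiate whichever spiller was selected with -spiller.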
llvm::Spiller* llvm::createSpiller() {
  switch (SpillerOpt) {
  default: assert(0 && "Unreachable!");
  case local:
    return new LocalSpiller();
  case simple:
    return new SimpleSpiller();
  }
}
