LiveIntervalAnalysis.cpp revision 4ed4329c37f5a64c16ad4dc1960cbcb66b7118d4
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

// Hidden options to help with debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

static cl::opt<bool> EnableAggressiveRemat("aggressive-remat", cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  MBB2IdxMap.clear();
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!ClonedMIs.empty()) {
    MachineInstr *MI = ClonedMIs.back();
    ClonedMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

void LiveIntervals::computeNumbering() {
  Index2MiMap OldI2MI = i2miMap_;
  std::vector<IdxMBBPair> OldI2MBB = Idx2MBBMap;

  Idx2MBBMap.clear();
  MBB2IdxMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();

  FunctionSize = 0;

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    // Insert an empty slot at the beginning of each block.
    MIIndex += InstrSlots::NUM;
    i2miMap_.push_back(0);

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
      FunctionSize++;

      // Insert max(1, numdefs) empty slots after every instruction.
      unsigned Slots = I->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;
      while (Slots--)
        i2miMap_.push_back(0);
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  if (!OldI2MI.empty())
    for (iterator OI = begin(), OE = end(); OI != OE; ++OI) {
      for (LiveInterval::iterator LI = OI->second->begin(),
           LE = OI->second->end(); LI != LE; ++LI) {

        // Remap the start index of the live range to the corresponding new
        // number, or our best guess at what it _should_ correspond to if the
        // original instruction has been erased.  This is either the following
        // instruction or its predecessor.
        unsigned index = LI->start / InstrSlots::NUM;
        unsigned offset = LI->start % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->start);
          // Take the pair containing the index.
          std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

          LI->start = getMBBStartIdx(J->second);
        } else {
          LI->start = mi2iMap_[OldI2MI[index]] + offset;
        }

        // Remap the ending index in the same way that we remapped the start,
        // except for the final step where we always map to the immediately
        // following instruction.
        index = (LI->end - 1) / InstrSlots::NUM;
        offset = LI->end % InstrSlots::NUM;
        if (offset == InstrSlots::LOAD) {
          // VReg dies at end of block.
          std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), LI->end);
          --I;

          LI->end = getMBBEndIdx(I->second) + 1;
        } else {
          unsigned idx = index;
          while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

          if (index != OldI2MI.size())
            LI->end = mi2iMap_[OldI2MI[index]] + (idx == index ? offset : 0);
          else
            LI->end = InstrSlots::NUM * i2miMap_.size();
        }
      }

      for (LiveInterval::vni_iterator VNI = OI->second->vni_begin(),
           VNE = OI->second->vni_end(); VNI != VNE; ++VNI) {
        VNInfo* vni = *VNI;

        // Remap the VNInfo def index, which works the same as the
        // start indices above. VN's with special sentinel defs
        // don't need to be remapped.
        if (vni->def != ~0U && vni->def != ~1U) {
          unsigned index = vni->def / InstrSlots::NUM;
          unsigned offset = vni->def % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
                  std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->def);
            // Take the pair containing the index.
            std::vector<IdxMBBPair>::const_iterator J =
                    (I == OldI2MBB.end() && OldI2MBB.size()>0) ? (I-1): I;

            vni->def = getMBBStartIdx(J->second);
          } else {
            vni->def = mi2iMap_[OldI2MI[index]] + offset;
          }
        }

        // Remap the VNInfo kill indices, which works the same as
        // the end indices above.
        for (size_t i = 0; i < vni->kills.size(); ++i) {
          // PHI kills don't need to be remapped.
          if (!vni->kills[i]) continue;

          unsigned index = (vni->kills[i]-1) / InstrSlots::NUM;
          unsigned offset = vni->kills[i] % InstrSlots::NUM;
          if (offset == InstrSlots::LOAD) {
            std::vector<IdxMBBPair>::const_iterator I =
             std::lower_bound(OldI2MBB.begin(), OldI2MBB.end(), vni->kills[i]);
            --I;

            vni->kills[i] = getMBBEndIdx(I->second);
          } else {
            unsigned idx = index;
            while (index < OldI2MI.size() && !OldI2MI[index]) ++index;

            if (index != OldI2MI.size())
              vni->kills[i] = mi2iMap_[OldI2MI[index]] +
                              (idx == index ? offset : 0);
            else
              vni->kills[i] = InstrSlots::NUM * i2miMap_.size();
          }
        }
      }
    }
}
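
// Illustrative sketch of the numbering scheme built above (not part of the
// pass; the register names and the 4-slot LOAD/USE/DEF/STORE layout are
// assumptions based on the usual InstrSlots definition):
//
//   idx  0.. 3   empty slot at the start of the entry block
//   idx  4.. 7   %reg1024 = MOV32ri 7     (LOAD=4, USE=5, DEF=6, STORE=7)
//   idx  8..11   empty slot(s), one per def of the instruction (at least one)
//   idx 12..15   %reg1025 = ADD32ri %reg1024, 1
//   ...
//
// The empty slots give later phases (spilling, splitting) room to insert new
// instructions without immediately renumbering the whole function.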

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeNumbering();
  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(O, tri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register is
/// defined somewhere while the specified interval is live.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx, MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is an implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    unsigned defIndex = getDefIndex(MIIdx);
    // Earlyclobbers move back one.
    if (MO.isEarlyClobber())
      defIndex = getUseIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb)+1, ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        LiveRange LR(getMBBStartIdx(i),
                     getMBBEndIdx(i)+1,  // MBB ends at -1.
                     ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR;
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two-address elimination.  If this is
    // the result of two-address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegReDefinedByTwoAddr(interval.reg, MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        RedefIndex = getUseIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should only ever be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);
      // Earlyclobbers move back one.
      if (MO.isEarlyClobber())
        defIndex = getUseIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getMBBEndIdx(mbb) + 1;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}
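
// Illustrative two-address case from handleVirtualRegisterDef above (indices
// and register numbers are hypothetical):
//
//   4   %reg1024 = MOV %reg1025          ; originally defines value #0
//   8   %reg1024 = ADD %reg1024, 1       ; two-address redefinition
//
// After the renumbering performed above, the range [4,8) belongs to the new
// value #1 (defined by the copy), while value #0 is re-pointed at the ADD,
// so each value number again has a single defining instruction.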

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = getUseIndex(MIIdx);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (MO.isDead()) {
    DOUT << " dead";
    end = start + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex += InstrSlots::NUM;
  while (++mi != MBB->end()) {
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = start + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or a redefinition telling us it is dead, is if it is
  // live-in to the function and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = start + 1;

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}
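
// The cases handled above, sketched on a hypothetical block (register names
// and slots are illustrative):
//
//   %EAX<def> ... %EAX<kill>   => interval [defSlot(def), useSlot(kill)+1)
//   %EAX<def,dead>             => interval [defSlot(def), defSlot(def)+1)
//   %EAX<def> ... %EAX<def>    => interval [defSlot(def), defSlot(def)+1)
//                                 (redefined before ever being read)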

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills; if the register reaches a def before it's killed, it
  // shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
         getInstructionFromIndex(baseIndex) == 0)
    baseIndex += InstrSlots::NUM;
  unsigned end = baseIndex;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    while (baseIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(baseIndex) == 0)
      baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // The live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(~0U, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - Compute the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N and
/// the variable is live throughout it.
void LiveIntervals::computeIntervals() {

  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';

  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    unsigned MIIndex = getMBBStartIdx(MBB);
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
           getInstructionFromIndex(MIIndex) == 0)
      MIIndex += InstrSlots::NUM;

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // Handle register defs - build intervals.
        if (MO.isReg() && MO.getReg() && MO.isDef()) {
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        }
      }

      // Skip over the empty slots after each instruction.
      unsigned Slots = MI->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;
      MIIndex += InstrSlots::NUM * Slots;

      // Skip over empty indices.
      while (MIIndex / InstrSlots::NUM < i2miMap_.size() &&
             getInstructionFromIndex(MIIndex) == 0)
        MIIndex += InstrSlots::NUM;
    }
  }
}
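
// For example (block and register numbers are illustrative), a vreg defined
// in BB0 and killed in BB2 typically ends up with one value number covering
// three ranges:
//
//   [defSlot(def), end(BB0)+1)       added by handleVirtualRegisterDef
//   [start(BB1),   end(BB1)+1)       BB1 is in vi.AliveBlocks
//   [start(BB2),   useSlot(kill)+1)  BB2 is the killing block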

bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}
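
// Operand layouts assumed by getVNInfoSourceReg above (sketched from the
// operand indices used in the code; exact pseudo-instruction syntax is an
// assumption):
//
//   EXTRACT_SUBREG %dst, %src, subreg_index        -> source is operand 1
//   INSERT_SUBREG  %dst, %src, %subreg_val, index  -> inserted value is
//                                                     operand 2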

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns that virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
    // Stop at the first register operand in release builds; in debug builds
    // keep scanning so the assert above can flag a second one.
#ifdef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let the target-specific isReallyTriviallyReMaterializable
    // determine this, but remember it is not safe to fold into a two-address
    // instruction.
    // This is a load from a fixed stack slot. It can be rematerialized.
    return true;

  // If the target-specific rules don't identify an instruction as
  // being trivially rematerializable, use some target-independent
  // rules.
  if (!MI->getDesc().isRematerializable() ||
      !tii_->isTriviallyReMaterializable(MI)) {
    if (!EnableAggressiveRemat)
      return false;

    // If the instruction accesses memory but the memoperands have been lost,
    // we can't analyze it.
    const TargetInstrDesc &TID = MI->getDesc();
    if ((TID.mayLoad() || TID.mayStore()) && MI->memoperands_empty())
      return false;

    // Avoid instructions obviously unsafe for remat.
    if (TID.hasUnmodeledSideEffects() || TID.isNotDuplicable())
      return false;

    // If the instruction accesses memory and the memory could be non-constant,
    // assume the instruction is not rematerializable.
    for (std::list<MachineMemOperand>::const_iterator
           I = MI->memoperands_begin(), E = MI->memoperands_end(); I != E; ++I){
      const MachineMemOperand &MMO = *I;
      if (MMO.isVolatile() || MMO.isStore())
        return false;
      const Value *V = MMO.getValue();
      if (!V)
        return false;
      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
        if (!PSV->isConstant(mf_->getFrameInfo()))
          return false;
      } else if (!aa_->pointsToConstantMemory(V))
        return false;
    }

    // If any of the registers accessed are non-constant, conservatively assume
    // the instruction is not rematerializable.
    unsigned ImpUse = 0;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        unsigned Reg = MO.getReg();
        if (Reg == 0)
          continue;
        if (TargetRegisterInfo::isPhysicalRegister(Reg))
          return false;

        // Only allow one def, and that in the first operand.
        if (MO.isDef() != (i == 0))
          return false;

        // Only allow constant-valued registers.
        bool IsLiveIn = mri_->isLiveIn(Reg);
        MachineRegisterInfo::def_iterator I = mri_->def_begin(Reg),
                                          E = mri_->def_end();

        // For the def, it should be the only def of that register.
        if (MO.isDef() && (next(I) != E || IsLiveIn))
          return false;

        if (MO.isUse()) {
          // Only allow one other register use, as that's all the
          // remat mechanisms currently support.
          if (Reg != li.reg) {
            if (ImpUse == 0)
              ImpUse = Reg;
            else if (Reg != ImpUse)
              return false;
          }
          // For uses, there should be only one associated def.
          if (I != E && (next(I) != E || IsLiveIn))
            return false;
        }
      }
    }
  }

  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      unsigned UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of every val#
/// of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}
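
// A typical def that passes these checks (target-specific and shown purely
// for illustration; the opcode is an x86-flavored assumption):
//
//   %reg1024 = MOV32ri 42
//
// has no side effects, no memory access, and no register inputs, so every
// use can recompute it in place instead of reloading it from a spill slot.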

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}

/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the given operands of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx / InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}
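
// Illustrative folding (x86-flavored; opcodes, registers, and slot numbers
// are hypothetical):
//
//   %reg1024 = MOV32rm <fi#1>              ; reload from spill slot fi#1
//   %reg1025 = ADD32rr %reg1025, %reg1024
//
// can be folded by the target into
//
//   %reg1025 = ADD32rm %reg1025, <fi#1>
//
// which removes the reload and the need for %reg1024 entirely; the folded
// instruction then replaces the original one in the block above.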

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with the new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end, MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
  MachineBasicBlock *MBB = MI->getParent();
  unsigned loopDepth = loopInfo->getLoopDepth(MBB);
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
        DOUT << MI << '\n';
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If the def for this use can't be rematerialized, then try folding.
      // If the def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try to fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (HasUse && !li.liveAt(getUseIndex(index)))
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons, e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers would overlap.
      HasUse = false;

    // Update the stack slot spill weight if we are splitting.
    float Weight = getSpillWeight(HasDef, HasUse, loopDepth);
    if (!TrySplit)
      SSWeight += Weight;

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI)) {
            SSWeight -= Weight;
            break;
          }
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because the
      // earlier def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with a virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
1367
1368void LiveIntervals::
1369rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
1370                    LiveInterval::Ranges::const_iterator &I,
1371                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
1372                    unsigned Slot, int LdSlot,
1373                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
1374                    VirtRegMap &vrm,
1375                    const TargetRegisterClass* rc,
1376                    SmallVector<int, 4> &ReMatIds,
1377                    const MachineLoopInfo *loopInfo,
1378                    BitVector &SpillMBBs,
1379                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
1380                    BitVector &RestoreMBBs,
1381                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
1382                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
1383                    std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
1384  bool AllCanFold = true;
1385  unsigned NewVReg = 0;
1386  unsigned start = getBaseIndex(I->start);
1387  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
1388
1389  // First collect all the def / use in this live range that will be rewritten.
1390  // Make sure they are sorted according to instruction index.
1391  std::vector<RewriteInfo> RewriteMIs;
1392  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1393         re = mri_->reg_end(); ri != re; ) {
1394    MachineInstr *MI = &*ri;
1395    MachineOperand &O = ri.getOperand();
1396    ++ri;
1397    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
1398    unsigned index = getInstructionIndex(MI);
1399    if (index < start || index >= end)
1400      continue;
1401    if (O.isUse() && !li.liveAt(getUseIndex(index)))
1402      // Must be defined by an implicit def. It should not be spilled. Note,
1403      // this is for correctness reasons, e.g.
1404      // 8   %reg1024<def> = IMPLICIT_DEF
1405      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
1406      // The live range [12, 14) is not part of the r1024 live interval since
1407      // it's defined by an implicit def. It will not conflict with the live
1408      // interval of r1025. Now suppose both registers are spilled: both could
1409      // easily be reloaded before the INSERT_SUBREG, and the reloads would be
1410      // assigned two overlapping registers.
1411      continue;
1412    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
1413  }
1414  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
1415
1416  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
1417  // Now rewrite the defs and uses.
1418  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
1419    RewriteInfo &rwi = RewriteMIs[i];
1420    ++i;
1421    unsigned index = rwi.Index;
1422    bool MIHasUse = rwi.HasUse;
1423    bool MIHasDef = rwi.HasDef;
1424    MachineInstr *MI = rwi.MI;
1425    // If MI defs and/or uses the same register multiple times, then there
1426    // are multiple entries.
1427    unsigned NumUses = MIHasUse;
1428    while (i != e && RewriteMIs[i].MI == MI) {
1429      assert(RewriteMIs[i].Index == index);
1430      bool isUse = RewriteMIs[i].HasUse;
1431      if (isUse) ++NumUses;
1432      MIHasUse |= isUse;
1433      MIHasDef |= RewriteMIs[i].HasDef;
1434      ++i;
1435    }
1436    MachineBasicBlock *MBB = MI->getParent();
1437
1438    if (ImpUse && MI != ReMatDefMI) {
1439      // Re-matting an instruction with a virtual register use. Update the
1440      // register interval's spill weight to HUGE_VALF to prevent it from
1441      // being spilled.
1442      LiveInterval &ImpLi = getInterval(ImpUse);
1443      ImpLi.weight = HUGE_VALF;
1444    }
1445
1446    unsigned MBBId = MBB->getNumber();
1447    unsigned ThisVReg = 0;
1448    if (TrySplit) {
1449      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
1450      if (NVI != MBBVRegsMap.end()) {
1451        ThisVReg = NVI->second;
1452        // One common case:
1453        // x = use
1454        // ...
1455        // ...
1456        // def = ...
1457        //     = use
1458        // It's better to start a new interval here than to artificially
1459        // extend the previous one.
1460        if (MIHasDef && !MIHasUse) {
1461          MBBVRegsMap.erase(MBB->getNumber());
1462          ThisVReg = 0;
1463        }
1464      }
1465    }
1466
1467    bool IsNew = ThisVReg == 0;
1468    if (IsNew) {
1469      // This ends the previous live interval. If all of its defs / uses
1470      // can be folded, give it a low spill weight.
1471      if (NewVReg && TrySplit && AllCanFold) {
1472        LiveInterval &nI = getOrCreateInterval(NewVReg);
1473        nI.weight /= 10.0F;
1474      }
1475      AllCanFold = true;
1476    }
1477    NewVReg = ThisVReg;
1478
1479    bool HasDef = false;
1480    bool HasUse = false;
1481    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
1482                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1483                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1484                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
1485                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs, SSWeight);
1486    if (!HasDef && !HasUse)
1487      continue;
1488
1489    AllCanFold &= CanFold;
1490
1491    // Update weight of spill interval.
1492    LiveInterval &nI = getOrCreateInterval(NewVReg);
1493    if (!TrySplit) {
1494      // The spill weight is now infinity as it cannot be spilled again.
1495      nI.weight = HUGE_VALF;
1496      continue;
1497    }
1498
1499    // Keep track of the last def and first use in each MBB.
1500    if (HasDef) {
1501      if (MI != ReMatOrigDefMI || !CanDelete) {
1502        bool HasKill = false;
1503        if (!HasUse)
1504          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1505        else {
1506          // If this is two-address code, then this index starts a new VNInfo.
1507          const VNInfo *VNI = li.findDefinedVNInfo(getDefIndex(index));
1508          if (VNI)
1509            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1510        }
1511        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1512          SpillIdxes.find(MBBId);
1513        if (!HasKill) {
1514          if (SII == SpillIdxes.end()) {
1515            std::vector<SRInfo> S;
1516            S.push_back(SRInfo(index, NewVReg, true));
1517            SpillIdxes.insert(std::make_pair(MBBId, S));
1518          } else if (SII->second.back().vreg != NewVReg) {
1519            SII->second.push_back(SRInfo(index, NewVReg, true));
1520          } else if ((int)index > SII->second.back().index) {
1521            // If there is an earlier def and this is a two-address
1522            // instruction, then it's not possible to fold the store (which
1523            // would also fold the load).
1524            SRInfo &Info = SII->second.back();
1525            Info.index = index;
1526            Info.canFold = !HasUse;
1527          }
1528          SpillMBBs.set(MBBId);
1529        } else if (SII != SpillIdxes.end() &&
1530                   SII->second.back().vreg == NewVReg &&
1531                   (int)index > SII->second.back().index) {
1532          // There is an earlier def that's not killed (must be two-address).
1533          // The spill is no longer needed.
1534          SII->second.pop_back();
1535          if (SII->second.empty()) {
1536            SpillIdxes.erase(MBBId);
1537            SpillMBBs.reset(MBBId);
1538          }
1539        }
1540      }
1541    }
1542
1543    if (HasUse) {
1544      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
1545        SpillIdxes.find(MBBId);
1546      if (SII != SpillIdxes.end() &&
1547          SII->second.back().vreg == NewVReg &&
1548          (int)index > SII->second.back().index)
1549        // There are use(s) following the last def; it's not safe to fold the spill.
1550        SII->second.back().canFold = false;
1551      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
1552        RestoreIdxes.find(MBBId);
1553      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1554        // If we are splitting live intervals, only fold if it's the first
1555        // use and there isn't another use later in the MBB.
1556        RII->second.back().canFold = false;
1557      else if (IsNew) {
1558        // Only need a reload if there isn't an earlier def / use.
1559        if (RII == RestoreIdxes.end()) {
1560          std::vector<SRInfo> Infos;
1561          Infos.push_back(SRInfo(index, NewVReg, true));
1562          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1563        } else {
1564          RII->second.push_back(SRInfo(index, NewVReg, true));
1565        }
1566        RestoreMBBs.set(MBBId);
1567      }
1568    }
1569
1570    // Update spill weight.
1571    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1572    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1573  }
1574
1575  if (NewVReg && TrySplit && AllCanFold) {
1576    // If all of its defs / uses can be folded, give it a low spill weight.
1577    LiveInterval &nI = getOrCreateInterval(NewVReg);
1578    nI.weight /= 10.0F;
1579  }
1580}
1581
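/// alsoFoldARestore - Return true if the restore recorded for virtual register
/// vr at the given instruction index in MBB Id is marked as foldable.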
1582bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1583                        BitVector &RestoreMBBs,
1584                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1585  if (!RestoreMBBs[Id])
1586    return false;
1587  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1588  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1589    if (Restores[i].index == index &&
1590        Restores[i].vreg == vr &&
1591        Restores[i].canFold)
1592      return true;
1593  return false;
1594}
1595
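/// eraseRestoreInfo - Invalidate the restore recorded for virtual register vr
/// at the given instruction index in MBB Id so that no reload is issued.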
1596void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1597                        BitVector &RestoreMBBs,
1598                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1599  if (!RestoreMBBs[Id])
1600    return;
1601  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1602  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1603    if (Restores[i].index == index && Restores[i].vreg == vr)
1604      Restores[i].index = -1;
1605}
1606
1607/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
1608/// spilled and create empty intervals for their uses.
1609void
1610LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
1611                                    const TargetRegisterClass* rc,
1612                                    std::vector<LiveInterval*> &NewLIs) {
1613  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
1614         re = mri_->reg_end(); ri != re; ) {
1615    MachineOperand &O = ri.getOperand();
1616    MachineInstr *MI = &*ri;
1617    ++ri;
1618    if (O.isDef()) {
1619      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
1620             "Register def was not rewritten?");
1621      RemoveMachineInstrFromMaps(MI);
1622      vrm.RemoveMachineInstrFromMaps(MI);
1623      MI->eraseFromParent();
1624    } else {
1625      // This must be a use of an implicit_def so it's not part of the live
1626      // interval. Create a new empty live interval for it.
1627      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1628      unsigned NewVReg = mri_->createVirtualRegister(rc);
1629      vrm.grow();
1630      vrm.setIsImplicitlyDefined(NewVReg);
1631      NewLIs.push_back(&getOrCreateInterval(NewVReg));
1632      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1633        MachineOperand &MO = MI->getOperand(i);
1634        if (MO.isReg() && MO.getReg() == li.reg)
1635          MO.setReg(NewVReg);
1636      }
1637    }
1638  }
1639}
1640
1641namespace {
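  // LISorter - Order live intervals by ascending start index.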
1642  struct LISorter {
1643    bool operator()(LiveInterval* A, LiveInterval* B) {
1644      return A->beginNumber() < B->beginNumber();
1645    }
1646  };
1647}
1648
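/// addIntervalsForSpillsFast - Spill the given live interval without trying to
/// split it. Every instruction that cannot fold the stack slot reference gets
/// a fresh virtual register with an infinite spill weight, along with an
/// explicit spill and / or restore point.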
1649std::vector<LiveInterval*> LiveIntervals::
1650addIntervalsForSpillsFast(const LiveInterval &li,
1651                          const MachineLoopInfo *loopInfo,
1652                          VirtRegMap &vrm, float& SSWeight) {
1653  unsigned slot = vrm.assignVirt2StackSlot(li.reg);
1654
1655  std::vector<LiveInterval*> added;
1656
1657  assert(li.weight != HUGE_VALF &&
1658         "attempt to spill already spilled interval!");
1659
1660  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1661  DEBUG(li.dump());
1662  DOUT << '\n';
1663
1664  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1665
1666  SSWeight = 0.0f;
1667
1668  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
1669  while (RI != mri_->reg_end()) {
1670    MachineInstr* MI = &*RI;
1671
1672    SmallVector<unsigned, 2> Indices;
1673    bool HasUse = false;
1674    bool HasDef = false;
1675
1676    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1677      MachineOperand& mop = MI->getOperand(i);
1678      if (!mop.isReg() || mop.getReg() != li.reg) continue;
1679
1680      HasUse |= MI->getOperand(i).isUse();
1681      HasDef |= MI->getOperand(i).isDef();
1682
1683      Indices.push_back(i);
1684    }
1685
1686    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
1687                              Indices, true, slot, li.reg)) {
1688      unsigned NewVReg = mri_->createVirtualRegister(rc);
1689      vrm.grow();
1690      vrm.assignVirt2StackSlot(NewVReg, slot);
1691
1692      // create a new register for this spill
1693      LiveInterval &nI = getOrCreateInterval(NewVReg);
1694
1695      // the spill weight is now infinity as it
1696      // cannot be spilled again
1697      nI.weight = HUGE_VALF;
1698
1699      // Rewrite register operands to use the new vreg.
1700      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
1701           E = Indices.end(); I != E; ++I) {
1702        MI->getOperand(*I).setReg(NewVReg);
1703
1704        if (MI->getOperand(*I).isUse())
1705          MI->getOperand(*I).setIsKill(true);
1706      }
1707
1708      // Fill in the new live interval.
1709      unsigned index = getInstructionIndex(MI);
1710      if (HasUse) {
1711        LiveRange LR(getLoadIndex(index), getUseIndex(index),
1712                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1713        DOUT << " +" << LR;
1714        nI.addRange(LR);
1715        vrm.addRestorePoint(NewVReg, MI);
1716      }
1717      if (HasDef) {
1718        LiveRange LR(getDefIndex(index), getStoreIndex(index),
1719                     nI.getNextValue(~0U, 0, getVNInfoAllocator()));
1720        DOUT << " +" << LR;
1721        nI.addRange(LR);
1722        vrm.addSpillPoint(NewVReg, true, MI);
1723      }
1724
1725      added.push_back(&nI);
1726
1727      DOUT << "\t\t\t\tadded new interval: ";
1728      DEBUG(nI.dump());
1729      DOUT << '\n';
1730
1731      unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
1732      if (HasUse) {
1733        if (HasDef)
1734          SSWeight += getSpillWeight(true, true, loopDepth);
1735        else
1736          SSWeight += getSpillWeight(false, true, loopDepth);
1737      } else
1738        SSWeight += getSpillWeight(true, false, loopDepth);
1739    }
1740
1741
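    // MI no longer uses li.reg at this point: either it was folded away or its
    // operands were rewritten to NewVReg. Restart from the head of the use
    // list to pick up the next user.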
1742    RI = mri_->reg_begin(li.reg);
1743  }
1744
1745  // Clients expect the new intervals to be returned in sorted order.
1746  std::sort(added.begin(), added.end(), LISorter());
1747
1748  return added;
1749}
1750
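/// addIntervalsForSpills - Spill the given live interval, splitting it at
/// basic block boundaries when profitable, and return the new intervals that
/// replace it. Spill and restore points are registered with the VirtRegMap.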
1751std::vector<LiveInterval*> LiveIntervals::
1752addIntervalsForSpills(const LiveInterval &li,
1753                      SmallVectorImpl<LiveInterval*> &SpillIs,
1754                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm,
1755                      float &SSWeight) {
1756
1757  if (EnableFastSpilling)
1758    return addIntervalsForSpillsFast(li, loopInfo, vrm, SSWeight);
1759
1760  assert(li.weight != HUGE_VALF &&
1761         "attempt to spill already spilled interval!");
1762
1763  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1764  li.print(DOUT, tri_);
1765  DOUT << '\n';
1766
1767  // Spill slot weight.
1768  SSWeight = 0.0f;
1769
1770  // Each bit specifies whether a spill is required in the MBB.
1771  BitVector SpillMBBs(mf_->getNumBlockIDs());
1772  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1773  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1774  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1775  DenseMap<unsigned,unsigned> MBBVRegsMap;
1776  std::vector<LiveInterval*> NewLIs;
1777  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1778
1779  unsigned NumValNums = li.getNumValNums();
1780  SmallVector<MachineInstr*, 4> ReMatDefs;
1781  ReMatDefs.resize(NumValNums, NULL);
1782  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1783  ReMatOrigDefs.resize(NumValNums, NULL);
1784  SmallVector<int, 4> ReMatIds;
1785  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1786  BitVector ReMatDelete(NumValNums);
1787  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1788
1789  // Spilling a split live interval. It cannot be split any further. It is
1790  // also guaranteed to be a single val# / range interval.
1791  if (vrm.getPreSplitReg(li.reg)) {
1792    vrm.setIsSplitFromReg(li.reg, 0);
1793    // Unset the split kill marker on the last use.
1794    unsigned KillIdx = vrm.getKillPoint(li.reg);
1795    if (KillIdx) {
1796      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1797      assert(KillMI && "Last use disappeared?");
1798      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1799      assert(KillOp != -1 && "Last use disappeared?");
1800      KillMI->getOperand(KillOp).setIsKill(false);
1801    }
1802    vrm.removeKillPoint(li.reg);
1803    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1804    Slot = vrm.getStackSlot(li.reg);
1805    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1806    MachineInstr *ReMatDefMI = DefIsReMat ?
1807      vrm.getReMaterializedMI(li.reg) : NULL;
1808    int LdSlot = 0;
1809    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1810    bool isLoad = isLoadSS ||
1811      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1812    bool IsFirstRange = true;
1813    for (LiveInterval::Ranges::const_iterator
1814           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1815      // If this is a split live interval with multiple ranges, it means there
1816      // are two-address instructions that re-defined the value. Only the
1817      // first def can be rematerialized!
1818      if (IsFirstRange) {
1819        // Note ReMatOrigDefMI has already been deleted.
1820        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1821                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1822                             false, vrm, rc, ReMatIds, loopInfo,
1823                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1824                             MBBVRegsMap, NewLIs, SSWeight);
1825      } else {
1826        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1827                             Slot, 0, false, false, false,
1828                             false, vrm, rc, ReMatIds, loopInfo,
1829                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1830                             MBBVRegsMap, NewLIs, SSWeight);
1831      }
1832      IsFirstRange = false;
1833    }
1834
1835    SSWeight = 0.0f;  // Already accounted for when split.
1836    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1837    return NewLIs;
1838  }
1839
1840  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1841  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1842    TrySplit = false;
1843  if (TrySplit)
1844    ++numSplits;
1845  bool NeedStackSlot = false;
1846  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1847       i != e; ++i) {
1848    const VNInfo *VNI = *i;
1849    unsigned VN = VNI->id;
1850    unsigned DefIdx = VNI->def;
1851    if (DefIdx == ~1U)
1852      continue; // Dead val#.
1853    // Is the def for the val# rematerializable?
1854    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1855      ? 0 : getInstructionFromIndex(DefIdx);
1856    bool dummy;
1857    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1858      // Remember how to remat the def of this val#.
1859      ReMatOrigDefs[VN] = ReMatDefMI;
1860      // Original def may be modified so we have to make a copy here.
1861      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1862      ClonedMIs.push_back(Clone);
1863      ReMatDefs[VN] = Clone;
1864
1865      bool CanDelete = true;
1866      if (VNI->hasPHIKill) {
1867        // A kill is a phi node; not all of its uses can be rematerialized.
1868        // It must not be deleted.
1869        CanDelete = false;
1870        // Need a stack slot if there is any live range where uses cannot be
1871        // rematerialized.
1872        NeedStackSlot = true;
1873      }
1874      if (CanDelete)
1875        ReMatDelete.set(VN);
1876    } else {
1877      // Need a stack slot if there is any live range where uses cannot be
1878      // rematerialized.
1879      NeedStackSlot = true;
1880    }
1881  }
1882
1883  // One stack slot per live interval.
1884  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1885    Slot = vrm.assignVirt2StackSlot(li.reg);
1886
1887  // Create new intervals and rewrite defs and uses.
1888  for (LiveInterval::Ranges::const_iterator
1889         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1890    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1891    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1892    bool DefIsReMat = ReMatDefMI != NULL;
1893    bool CanDelete = ReMatDelete[I->valno->id];
1894    int LdSlot = 0;
1895    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1896    bool isLoad = isLoadSS ||
1897      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1898    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1899                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1900                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1901                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1902                               MBBVRegsMap, NewLIs, SSWeight);
1903  }
1904
1905  // Insert spills / restores if we are splitting.
1906  if (!TrySplit) {
1907    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1908    return NewLIs;
1909  }
1910
1911  SmallPtrSet<LiveInterval*, 4> AddedKill;
1912  SmallVector<unsigned, 2> Ops;
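  // Walk the basic blocks that need spills. Fold each store into the def when
  // possible; otherwise register a spill point with the VirtRegMap.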
1913  if (NeedStackSlot) {
1914    int Id = SpillMBBs.find_first();
1915    while (Id != -1) {
1916      MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1917      unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1918      std::vector<SRInfo> &spills = SpillIdxes[Id];
1919      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1920        int index = spills[i].index;
1921        unsigned VReg = spills[i].vreg;
1922        LiveInterval &nI = getOrCreateInterval(VReg);
1923        bool isReMat = vrm.isReMaterialized(VReg);
1924        MachineInstr *MI = getInstructionFromIndex(index);
1925        bool CanFold = false;
1926        bool FoundUse = false;
1927        Ops.clear();
1928        if (spills[i].canFold) {
1929          CanFold = true;
1930          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1931            MachineOperand &MO = MI->getOperand(j);
1932            if (!MO.isReg() || MO.getReg() != VReg)
1933              continue;
1934
1935            Ops.push_back(j);
1936            if (MO.isDef())
1937              continue;
1938            if (isReMat ||
1939                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1940                                                RestoreMBBs, RestoreIdxes))) {
1941              // MI has two-address uses of the same register. If the use
1942              // isn't the first and only use in the BB, then we can't fold
1943              // it. FIXME: Move this to rewriteInstructionsForSpills.
1944              CanFold = false;
1945              break;
1946            }
1947            FoundUse = true;
1948          }
1949        }
1950        // Fold the store into the def if possible.
1951        bool Folded = false;
1952        if (CanFold && !Ops.empty()) {
1953          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1954            Folded = true;
1955            if (FoundUse) {
1956              // Also folded uses, do not issue a load.
1957              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1958              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1959            }
1960            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1961          }
1962        }
1963
1964        // Otherwise tell the spiller to issue a spill.
1965        if (!Folded) {
1966          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1967          bool isKill = LR->end == getStoreIndex(index);
1968          if (!MI->registerDefIsDead(nI.reg))
1969            // No need to spill a dead def.
1970            vrm.addSpillPoint(VReg, isKill, MI);
1971          if (isKill)
1972            AddedKill.insert(&nI);
1973        }
1974
1975        // Update spill slot weight.
1976        if (!isReMat)
1977          SSWeight += getSpillWeight(true, false, loopDepth);
1978      }
1979      Id = SpillMBBs.find_next(Id);
1980    }
1981  }
1982
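  // Walk the basic blocks that need restores. Fold each load into the use when
  // possible; otherwise register a restore point with the VirtRegMap.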
1983  int Id = RestoreMBBs.find_first();
1984  while (Id != -1) {
1985    MachineBasicBlock *MBB = mf_->getBlockNumbered(Id);
1986    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1987
1988    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1989    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1990      int index = restores[i].index;
1991      if (index == -1)
1992        continue;
1993      unsigned VReg = restores[i].vreg;
1994      LiveInterval &nI = getOrCreateInterval(VReg);
1995      bool isReMat = vrm.isReMaterialized(VReg);
1996      MachineInstr *MI = getInstructionFromIndex(index);
1997      bool CanFold = false;
1998      Ops.clear();
1999      if (restores[i].canFold) {
2000        CanFold = true;
2001        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
2002          MachineOperand &MO = MI->getOperand(j);
2003          if (!MO.isReg() || MO.getReg() != VReg)
2004            continue;
2005
2006          if (MO.isDef()) {
2007            // If this restore were to be folded, it would have been folded
2008            // already.
2009            CanFold = false;
2010            break;
2011          }
2012          Ops.push_back(j);
2013        }
2014      }
2015
2016      // Fold the load into the use if possible.
2017      bool Folded = false;
2018      if (CanFold && !Ops.empty()) {
2019        if (!isReMat)
2020          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
2021        else {
2022          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
2023          int LdSlot = 0;
2024          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
2025          // If the rematerializable def is a load, also try to fold it.
2026          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
2027            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
2028                                          Ops, isLoadSS, LdSlot, VReg);
2029          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
2030          if (ImpUse) {
2031            // Re-matting an instruction with a virtual register use. Add the
2032            // register as an implicit use on the use MI and update the register
2033            // interval's spill weight to HUGE_VALF to prevent it from being
2034            // spilled.
2035            LiveInterval &ImpLi = getInterval(ImpUse);
2036            ImpLi.weight = HUGE_VALF;
2037            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
2038          }
2039        }
2040      }
2041      // If folding is not possible / failed, then tell the spiller to issue a
2042      // load / rematerialization for us.
2043      if (Folded)
2044        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
2045      else
2046        vrm.addRestorePoint(VReg, MI);
2047
2048      // Update spill slot weight.
2049      if (!isReMat)
2050        SSWeight += getSpillWeight(false, true, loopDepth);
2051    }
2052    Id = RestoreMBBs.find_next(Id);
2053  }
2054
2055  // Finalize intervals: add kills, finalize spill weights, and filter out
2056  // dead intervals.
2057  std::vector<LiveInterval*> RetNewLIs;
2058  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
2059    LiveInterval *LI = NewLIs[i];
2060    if (!LI->empty()) {
2061      LI->weight /= InstrSlots::NUM * getApproximateInstructionCount(*LI);
2062      if (!AddedKill.count(LI)) {
2063        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
2064        unsigned LastUseIdx = getBaseIndex(LR->end);
2065        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
2066        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
2067        assert(UseIdx != -1);
2068        if (LastUse->getOperand(UseIdx).isImplicit() ||
2069            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
2070          LastUse->getOperand(UseIdx).setIsKill();
2071          vrm.addKillPoint(LI->reg, LastUseIdx);
2072        }
2073      }
2074      RetNewLIs.push_back(LI);
2075    }
2076  }
2077
2078  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
2079  return RetNewLIs;
2080}
2081
2082/// hasAllocatableSuperReg - Return true if the specified physical register has
2083/// any super register that's allocatable.
2084bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
2085  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
2086    if (allocatableRegs_[*AS] && hasInterval(*AS))
2087      return true;
2088  return false;
2089}
2090
2091/// getRepresentativeReg - Find the largest super register of the specified
2092/// physical register.
2093unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
2094  // Find the largest super-register that is allocatable.
2095  unsigned BestReg = Reg;
2096  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
2097    unsigned SuperReg = *AS;
2098    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
2099      BestReg = SuperReg;
2100      break;
2101    }
2102  }
2103  return BestReg;
2104}
2105
2106/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
2107/// specified interval that conflicts with the specified physical register.
2108unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
2109                                                   unsigned PhysReg) const {
2110  unsigned NumConflicts = 0;
2111  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2112  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2113         E = mri_->reg_end(); I != E; ++I) {
2114    MachineOperand &O = I.getOperand();
2115    MachineInstr *MI = O.getParent();
2116    unsigned Index = getInstructionIndex(MI);
2117    if (pli.liveAt(Index))
2118      ++NumConflicts;
2119  }
2120  return NumConflicts;
2121}
2122
2123/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2124/// around all defs and uses of the specified interval.
2125void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2126                                            unsigned PhysReg, VirtRegMap &vrm) {
2127  unsigned SpillReg = getRepresentativeReg(PhysReg);
2128
2129  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2130    // If a register aliases PhysReg but is not a sub-register of the chosen
2131    // representative super-register, assert, since we can't handle that
2132    // case yet.
2133    assert(*AS == SpillReg || !allocatableRegs_[*AS] ||
2134           tri_->isSuperRegister(*AS, SpillReg));
2135
2136  LiveInterval &pli = getInterval(SpillReg);
2137  SmallPtrSet<MachineInstr*, 8> SeenMIs;
2138  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2139         E = mri_->reg_end(); I != E; ++I) {
2140    MachineOperand &O = I.getOperand();
2141    MachineInstr *MI = O.getParent();
2142    if (SeenMIs.count(MI))
2143      continue;
2144    SeenMIs.insert(MI);
2145    unsigned Index = getInstructionIndex(MI);
2146    if (pli.liveAt(Index)) {
2147      vrm.addEmergencySpill(SpillReg, MI);
2148      pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2149      for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) {
2150        if (!hasInterval(*AS))
2151          continue;
2152        LiveInterval &spli = getInterval(*AS);
2153        if (spli.liveAt(Index))
2154          spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1);
2155      }
2156    }
2157  }
2158}
2159
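/// addLiveRangeToEndOfBlock - Extend reg's live interval with a new range
/// running from startInst's def slot to the end of its basic block, marking
/// the new value number as killed by a PHI at the block boundary.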
2160LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
2161                                                   MachineInstr* startInst) {
2162  LiveInterval& Interval = getOrCreateInterval(reg);
2163  VNInfo* VN = Interval.getNextValue(
2164            getInstructionIndex(startInst) + InstrSlots::DEF,
2165            startInst, getVNInfoAllocator());
2166  VN->hasPHIKill = true;
2167  VN->kills.push_back(getMBBEndIdx(startInst->getParent()));
2168  LiveRange LR(getInstructionIndex(startInst) + InstrSlots::DEF,
2169               getMBBEndIdx(startInst->getParent()) + 1, VN);
2170  Interval.addRange(LR);
2171
2172  return LR;
2173}
2174