LiveIntervalAnalysis.cpp revision 1a1932c83d6b6fcbf089eee3f97b65a23de22a92
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements the LiveInterval analysis pass which is used
11// by the Linear Scan Register allocator. This pass linearizes the
12// basic blocks of the function in DFS order and uses the
13// LiveVariables pass to conservatively compute live intervals for
14// each virtual and physical register.
15//
16//===----------------------------------------------------------------------===//
17
18#define DEBUG_TYPE "liveintervals"
19#include "llvm/CodeGen/LiveIntervalAnalysis.h"
20#include "VirtRegMap.h"
21#include "llvm/Value.h"
22#include "llvm/CodeGen/LiveVariables.h"
23#include "llvm/CodeGen/MachineFrameInfo.h"
24#include "llvm/CodeGen/MachineInstr.h"
25#include "llvm/CodeGen/MachineLoopInfo.h"
26#include "llvm/CodeGen/MachineRegisterInfo.h"
27#include "llvm/CodeGen/Passes.h"
28#include "llvm/Target/MRegisterInfo.h"
29#include "llvm/Target/TargetInstrInfo.h"
30#include "llvm/Target/TargetMachine.h"
31#include "llvm/Support/CommandLine.h"
32#include "llvm/Support/Debug.h"
33#include "llvm/ADT/Statistic.h"
34#include "llvm/ADT/STLExtras.h"
35#include <algorithm>
36#include <cmath>
37using namespace llvm;
38
39namespace {
  // Hidden options to help debugging.
41  cl::opt<bool> DisableReMat("disable-rematerialization",
42                              cl::init(false), cl::Hidden);
43
44  cl::opt<bool> SplitAtBB("split-intervals-at-bb",
45                          cl::init(true), cl::Hidden);
46  cl::opt<int> SplitLimit("split-limit",
47                          cl::init(-1), cl::Hidden);
48}
49
50STATISTIC(numIntervals, "Number of original intervals");
51STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
52STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
53STATISTIC(numSplits   , "Number of intervals split");
54
55char LiveIntervals::ID = 0;
56namespace {
57  RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
58}
59
60void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
61  AU.addPreserved<LiveVariables>();
62  AU.addRequired<LiveVariables>();
63  AU.addPreservedID(MachineLoopInfoID);
64  AU.addPreservedID(MachineDominatorsID);
65  AU.addPreservedID(PHIEliminationID);
66  AU.addRequiredID(PHIEliminationID);
67  AU.addRequiredID(TwoAddressInstructionPassID);
68  MachineFunctionPass::getAnalysisUsage(AU);
69}
70
71void LiveIntervals::releaseMemory() {
72  Idx2MBBMap.clear();
73  mi2iMap_.clear();
74  i2miMap_.clear();
75  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
77  VNInfoAllocator.Reset();
78  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
79    delete ClonedMIs[i];
80}
81
82namespace llvm {
83  inline bool operator<(unsigned V, const IdxMBBPair &IM) {
84    return V < IM.first;
85  }
86
87  inline bool operator<(const IdxMBBPair &IM, unsigned V) {
88    return IM.first < V;
89  }
90
91  struct Idx2MBBCompare {
92    bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
93      return LHS.first < RHS.first;
94    }
95  };
96}
97
/// runOnMachineFunction - Number the instructions and compute live intervals
/// for the whole function.
///
100bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
101  mf_ = &fn;
102  tm_ = &fn.getTarget();
103  mri_ = tm_->getRegisterInfo();
104  tii_ = tm_->getInstrInfo();
105  lv_ = &getAnalysis<LiveVariables>();
106  allocatableRegs_ = mri_->getAllocatableSet(fn);
107
108  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel value.
110  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
111
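  // Each MachineInstr is assigned InstrSlots::NUM consecutive index values so
  // that the load / use / def / store points within a single instruction can
  // be addressed separately (see getLoadIndex / getUseIndex / getDefIndex /
  // getStoreIndex below).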
112  unsigned MIIndex = 0;
113  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
114       MBB != E; ++MBB) {
115    unsigned StartIdx = MIIndex;
116
117    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
118         I != E; ++I) {
119      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
120      assert(inserted && "multiple MachineInstr -> index mappings");
121      i2miMap_.push_back(I);
122      MIIndex += InstrSlots::NUM;
123    }
124
125    // Set the MBB2IdxMap entry for this MBB.
126    MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
127    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
128  }
129  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
130
131  computeIntervals();
132
133  numIntervals += getNumIntervals();
134
135  DOUT << "********** INTERVALS **********\n";
136  for (iterator I = begin(), E = end(); I != E; ++I) {
137    I->second.print(DOUT, mri_);
138    DOUT << "\n";
139  }
140
141  numIntervalsAfter += getNumIntervals();
142  DEBUG(dump());
143  return true;
144}
145
146/// print - Implement the dump method.
147void LiveIntervals::print(std::ostream &O, const Module* ) const {
148  O << "********** INTERVALS **********\n";
149  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(O, mri_);
    O << "\n";
152  }
153
154  O << "********** MACHINEINSTRS **********\n";
155  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
156       mbbi != mbbe; ++mbbi) {
157    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
158    for (MachineBasicBlock::iterator mii = mbbi->begin(),
159           mie = mbbi->end(); mii != mie; ++mii) {
160      O << getInstructionIndex(mii) << '\t' << *mii;
161    }
162  }
163}
164
/// conflictsWithPhysRegDef - Returns true if the specified physical register
/// is defined or used by an instruction during the specified interval.
167bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
168                                            VirtRegMap &vrm, unsigned reg) {
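  // Walk every (non-deleted) instruction covered by li's ranges and check
  // whether any register operand, after mapping virtual registers through the
  // VirtRegMap, overlaps the physical register reg.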
169  for (LiveInterval::Ranges::const_iterator
170         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
171    for (unsigned index = getBaseIndex(I->start),
172           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
173         index += InstrSlots::NUM) {
174      // skip deleted instructions
175      while (index != end && !getInstructionFromIndex(index))
176        index += InstrSlots::NUM;
177      if (index == end) break;
178
179      MachineInstr *MI = getInstructionFromIndex(index);
180      unsigned SrcReg, DstReg;
181      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
182        if (SrcReg == li.reg || DstReg == li.reg)
183          continue;
184      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
185        MachineOperand& mop = MI->getOperand(i);
186        if (!mop.isRegister())
187          continue;
188        unsigned PhysReg = mop.getReg();
189        if (PhysReg == 0 || PhysReg == li.reg)
190          continue;
191        if (MRegisterInfo::isVirtualRegister(PhysReg)) {
192          if (!vrm.hasPhys(PhysReg))
193            continue;
194          PhysReg = vrm.getPhys(PhysReg);
195        }
196        if (PhysReg && mri_->regsOverlap(PhysReg, reg))
197          return true;
198      }
199    }
200  }
201
202  return false;
203}
204
205void LiveIntervals::printRegName(unsigned reg) const {
206  if (MRegisterInfo::isPhysicalRegister(reg))
207    cerr << mri_->getName(reg);
208  else
209    cerr << "%reg" << reg;
210}
211
212void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
213                                             MachineBasicBlock::iterator mi,
214                                             unsigned MIIdx,
215                                             LiveInterval &interval) {
216  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
217  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
218
219  // Virtual registers may be defined multiple times (due to phi
220  // elimination and 2-addr elimination).  Much of what we do only has to be
221  // done once for the vreg.  We use an empty interval to detect the first
222  // time we see a vreg.
223  if (interval.empty()) {
224    // Get the Idx of the defining instructions.
225    unsigned defIndex = getDefIndex(MIIdx);
226    VNInfo *ValNo;
227    unsigned SrcReg, DstReg;
228    if (tii_->isMoveInstr(*mi, SrcReg, DstReg))
229      ValNo = interval.getNextValue(defIndex, SrcReg, VNInfoAllocator);
230    else if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
231      ValNo = interval.getNextValue(defIndex, mi->getOperand(1).getReg(),
232                                    VNInfoAllocator);
233    else
234      ValNo = interval.getNextValue(defIndex, 0, VNInfoAllocator);
235
236    assert(ValNo->id == 0 && "First value in interval is not 0?");
237
238    // Loop over all of the blocks that the vreg is defined in.  There are
239    // two cases we have to handle here.  The most common case is a vreg
240    // whose lifetime is contained within a basic block.  In this case there
241    // will be a single kill, in MBB, which comes after the definition.
242    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
243      // FIXME: what about dead vars?
244      unsigned killIdx;
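      // Ranges are half-open [start, end), so the kill index is one slot past
      // the use of the killing instruction (or past the def slot when the
      // defining instruction is itself the only kill).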
245      if (vi.Kills[0] != mi)
246        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
247      else
248        killIdx = defIndex+1;
249
250      // If the kill happens after the definition, we have an intra-block
251      // live range.
252      if (killIdx > defIndex) {
253        assert(vi.AliveBlocks.none() &&
254               "Shouldn't be alive across any blocks!");
255        LiveRange LR(defIndex, killIdx, ValNo);
256        interval.addRange(LR);
257        DOUT << " +" << LR << "\n";
258        interval.addKill(ValNo, killIdx);
259        return;
260      }
261    }
262
263    // The other case we handle is when a virtual register lives to the end
264    // of the defining block, potentially live across some blocks, then is
265    // live into some number of blocks, but gets killed.  Start by adding a
266    // range that goes from this definition to the end of the defining block.
267    LiveRange NewLR(defIndex,
268                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
269                    ValNo);
270    DOUT << " +" << NewLR;
271    interval.addRange(NewLR);
272
273    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
275    // live interval.
276    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
277      if (vi.AliveBlocks[i]) {
278        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
279        if (!MBB->empty()) {
280          LiveRange LR(getMBBStartIdx(i),
281                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
282                       ValNo);
283          interval.addRange(LR);
284          DOUT << " +" << LR;
285        }
286      }
287    }
288
289    // Finally, this virtual register is live from the start of any killing
290    // block to the 'use' slot of the killing instruction.
291    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
292      MachineInstr *Kill = vi.Kills[i];
293      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
294      LiveRange LR(getMBBStartIdx(Kill->getParent()),
295                   killIdx, ValNo);
296      interval.addRange(LR);
297      interval.addKill(ValNo, killIdx);
298      DOUT << " +" << LR;
299    }
300
301  } else {
302    // If this is the second time we see a virtual register definition, it
303    // must be due to phi elimination or two addr elimination.  If this is
304    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
306    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
307      // If this is a two-address definition, then we have already processed
308      // the live range.  The only problem is that we didn't realize there
309      // are actually two values in the live interval.  Because of this we
310      // need to take the LiveRegion that defines this register and split it
311      // into two values.
312      unsigned DefIndex = getDefIndex(getInstructionIndex(vi.DefInst));
313      unsigned RedefIndex = getDefIndex(MIIdx);
314
315      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
316      VNInfo *OldValNo = OldLR->valno;
317      unsigned OldEnd = OldLR->end;
318
319      // Delete the initial value, which should be short and continuous,
320      // because the 2-addr copy must be in the same MBB as the redef.
321      interval.removeRange(DefIndex, RedefIndex);
322
323      // Two-address vregs should always only be redefined once.  This means
324      // that at this point, there should be exactly one value number in it.
325      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
326
327      // The new value number (#1) is defined by the instruction we claimed
328      // defined value #0.
329      VNInfo *ValNo = interval.getNextValue(0, 0, VNInfoAllocator);
330      interval.copyValNumInfo(ValNo, OldValNo);
331
332      // Value#0 is now defined by the 2-addr instruction.
333      OldValNo->def = RedefIndex;
334      OldValNo->reg = 0;
335
336      // Add the new live interval which replaces the range for the input copy.
337      LiveRange LR(DefIndex, RedefIndex, ValNo);
338      DOUT << " replace range with " << LR;
339      interval.addRange(LR);
340      interval.addKill(ValNo, RedefIndex);
341      interval.removeKills(ValNo, RedefIndex, OldEnd);
342
343      // If this redefinition is dead, we need to add a dummy unit live
344      // range covering the def slot.
345      if (lv_->RegisterDefIsDead(mi, interval.reg))
346        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
347
348      DOUT << " RESULT: ";
349      interval.print(DOUT, mri_);
350
351    } else {
352      // Otherwise, this must be because of phi elimination.  If this is the
353      // first redefinition of the vreg that we have seen, go back and change
354      // the live range in the PHI block to be a different value number.
355      if (interval.containsOneValue()) {
356        assert(vi.Kills.size() == 1 &&
357               "PHI elimination vreg should have one kill, the PHI itself!");
358
359        // Remove the old range that we now know has an incorrect number.
360        VNInfo *VNI = interval.getValNumInfo(0);
361        MachineInstr *Killer = vi.Kills[0];
362        unsigned Start = getMBBStartIdx(Killer->getParent());
363        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
364        DOUT << " Removing [" << Start << "," << End << "] from: ";
365        interval.print(DOUT, mri_); DOUT << "\n";
366        interval.removeRange(Start, End);
367        interval.addKill(VNI, Start);
368        VNI->hasPHIKill = true;
369        DOUT << " RESULT: "; interval.print(DOUT, mri_);
370
371        // Replace the interval with one of a NEW value number.  Note that this
372        // value number isn't actually defined by an instruction, weird huh? :)
373        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
374        DOUT << " replace range with " << LR;
375        interval.addRange(LR);
376        interval.addKill(LR.valno, End);
377        DOUT << " RESULT: "; interval.print(DOUT, mri_);
378      }
379
380      // In the case of PHI elimination, each variable definition is only
381      // live until the end of the block.  We've already taken care of the
382      // rest of the live range.
383      unsigned defIndex = getDefIndex(MIIdx);
384
385      VNInfo *ValNo;
386      unsigned SrcReg, DstReg;
387      if (tii_->isMoveInstr(*mi, SrcReg, DstReg))
388        ValNo = interval.getNextValue(defIndex, SrcReg, VNInfoAllocator);
389      else if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
390        ValNo = interval.getNextValue(defIndex, mi->getOperand(1).getReg(),
391                                      VNInfoAllocator);
392      else
393        ValNo = interval.getNextValue(defIndex, 0, VNInfoAllocator);
394
395      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
396      LiveRange LR(defIndex, killIndex, ValNo);
397      interval.addRange(LR);
398      interval.addKill(ValNo, killIndex);
399      ValNo->hasPHIKill = true;
400      DOUT << " +" << LR;
401    }
402  }
403
404  DOUT << '\n';
405}
406
407void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
408                                              MachineBasicBlock::iterator mi,
409                                              unsigned MIIdx,
410                                              LiveInterval &interval,
411                                              unsigned SrcReg) {
  // A physical register cannot be live across basic blocks, so its
413  // lifetime must end somewhere in its defining basic block.
414  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
415
416  unsigned baseIndex = MIIdx;
417  unsigned start = getDefIndex(baseIndex);
418  unsigned end = start;
419
420  // If it is not used after definition, it is considered dead at
421  // the instruction defining it. Hence its interval is:
422  // [defSlot(def), defSlot(def)+1)
423  if (lv_->RegisterDefIsDead(mi, interval.reg)) {
424    DOUT << " dead";
425    end = getDefIndex(start) + 1;
426    goto exit;
427  }
428
429  // If it is not dead on definition, it must be killed by a
430  // subsequent instruction. Hence its interval is:
431  // [defSlot(def), useSlot(kill)+1)
432  while (++mi != MBB->end()) {
433    baseIndex += InstrSlots::NUM;
434    if (lv_->KillsRegister(mi, interval.reg)) {
435      DOUT << " killed";
436      end = getUseIndex(baseIndex) + 1;
437      goto exit;
438    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
439      // Another instruction redefines the register before it is ever read.
440      // Then the register is essentially dead at the instruction that defines
441      // it. Hence its interval is:
442      // [defSlot(def), defSlot(def)+1)
443      DOUT << " dead";
444      end = getDefIndex(start) + 1;
445      goto exit;
446    }
447  }
448
  // The only case we should have a dead physreg here, without a killing
  // instruction or one where we know it's dead, is if it is live-in to the
  // function and never used.
452  assert(!SrcReg && "physreg was not killed in defining block!");
453  end = getDefIndex(start) + 1;  // It's dead.
454
455exit:
456  assert(start < end && "did not find end of interval?");
457
458  // Already exists? Extend old live interval.
459  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
460  VNInfo *ValNo = (OldLR != interval.end())
461    ? OldLR->valno : interval.getNextValue(start, SrcReg, VNInfoAllocator);
462  LiveRange LR(start, end, ValNo);
463  interval.addRange(LR);
464  interval.addKill(LR.valno, end);
465  DOUT << " +" << LR << '\n';
466}
467
468void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
469                                      MachineBasicBlock::iterator MI,
470                                      unsigned MIIdx,
471                                      unsigned reg) {
472  if (MRegisterInfo::isVirtualRegister(reg))
473    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
474  else if (allocatableRegs_[reg]) {
475    unsigned SrcReg, DstReg;
476    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
477      SrcReg = MI->getOperand(1).getReg();
478    else if (!tii_->isMoveInstr(*MI, SrcReg, DstReg))
479      SrcReg = 0;
480    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), SrcReg);
481    // Def of a register also defines its sub-registers.
482    for (const unsigned* AS = mri_->getSubRegisters(reg); *AS; ++AS)
483      // Avoid processing some defs more than once.
484      if (!MI->findRegisterDefOperand(*AS))
485        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
486  }
487}
488
489void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
490                                         unsigned MIIdx,
491                                         LiveInterval &interval, bool isAlias) {
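  // isAlias is set when this interval was created only because it is a
  // sub-register of an explicit live-in (see computeIntervals); if such a
  // register is never read it is treated as dead rather than live-through.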
492  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
493
  // Look for kills; if the register reaches a def before it is killed, it
  // shouldn't be considered a live-in.
496  MachineBasicBlock::iterator mi = MBB->begin();
497  unsigned baseIndex = MIIdx;
498  unsigned start = baseIndex;
499  unsigned end = start;
500  while (mi != MBB->end()) {
501    if (lv_->KillsRegister(mi, interval.reg)) {
502      DOUT << " killed";
503      end = getUseIndex(baseIndex) + 1;
504      goto exit;
505    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
506      // Another instruction redefines the register before it is ever read.
507      // Then the register is essentially dead at the instruction that defines
508      // it. Hence its interval is:
509      // [defSlot(def), defSlot(def)+1)
510      DOUT << " dead";
511      end = getDefIndex(start) + 1;
512      goto exit;
513    }
514
515    baseIndex += InstrSlots::NUM;
516    ++mi;
517  }
518
519exit:
520  // Live-in register might not be used at all.
521  if (end == MIIdx) {
522    if (isAlias) {
523      DOUT << " dead";
524      end = getDefIndex(MIIdx) + 1;
525    } else {
526      DOUT << " live through";
527      end = baseIndex;
528    }
529  }
530
531  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
532  interval.addRange(LR);
533  interval.addKill(LR.valno, end);
534  DOUT << " +" << LR << '\n';
535}
536
/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N and the
/// variable is live throughout it.
541void LiveIntervals::computeIntervals() {
542  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
543       << "********** Function: "
544       << ((Value*)mf_->getFunction())->getName() << '\n';
545  // Track the index of the current machine instr.
546  unsigned MIIndex = 0;
547  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
548       MBBI != E; ++MBBI) {
549    MachineBasicBlock *MBB = MBBI;
550    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
551
552    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
553
554    // Create intervals for live-ins to this BB first.
555    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
556           LE = MBB->livein_end(); LI != LE; ++LI) {
557      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
558      // Multiple live-ins can alias the same register.
559      for (const unsigned* AS = mri_->getSubRegisters(*LI); *AS; ++AS)
560        if (!hasInterval(*AS))
561          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
562                               true);
563    }
564
565    for (; MI != miEnd; ++MI) {
566      DOUT << MIIndex << "\t" << *MI;
567
568      // Handle defs.
569      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
570        MachineOperand &MO = MI->getOperand(i);
571        // handle register defs - build intervals
572        if (MO.isRegister() && MO.getReg() && MO.isDef())
573          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
574      }
575
576      MIIndex += InstrSlots::NUM;
577    }
578  }
579}
580
581bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
582                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
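  // Idx2MBBMap is sorted by start index (see Idx2MBBCompare), so a binary
  // search finds the first basic block whose start index is not less than
  // LR.start.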
583  std::vector<IdxMBBPair>::const_iterator I =
584    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
585
586  bool ResVal = false;
587  while (I != Idx2MBBMap.end()) {
588    if (LR.end <= I->first)
589      break;
590    MBBs.push_back(I->second);
591    ResVal = true;
592    ++I;
593  }
594  return ResVal;
595}
596
597
598LiveInterval LiveIntervals::createInterval(unsigned reg) {
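  // Physical register intervals get an infinite weight so they are never
  // chosen for spilling; virtual register weights start at zero and are
  // accumulated later.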
599  float Weight = MRegisterInfo::isPhysicalRegister(reg) ?
600                       HUGE_VALF : 0.0F;
601  return LiveInterval(reg, Weight);
602}
603
604
605//===----------------------------------------------------------------------===//
606// Register allocator hooks.
607//
608
609/// isReMaterializable - Returns true if the definition MI of the specified
610/// val# of the specified interval is re-materializable.
611bool LiveIntervals::isReMaterializable(const LiveInterval &li,
612                                       const VNInfo *ValNo, MachineInstr *MI,
613                                       bool &isLoad) {
614  if (DisableReMat)
615    return false;
616
617  isLoad = false;
618  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
619  if ((TID->Flags & M_IMPLICIT_DEF_FLAG) ||
620      tii_->isTriviallyReMaterializable(MI)) {
621    isLoad = TID->isSimpleLoad();
622    return true;
623  }
624
625  int FrameIdx = 0;
626  if (!tii_->isLoadFromStackSlot(MI, FrameIdx) ||
627      !mf_->getFrameInfo()->isFixedObjectIndex(FrameIdx))
628    return false;
629
630  // This is a load from fixed stack slot. It can be rematerialized unless it's
631  // re-defined by a two-address instruction.
632  isLoad = true;
633  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
634       i != e; ++i) {
635    const VNInfo *VNI = *i;
636    if (VNI == ValNo)
637      continue;
638    unsigned DefIdx = VNI->def;
639    if (DefIdx == ~1U)
640      continue; // Dead val#.
641    MachineInstr *DefMI = (DefIdx == ~0u)
642      ? NULL : getInstructionFromIndex(DefIdx);
643    if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg)) {
644      isLoad = false;
645      return false;
646    }
647  }
648  return true;
649}
650
/// isReMaterializable - Returns true if the defining MI of every val# of the
/// specified interval is re-materializable.
653bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
654  isLoad = false;
655  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
656       i != e; ++i) {
657    const VNInfo *VNI = *i;
658    unsigned DefIdx = VNI->def;
659    if (DefIdx == ~1U)
660      continue; // Dead val#.
661    // Is the def for the val# rematerializable?
662    if (DefIdx == ~0u)
663      return false;
664    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
665    bool DefIsLoad = false;
666    if (!ReMatDefMI || !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
667      return false;
668    isLoad |= DefIsLoad;
669  }
670  return true;
671}
672
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and true is returned.
677bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
678                                         VirtRegMap &vrm, MachineInstr *DefMI,
679                                         unsigned InstrIdx,
680                                         SmallVector<unsigned, 2> &Ops,
681                                         bool isSS, int Slot, unsigned Reg) {
682  unsigned MRInfo = 0;
683  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
684  // If it is an implicit def instruction, just delete it.
685  if (TID->Flags & M_IMPLICIT_DEF_FLAG) {
686    RemoveMachineInstrFromMaps(MI);
687    vrm.RemoveMachineInstrFromMaps(MI);
688    MI->eraseFromParent();
689    ++numFolds;
690    return true;
691  }
692
693  SmallVector<unsigned, 2> FoldOps;
694  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
695    unsigned OpIdx = Ops[i];
696    // FIXME: fold subreg use.
697    if (MI->getOperand(OpIdx).getSubReg())
698      return false;
699    if (MI->getOperand(OpIdx).isDef())
700      MRInfo |= (unsigned)VirtRegMap::isMod;
701    else {
702      // Filter out two-address use operand(s).
703      if (TID->getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
704        MRInfo = VirtRegMap::isModRef;
705        continue;
706      }
707      MRInfo |= (unsigned)VirtRegMap::isRef;
708    }
709    FoldOps.push_back(OpIdx);
710  }
711
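  // Either fold a load / store of stack slot Slot into MI, or fold the
  // rematerialized defining instruction DefMI directly into MI.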
712  MachineInstr *fmi = isSS ? mri_->foldMemoryOperand(MI, FoldOps, Slot)
713                           : mri_->foldMemoryOperand(MI, FoldOps, DefMI);
714  if (fmi) {
715    // Attempt to fold the memory reference into the instruction. If
716    // we can do this, we don't need to insert spill code.
717    if (lv_)
718      lv_->instructionChanged(MI, fmi);
719    else
720      LiveVariables::transferKillDeadInfo(MI, fmi, mri_);
721    MachineBasicBlock &MBB = *MI->getParent();
722    if (isSS && !mf_->getFrameInfo()->isFixedObjectIndex(Slot))
723      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
724    vrm.transferSpillPts(MI, fmi);
725    vrm.transferRestorePts(MI, fmi);
726    mi2iMap_.erase(MI);
727    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
728    mi2iMap_[fmi] = InstrIdx;
729    MI = MBB.insert(MBB.erase(MI), fmi);
730    ++numFolds;
731    return true;
732  }
733  return false;
734}
735
736/// canFoldMemoryOperand - Returns true if the specified load / store
737/// folding is possible.
738bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
739                                         SmallVector<unsigned, 2> &Ops) const {
740  SmallVector<unsigned, 2> FoldOps;
741  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
742    unsigned OpIdx = Ops[i];
743    // FIXME: fold subreg use.
744    if (MI->getOperand(OpIdx).getSubReg())
745      return false;
746    FoldOps.push_back(OpIdx);
747  }
748
749  return mri_->canFoldMemoryOperand(MI, FoldOps);
750}
751
752bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
753  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
754  for (LiveInterval::Ranges::const_iterator
755         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
756    std::vector<IdxMBBPair>::const_iterator II =
757      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
758    if (II == Idx2MBBMap.end())
759      continue;
760    if (I->end > II->first)  // crossing a MBB.
761      return false;
762    MBBs.insert(II->second);
763    if (MBBs.size() > 1)
764      return false;
765  }
766  return true;
767}
768
769/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
770/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
771bool LiveIntervals::
772rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
773                 unsigned id, unsigned index, unsigned end,  MachineInstr *MI,
774                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
775                 unsigned Slot, int LdSlot,
776                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
777                 VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
778                 const TargetRegisterClass* rc,
779                 SmallVector<int, 4> &ReMatIds,
780                 unsigned &NewVReg, bool &HasDef, bool &HasUse,
781                 const MachineLoopInfo *loopInfo,
782                 std::map<unsigned,unsigned> &MBBVRegsMap,
783                 std::vector<LiveInterval*> &NewLIs) {
784  bool CanFold = false;
785 RestartInstruction:
786  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
787    MachineOperand& mop = MI->getOperand(i);
788    if (!mop.isRegister())
789      continue;
790    unsigned Reg = mop.getReg();
791    unsigned RegI = Reg;
792    if (Reg == 0 || MRegisterInfo::isPhysicalRegister(Reg))
793      continue;
794    if (Reg != li.reg)
795      continue;
796
797    bool TryFold = !DefIsReMat;
798    bool FoldSS = true; // Default behavior unless it's a remat.
799    int FoldSlot = Slot;
800    if (DefIsReMat) {
801      // If this is the rematerializable definition MI itself and
802      // all of its uses are rematerialized, simply delete it.
803      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materializable def: ";
805        DOUT << MI << '\n';
806        RemoveMachineInstrFromMaps(MI);
807        vrm.RemoveMachineInstrFromMaps(MI);
808        MI->eraseFromParent();
809        break;
810      }
811
812      // If def for this use can't be rematerialized, then try folding.
813      // If def is rematerializable and it's a load, also try folding.
814      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
815      if (isLoad) {
816        // Try fold loads (from stack slot, constant pool, etc.) into uses.
817        FoldSS = isLoadSS;
818        FoldSlot = LdSlot;
819      }
820    }
821
822    // Scan all of the operands of this instruction rewriting operands
823    // to use NewVReg instead of li.reg as appropriate.  We do this for
824    // two reasons:
825    //
826    //   1. If the instr reads the same spilled vreg multiple times, we
827    //      want to reuse the NewVReg.
828    //   2. If the instr is a two-addr instruction, we are required to
829    //      keep the src/dst regs pinned.
830    //
831    // Keep track of whether we replace a use and/or def so that we can
832    // create the spill interval with the appropriate range.
833
834    HasUse = mop.isUse();
835    HasDef = mop.isDef();
836    SmallVector<unsigned, 2> Ops;
837    Ops.push_back(i);
838    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
839      const MachineOperand &MOj = MI->getOperand(j);
840      if (!MOj.isRegister())
841        continue;
842      unsigned RegJ = MOj.getReg();
843      if (RegJ == 0 || MRegisterInfo::isPhysicalRegister(RegJ))
844        continue;
845      if (RegJ == RegI) {
846        Ops.push_back(j);
847        HasUse |= MOj.isUse();
848        HasDef |= MOj.isDef();
849      }
850    }
851
852    if (TryFold) {
853      // Do not fold load / store here if we are splitting. We'll find an
854      // optimal point to insert a load / store later.
855      if (!TrySplit) {
856        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
857                                 Ops, FoldSS, FoldSlot, Reg)) {
858          // Folding the load/store can completely change the instruction in
859          // unpredictable ways, rescan it from the beginning.
860          HasUse = false;
861          HasDef = false;
862          CanFold = false;
863          goto RestartInstruction;
864        }
865      } else {
866        CanFold = canFoldMemoryOperand(MI, Ops);
867      }
868    } else
869      CanFold = false;
870
871    // Create a new virtual register for the spill interval.
872    bool CreatedNewVReg = false;
873    if (NewVReg == 0) {
874      NewVReg = RegInfo.createVirtualRegister(rc);
875      vrm.grow();
876      CreatedNewVReg = true;
877    }
878    mop.setReg(NewVReg);
879
880    // Reuse NewVReg for other reads.
881    for (unsigned j = 0, e = Ops.size(); j != e; ++j)
882      MI->getOperand(Ops[j]).setReg(NewVReg);
883
884    if (CreatedNewVReg) {
885      if (DefIsReMat) {
886        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
887        if (ReMatIds[id] == VirtRegMap::MAX_STACK_SLOT) {
888          // Each valnum may have its own remat id.
889          ReMatIds[id] = vrm.assignVirtReMatId(NewVReg);
890        } else {
891          vrm.assignVirtReMatId(NewVReg, ReMatIds[id]);
892        }
893        if (!CanDelete || (HasUse && HasDef)) {
894          // If this is a two-addr instruction then its use operands are
895          // rematerializable but its def is not. It should be assigned a
896          // stack slot.
897          vrm.assignVirt2StackSlot(NewVReg, Slot);
898        }
899      } else {
900        vrm.assignVirt2StackSlot(NewVReg, Slot);
901      }
902    } else if (HasUse && HasDef &&
903               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
904      // If this interval hasn't been assigned a stack slot (because earlier
905      // def is a deleted remat def), do it now.
906      assert(Slot != VirtRegMap::NO_STACK_SLOT);
907      vrm.assignVirt2StackSlot(NewVReg, Slot);
908    }
909
910    // create a new register interval for this spill / remat.
911    LiveInterval &nI = getOrCreateInterval(NewVReg);
912    if (CreatedNewVReg) {
913      NewLIs.push_back(&nI);
914      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
915      if (TrySplit)
916        vrm.setIsSplitFromReg(NewVReg, li.reg);
917    }
918
919    if (HasUse) {
920      if (CreatedNewVReg) {
921        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
922                     nI.getNextValue(~0U, 0, VNInfoAllocator));
923        DOUT << " +" << LR;
924        nI.addRange(LR);
925      } else {
926        // Extend the split live interval to this def / use.
927        unsigned End = getUseIndex(index)+1;
928        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
929                     nI.getValNumInfo(nI.getNumValNums()-1));
930        DOUT << " +" << LR;
931        nI.addRange(LR);
932      }
933    }
934    if (HasDef) {
935      LiveRange LR(getDefIndex(index), getStoreIndex(index),
936                   nI.getNextValue(~0U, 0, VNInfoAllocator));
937      DOUT << " +" << LR;
938      nI.addRange(LR);
939    }
940
941    DOUT << "\t\t\t\tAdded new interval: ";
942    nI.print(DOUT, mri_);
943    DOUT << '\n';
944  }
945  return CanFold;
946}
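
/// anyKillInMBBAfterIdx - Returns true if the specified val# is killed within
/// MBB at an index greater than Idx.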
947bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
948                                   const VNInfo *VNI,
949                                   MachineBasicBlock *MBB, unsigned Idx) const {
950  unsigned End = getMBBEndIdx(MBB);
951  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
952    unsigned KillIdx = VNI->kills[j];
953    if (KillIdx > Idx && KillIdx < End)
954      return true;
955  }
956  return false;
957}
958
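/// findDefinedVNInfo - Return the val# of the specified interval that is
/// defined at the given index, or NULL if there is none.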
959static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
960  const VNInfo *VNI = NULL;
961  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
962         e = li.vni_end(); i != e; ++i)
963    if ((*i)->def == DefIdx) {
964      VNI = *i;
965      break;
966    }
967  return VNI;
968}
969
970void LiveIntervals::
971rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
972                    LiveInterval::Ranges::const_iterator &I,
973                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
974                    unsigned Slot, int LdSlot,
975                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
976                    VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
977                    const TargetRegisterClass* rc,
978                    SmallVector<int, 4> &ReMatIds,
979                    const MachineLoopInfo *loopInfo,
980                    BitVector &SpillMBBs,
981                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
982                    BitVector &RestoreMBBs,
983                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
984                    std::map<unsigned,unsigned> &MBBVRegsMap,
985                    std::vector<LiveInterval*> &NewLIs) {
986  bool AllCanFold = true;
987  unsigned NewVReg = 0;
988  unsigned index = getBaseIndex(I->start);
989  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
990  for (; index != end; index += InstrSlots::NUM) {
991    // skip deleted instructions
992    while (index != end && !getInstructionFromIndex(index))
993      index += InstrSlots::NUM;
994    if (index == end) break;
995
996    MachineInstr *MI = getInstructionFromIndex(index);
997    MachineBasicBlock *MBB = MI->getParent();
998    unsigned ThisVReg = 0;
999    if (TrySplit) {
1000      std::map<unsigned,unsigned>::const_iterator NVI =
1001        MBBVRegsMap.find(MBB->getNumber());
1002      if (NVI != MBBVRegsMap.end()) {
1003        ThisVReg = NVI->second;
1004        // One common case:
1005        // x = use
1006        // ...
1007        // ...
1008        // def = ...
1009        //     = use
        // It's better to start a new interval here to avoid artificially
        // extending the earlier one.
1012        // FIXME: Too slow? Can we fix it after rewriteInstructionsForSpills?
1013        bool MIHasUse = false;
1014        bool MIHasDef = false;
1015        for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1016          MachineOperand& mop = MI->getOperand(i);
1017          if (!mop.isRegister() || mop.getReg() != li.reg)
1018            continue;
1019          if (mop.isUse())
1020            MIHasUse = true;
1021          else
1022            MIHasDef = true;
1023        }
1024        if (MIHasDef && !MIHasUse) {
1025          MBBVRegsMap.erase(MBB->getNumber());
1026          ThisVReg = 0;
1027        }
1028      }
1029    }
1030
1031    bool IsNew = ThisVReg == 0;
1032    if (IsNew) {
1033      // This ends the previous live interval. If all of its def / use
1034      // can be folded, give it a low spill weight.
1035      if (NewVReg && TrySplit && AllCanFold) {
1036        LiveInterval &nI = getOrCreateInterval(NewVReg);
1037        nI.weight /= 10.0F;
1038      }
1039      AllCanFold = true;
1040    }
1041    NewVReg = ThisVReg;
1042
1043    bool HasDef = false;
1044    bool HasUse = false;
1045    bool CanFold = rewriteInstructionForSpills(li, TrySplit, I->valno->id,
1046                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1047                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1048                                CanDelete, vrm, RegInfo, rc, ReMatIds, NewVReg,
1049                                HasDef, HasUse, loopInfo, MBBVRegsMap, NewLIs);
1050    if (!HasDef && !HasUse)
1051      continue;
1052
1053    AllCanFold &= CanFold;
1054
1055    // Update weight of spill interval.
1056    LiveInterval &nI = getOrCreateInterval(NewVReg);
1057    if (!TrySplit) {
1058      // The spill weight is now infinity as it cannot be spilled again.
1059      nI.weight = HUGE_VALF;
1060      continue;
1061    }
1062
1063    // Keep track of the last def and first use in each MBB.
1064    unsigned MBBId = MBB->getNumber();
1065    if (HasDef) {
1066      if (MI != ReMatOrigDefMI || !CanDelete) {
1067        bool HasKill = false;
1068        if (!HasUse)
1069          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1070        else {
          // If this is a two-address instruction, then this index starts a new VNInfo.
1072          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
1073          if (VNI)
1074            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1075        }
1076        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1077          SpillIdxes.find(MBBId);
1078        if (!HasKill) {
1079          if (SII == SpillIdxes.end()) {
1080            std::vector<SRInfo> S;
1081            S.push_back(SRInfo(index, NewVReg, true));
1082            SpillIdxes.insert(std::make_pair(MBBId, S));
1083          } else if (SII->second.back().vreg != NewVReg) {
1084            SII->second.push_back(SRInfo(index, NewVReg, true));
1085          } else if ((int)index > SII->second.back().index) {
1086            // If there is an earlier def and this is a two-address
1087            // instruction, then it's not possible to fold the store (which
1088            // would also fold the load).
1089            SRInfo &Info = SII->second.back();
1090            Info.index = index;
1091            Info.canFold = !HasUse;
1092          }
1093          SpillMBBs.set(MBBId);
1094        } else if (SII != SpillIdxes.end() &&
1095                   SII->second.back().vreg == NewVReg &&
1096                   (int)index > SII->second.back().index) {
1097          // There is an earlier def that's not killed (must be two-address).
1098          // The spill is no longer needed.
1099          SII->second.pop_back();
1100          if (SII->second.empty()) {
1101            SpillIdxes.erase(MBBId);
1102            SpillMBBs.reset(MBBId);
1103          }
1104        }
1105      }
1106    }
1107
1108    if (HasUse) {
1109      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1110        SpillIdxes.find(MBBId);
1111      if (SII != SpillIdxes.end() &&
1112          SII->second.back().vreg == NewVReg &&
1113          (int)index > SII->second.back().index)
        // There are use(s) following the last def; it's not safe to fold the spill.
1115        SII->second.back().canFold = false;
1116      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1117        RestoreIdxes.find(MBBId);
1118      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1119        // If we are splitting live intervals, only fold if it's the first
1120        // use and there isn't another use later in the MBB.
1121        RII->second.back().canFold = false;
1122      else if (IsNew) {
1123        // Only need a reload if there isn't an earlier def / use.
1124        if (RII == RestoreIdxes.end()) {
1125          std::vector<SRInfo> Infos;
1126          Infos.push_back(SRInfo(index, NewVReg, true));
1127          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1128        } else {
1129          RII->second.push_back(SRInfo(index, NewVReg, true));
1130        }
1131        RestoreMBBs.set(MBBId);
1132      }
1133    }
1134
1135    // Update spill weight.
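    // The contribution depends on whether this instruction reads and / or
    // writes the value and on the loop depth of the containing block.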
1136    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1137    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1138  }
1139
1140  if (NewVReg && TrySplit && AllCanFold) {
1141    // If all of its def / use can be folded, give it a low spill weight.
1142    LiveInterval &nI = getOrCreateInterval(NewVReg);
1143    nI.weight /= 10.0F;
1144  }
1145}
1146
1147bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1148                        BitVector &RestoreMBBs,
1149                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1150  if (!RestoreMBBs[Id])
1151    return false;
1152  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1153  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1154    if (Restores[i].index == index &&
1155        Restores[i].vreg == vr &&
1156        Restores[i].canFold)
1157      return true;
1158  return false;
1159}
1160
1161void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1162                        BitVector &RestoreMBBs,
1163                        std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1164  if (!RestoreMBBs[Id])
1165    return;
1166  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1167  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1168    if (Restores[i].index == index && Restores[i].vreg)
1169      Restores[i].index = -1;
1170}
1171
1172
1173std::vector<LiveInterval*> LiveIntervals::
1174addIntervalsForSpills(const LiveInterval &li,
1175                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // Since this is called after the analysis is done, we don't know if
  // LiveVariables is still available.
1178  lv_ = getAnalysisToUpdate<LiveVariables>();
1179
1180  assert(li.weight != HUGE_VALF &&
1181         "attempt to spill already spilled interval!");
1182
1183  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1184  li.print(DOUT, mri_);
1185  DOUT << '\n';
1186
  // Each bit specifies whether a spill is required in the corresponding MBB.
1188  BitVector SpillMBBs(mf_->getNumBlockIDs());
1189  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1190  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1191  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1192  std::map<unsigned,unsigned> MBBVRegsMap;
1193  std::vector<LiveInterval*> NewLIs;
1194  MachineRegisterInfo &RegInfo = mf_->getRegInfo();
1195  const TargetRegisterClass* rc = RegInfo.getRegClass(li.reg);
1196
1197  unsigned NumValNums = li.getNumValNums();
1198  SmallVector<MachineInstr*, 4> ReMatDefs;
1199  ReMatDefs.resize(NumValNums, NULL);
1200  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1201  ReMatOrigDefs.resize(NumValNums, NULL);
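  // ReMatIds is indexed by val#; MAX_STACK_SLOT serves as a sentinel meaning
  // no remat id has been assigned to that val# yet.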
1202  SmallVector<int, 4> ReMatIds;
1203  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1204  BitVector ReMatDelete(NumValNums);
1205  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1206
  // Spilling a split live interval. It cannot be split any further. Also,
  // it is guaranteed to be a single val# / range interval.
1209  if (vrm.getPreSplitReg(li.reg)) {
1210    vrm.setIsSplitFromReg(li.reg, 0);
1211    // Unset the split kill marker on the last use.
1212    unsigned KillIdx = vrm.getKillPoint(li.reg);
1213    if (KillIdx) {
1214      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1215      assert(KillMI && "Last use disappeared?");
1216      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1217      assert(KillOp != -1 && "Last use disappeared?");
1218      KillMI->getOperand(KillOp).setIsKill(false);
1219    }
1220    vrm.removeKillPoint(li.reg);
1221    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1222    Slot = vrm.getStackSlot(li.reg);
1223    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1224    MachineInstr *ReMatDefMI = DefIsReMat ?
1225      vrm.getReMaterializedMI(li.reg) : NULL;
1226    int LdSlot = 0;
1227    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1228    bool isLoad = isLoadSS ||
1229      (DefIsReMat && (ReMatDefMI->getInstrDescriptor()->isSimpleLoad()));
1230    bool IsFirstRange = true;
1231    for (LiveInterval::Ranges::const_iterator
1232           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1233      // If this is a split live interval with multiple ranges, it means there
1234      // are two-address instructions that re-defined the value. Only the
1235      // first def can be rematerialized!
1236      if (IsFirstRange) {
1237        // Note ReMatOrigDefMI has already been deleted.
1238        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1239                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1240                             false, vrm, RegInfo, rc, ReMatIds, loopInfo,
1241                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1242                             MBBVRegsMap, NewLIs);
1243      } else {
1244        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1245                             Slot, 0, false, false, false,
1246                             false, vrm, RegInfo, rc, ReMatIds, loopInfo,
1247                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1248                             MBBVRegsMap, NewLIs);
1249      }
1250      IsFirstRange = false;
1251    }
1252    return NewLIs;
1253  }
1254
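  // Only try splitting when the interval spans more than one basic block;
  // SplitLimit is a hidden option that caps the total number of splits.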
1255  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1256  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1257    TrySplit = false;
1258  if (TrySplit)
1259    ++numSplits;
1260  bool NeedStackSlot = false;
1261  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1262       i != e; ++i) {
1263    const VNInfo *VNI = *i;
1264    unsigned VN = VNI->id;
1265    unsigned DefIdx = VNI->def;
1266    if (DefIdx == ~1U)
1267      continue; // Dead val#.
1268    // Is the def for the val# rematerializable?
1269    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1270      ? 0 : getInstructionFromIndex(DefIdx);
1271    bool dummy;
1272    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1273      // Remember how to remat the def of this val#.
1274      ReMatOrigDefs[VN] = ReMatDefMI;
1275      // Original def may be modified so we have to make a copy here. vrm must
1276      // delete these!
1277      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1278
1279      bool CanDelete = true;
1280      if (VNI->hasPHIKill) {
        // A kill is a phi node, so not all of its uses can be rematerialized.
1282        // It must not be deleted.
1283        CanDelete = false;
1284        // Need a stack slot if there is any live range where uses cannot be
1285        // rematerialized.
1286        NeedStackSlot = true;
1287      }
1288      if (CanDelete)
1289        ReMatDelete.set(VN);
1290    } else {
1291      // Need a stack slot if there is any live range where uses cannot be
1292      // rematerialized.
1293      NeedStackSlot = true;
1294    }
1295  }
1296
1297  // One stack slot per live interval.
1298  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1299    Slot = vrm.assignVirt2StackSlot(li.reg);
1300
1301  // Create new intervals and rewrite defs and uses.
1302  for (LiveInterval::Ranges::const_iterator
1303         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1304    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1305    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1306    bool DefIsReMat = ReMatDefMI != NULL;
1307    bool CanDelete = ReMatDelete[I->valno->id];
1308    int LdSlot = 0;
1309    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1310    bool isLoad = isLoadSS ||
1311      (DefIsReMat && ReMatDefMI->getInstrDescriptor()->isSimpleLoad());
1312    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1313                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1314                               CanDelete, vrm, RegInfo, rc, ReMatIds, loopInfo,
1315                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1316                               MBBVRegsMap, NewLIs);
1317  }
1318
1319  // Insert spills / restores if we are splitting.
1320  if (!TrySplit)
1321    return NewLIs;
1322
1323  SmallPtrSet<LiveInterval*, 4> AddedKill;
1324  SmallVector<unsigned, 2> Ops;
1325  if (NeedStackSlot) {
1326    int Id = SpillMBBs.find_first();
1327    while (Id != -1) {
1328      std::vector<SRInfo> &spills = SpillIdxes[Id];
1329      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1330        int index = spills[i].index;
1331        unsigned VReg = spills[i].vreg;
1332        LiveInterval &nI = getOrCreateInterval(VReg);
1333        bool isReMat = vrm.isReMaterialized(VReg);
1334        MachineInstr *MI = getInstructionFromIndex(index);
1335        bool CanFold = false;
1336        bool FoundUse = false;
1337        Ops.clear();
1338        if (spills[i].canFold) {
1339          CanFold = true;
1340          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1341            MachineOperand &MO = MI->getOperand(j);
1342            if (!MO.isRegister() || MO.getReg() != VReg)
1343              continue;
1344
1345            Ops.push_back(j);
1346            if (MO.isDef())
1347              continue;
1348            if (isReMat ||
1349                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1350                                                RestoreMBBs, RestoreIdxes))) {
1351              // MI has two-address uses of the same register. If the use
1352              // isn't the first and only use in the BB, then we can't fold
1353              // it. FIXME: Move this to rewriteInstructionsForSpills.
1354              CanFold = false;
1355              break;
1356            }
1357            FoundUse = true;
1358          }
1359        }
1360        // Fold the store into the def if possible.
1361        bool Folded = false;
1362        if (CanFold && !Ops.empty()) {
1363          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1364            Folded = true;
            if (FoundUse) {
1366              // Also folded uses, do not issue a load.
1367              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1368              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1369            }
1370            nI.removeRange(getDefIndex(index), getStoreIndex(index));
1371          }
1372        }
1373
1374        // Else tell the spiller to issue a spill.
1375        if (!Folded) {
1376          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1377          bool isKill = LR->end == getStoreIndex(index);
1378          vrm.addSpillPoint(VReg, isKill, MI);
1379          if (isKill)
1380            AddedKill.insert(&nI);
1381        }
1382      }
1383      Id = SpillMBBs.find_next(Id);
1384    }
1385  }
1386
1387  int Id = RestoreMBBs.find_first();
1388  while (Id != -1) {
1389    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1390    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1391      int index = restores[i].index;
1392      if (index == -1)
1393        continue;
1394      unsigned VReg = restores[i].vreg;
1395      LiveInterval &nI = getOrCreateInterval(VReg);
1396      MachineInstr *MI = getInstructionFromIndex(index);
1397      bool CanFold = false;
1398      Ops.clear();
1399      if (restores[i].canFold) {
1400        CanFold = true;
1401        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1402          MachineOperand &MO = MI->getOperand(j);
1403          if (!MO.isRegister() || MO.getReg() != VReg)
1404            continue;
1405
1406          if (MO.isDef()) {
1407            // If this restore were to be folded, it would have been folded
1408            // already.
1409            CanFold = false;
1410            break;
1411          }
1412          Ops.push_back(j);
1413        }
1414      }
1415
1416      // Fold the load into the use if possible.
1417      bool Folded = false;
1418      if (CanFold && !Ops.empty()) {
1419        if (!vrm.isReMaterialized(VReg))
1420          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1421        else {
1422          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1423          int LdSlot = 0;
1424          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1425          // If the rematerializable def is a load, also try to fold it.
1426          if (isLoadSS || ReMatDefMI->getInstrDescriptor()->isSimpleLoad())
1427            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1428                                          Ops, isLoadSS, LdSlot, VReg);
1429        }
1430      }
1431      // If folding is not possible / failed, then tell the spiller to issue a
1432      // load / rematerialization for us.
1433      if (Folded)
1434        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1435      else
1436        vrm.addRestorePoint(VReg, MI);
1437    }
1438    Id = RestoreMBBs.find_next(Id);
1439  }
1440
1441  // Finalize intervals: add kills, finalize spill weights, and filter out
1442  // dead intervals.
1443  std::vector<LiveInterval*> RetNewLIs;
1444  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1445    LiveInterval *LI = NewLIs[i];
1446    if (!LI->empty()) {
1447      LI->weight /= LI->getSize();
1448      if (!AddedKill.count(LI)) {
1449        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1450        unsigned LastUseIdx = getBaseIndex(LR->end);
1451        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1452        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg);
1453        assert(UseIdx != -1);
1454        if (LastUse->getInstrDescriptor()->
1455            getOperandConstraint(UseIdx, TOI::TIED_TO) == -1) {
1456          LastUse->getOperand(UseIdx).setIsKill();
1457          vrm.addKillPoint(LI->reg, LastUseIdx);
1458        }
1459      }
1460      RetNewLIs.push_back(LI);
1461    }
1462  }
1463
1464  return RetNewLIs;
1465}
1466