LiveIntervalAnalysis.cpp revision cef21c354408980eab7922c35af7523b08b5bec9
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/MRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <cmath>
using namespace llvm;

namespace {
  RegisterAnalysis<LiveIntervals> X("liveintervals", "Live Interval Analysis");

  Statistic<> numIntervals
  ("liveintervals", "Number of original intervals");

  Statistic<> numIntervalsAfter
  ("liveintervals", "Number of intervals after coalescing");

  Statistic<> numJoins
  ("liveintervals", "Number of interval joins performed");

  Statistic<> numPeep
  ("liveintervals", "Number of identity moves eliminated after coalescing");

  Statistic<> numFolded
  ("liveintervals", "Number of loads/stores folded into instructions");

  cl::opt<bool>
  EnableJoining("join-liveintervals",
                cl::desc("Join compatible live intervals"),
                cl::init(true));
};

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const
{
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addRequired<LoopInfo>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

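/// releaseMemory - Clear out the per-function state: the instruction
/// numbering maps and the register-to-interval and register-to-register
/// maps are all rebuilt on the next call to runOnMachineFunction.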
void LiveIntervals::releaseMemory()
{
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  r2rMap_.clear();
}


/// runOnMachineFunction - Number the instructions, compute live intervals for
/// every register, then coalesce copies and compute spill weights.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  tm_ = &fn.getTarget();
  mri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = mri_->getAllocatableSet(fn);
  r2rMap_.grow(mf_->getSSARegMap()->getLastVirtReg());

  // If this function has any live-ins, insert a dummy instruction at the
  // beginning of the function that we will pretend "defines" the values.  This
  // keeps the interval analysis simple by giving the live-in values a defining
  // instruction number.
  if (fn.livein_begin() != fn.livein_end()) {
    unsigned FirstLiveIn = fn.livein_begin()->first;

    // Find a reg class that contains this live-in.
    const TargetRegisterClass *RC = 0;
    for (MRegisterInfo::regclass_iterator RCI = mri_->regclass_begin(),
           E = mri_->regclass_end(); RCI != E; ++RCI)
      if ((*RCI)->contains(FirstLiveIn)) {
        RC = *RCI;
        break;
      }

    MachineInstr *OldFirstMI = fn.begin()->begin();
    mri_->copyRegToReg(*fn.begin(), fn.begin()->begin(),
                       FirstLiveIn, FirstLiveIn, RC);
    assert(OldFirstMI != fn.begin()->begin() &&
           "copyRegToReg didn't insert anything!");
  }

  // Number the MachineInstrs.
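  // Each instruction is assigned InstrSlots::NUM consecutive index values (see
  // InstrSlots in LiveIntervalAnalysis.h); the intermediate slots, accessed via
  // the getLoadIndex/getUseIndex/getDefIndex/getStoreIndex helpers used below,
  // leave room to describe spill loads and stores inserted around an
  // instruction.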
  unsigned miIndex = 0;
  for (MachineFunction::iterator mbb = mf_->begin(), mbbEnd = mf_->end();
       mbb != mbbEnd; ++mbb)
    for (MachineBasicBlock::iterator mi = mbb->begin(), miEnd = mbb->end();
         mi != miEnd; ++mi) {
      bool inserted = mi2iMap_.insert(std::make_pair(mi, miIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(mi);
      miIndex += InstrSlots::NUM;
    }

  // Note intervals due to live-in values.
  if (fn.livein_begin() != fn.livein_end()) {
    MachineBasicBlock *Entry = fn.begin();
    for (MachineFunction::livein_iterator I = fn.livein_begin(),
           E = fn.livein_end(); I != E; ++I) {
      handlePhysicalRegisterDef(Entry, Entry->begin(),
                                getOrCreateInterval(I->first), 0, 0);
      for (const unsigned* AS = mri_->getAliasSet(I->first); *AS; ++AS)
        handlePhysicalRegisterDef(Entry, Entry->begin(),
                                  getOrCreateInterval(*AS), 0, 0);
    }
  }

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(std::cerr << "********** INTERVALS **********\n";
        for (iterator I = begin(), E = end(); I != E; ++I) {
          I->second.print(std::cerr, mri_);
          std::cerr << "\n";
        });

  // Join compatible intervals if requested.
  if (EnableJoining) joinIntervals();

  numIntervalsAfter += getNumIntervals();

  // Perform a final pass over the instructions: compute spill weights, rewrite
  // virtual registers to their representatives, and remove identity moves.
  const LoopInfo& loopInfo = getAnalysis<LoopInfo>();

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    MachineBasicBlock* mbb = mbbi;
    unsigned loopDepth = loopInfo.getLoopDepth(mbb->getBasicBlock());

    for (MachineBasicBlock::iterator mii = mbb->begin(), mie = mbb->end();
         mii != mie; ) {
      // If the move will be an identity move, delete it.
      unsigned srcReg, dstReg, RegRep;
      if (tii_->isMoveInstr(*mii, srcReg, dstReg) &&
          (RegRep = rep(srcReg)) == rep(dstReg)) {
        // Make sure an interval exists for the representative register.
        LiveInterval &interval = getOrCreateInterval(RegRep);
        // Remove the index -> MachineInstr and
        // MachineInstr -> index mappings.
        Mi2IndexMap::iterator mi2i = mi2iMap_.find(mii);
        if (mi2i != mi2iMap_.end()) {
          i2miMap_[mi2i->second/InstrSlots::NUM] = 0;
          mi2iMap_.erase(mi2i);
        }
        mii = mbbi->erase(mii);
        ++numPeep;
      }
      else {
        for (unsigned i = 0; i < mii->getNumOperands(); ++i) {
          const MachineOperand& mop = mii->getOperand(i);
          if (mop.isRegister() && mop.getReg() &&
              MRegisterInfo::isVirtualRegister(mop.getReg())) {
            // Replace the register with its representative register.
            unsigned reg = rep(mop.getReg());
            mii->SetMachineOperandReg(i, reg);

            LiveInterval &RegInt = getInterval(reg);
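            // Weight each use and def by 10^loopDepth so references inside
            // deeply nested loops dominate the spill cost.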
            RegInt.weight +=
              (mop.isUse() + mop.isDef()) * pow(10.0F, (int)loopDepth);
          }
        }
        ++mii;
      }
    }
  }

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(O, mri_);
    O << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

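/// addIntervalsForSpills - Spill the given interval to the given stack slot,
/// returning the new intervals created for the spill code.  Each reference to
/// li.reg is either folded into its instruction or rewritten to use a fresh
/// virtual register whose tiny interval, given an infinite weight, covers just
/// the reload/store around that instruction.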
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li, VirtRegMap &vrm, int slot) {
  // Since this is called after the analysis is done, we don't know if
  // LiveVariables is available.
  lv_ = getAnalysisToUpdate<LiveVariables>();

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VAL &&
         "attempt to spill already spilled interval!");

  DEBUG(std::cerr << "\t\t\t\tadding intervals for spills for interval: "
        << li << '\n');

  const TargetRegisterClass* rc = mf_->getSSARegMap()->getRegClass(li.reg);

  for (LiveInterval::Ranges::const_iterator
         i = li.ranges.begin(), e = li.ranges.end(); i != e; ++i) {
    unsigned index = getBaseIndex(i->start);
    unsigned end = getBaseIndex(i->end-1) + InstrSlots::NUM;
    for (; index != end; index += InstrSlots::NUM) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineBasicBlock::iterator mi = getInstructionFromIndex(index);

    for_operand:
      for (unsigned i = 0; i != mi->getNumOperands(); ++i) {
        MachineOperand& mop = mi->getOperand(i);
        if (mop.isRegister() && mop.getReg() == li.reg) {
          // First thing, attempt to fold the memory reference into the
          // instruction.  If we can do this, we don't need to insert spill
          // code.
          if (MachineInstr* fmi = mri_->foldMemoryOperand(mi, i, slot)) {
            if (lv_)
              lv_->instructionChanged(mi, fmi);
            vrm.virtFolded(li.reg, mi, i, fmi);
            mi2iMap_.erase(mi);
            i2miMap_[index/InstrSlots::NUM] = fmi;
            mi2iMap_[fmi] = index;
            MachineBasicBlock &MBB = *mi->getParent();
            mi = MBB.insert(MBB.erase(mi), fmi);
            ++numFolded;

            // Folding the load/store can completely change the instruction in
            // unpredictable ways, rescan it from the beginning.
            goto for_operand;
          } else {
            // This is tricky. We need to add information to the interval about
            // the spill code, so we have to use our extra load/store slots.
            //
            // If we have a use we are going to have a load, so we start the
            // interval from the load slot onwards. Otherwise we start from the
            // def slot.
            unsigned start = (mop.isUse() ?
                              getLoadIndex(index) :
                              getDefIndex(index));
            // If we have a def we are going to have a store right after it, so
            // we end the interval after the use of the next
            // instruction. Otherwise we end after the use of this instruction.
            unsigned end = 1 + (mop.isDef() ?
                                getStoreIndex(index) :
                                getUseIndex(index));
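            // So a plain use at base index I gets [getLoadIndex(I),
            // getUseIndex(I)+1), a plain def gets [getDefIndex(I),
            // getStoreIndex(I)+1), and a def-and-use covers
            // [getLoadIndex(I), getStoreIndex(I)+1).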

            // create a new register for this spill
            unsigned nReg = mf_->getSSARegMap()->createVirtualRegister(rc);
            mi->SetMachineOperandReg(i, nReg);
            vrm.grow();
            vrm.assignVirt2StackSlot(nReg, slot);
            LiveInterval& nI = getOrCreateInterval(nReg);
            assert(nI.empty());

            // the spill weight is now infinity as it
            // cannot be spilled again
            nI.weight = float(HUGE_VAL);
            LiveRange LR(start, end, nI.getNextValue());
            DEBUG(std::cerr << " +" << LR);
            nI.addRange(LR);
            added.push_back(&nI);

            // update live variables if it is available
            if (lv_)
              lv_->addVirtualRegisterKilled(nReg, mi);
            DEBUG(std::cerr << "\t\t\t\tadded new interval: " << nI << '\n');
          }
        }
      }
    }
  }

  return added;
}

void LiveIntervals::printRegName(unsigned reg) const
{
  if (MRegisterInfo::isPhysicalRegister(reg))
    std::cerr << mri_->getName(reg);
  else
    std::cerr << "%reg" << reg;
}

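/// handleVirtualRegisterDef - Update the live interval for the virtual
/// register defined by the given instruction.  The first definition of a vreg
/// builds its entire interval from the LiveVariables information; later
/// definitions (introduced by PHI elimination or two-address lowering) only
/// patch up the value numbers of the existing ranges.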
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock* mbb,
                                             MachineBasicBlock::iterator mi,
                                             LiveInterval& interval)
{
  DEBUG(std::cerr << "\t\tregister: "; printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination).  Much of what we do only has to be
  // done once for the vreg.  We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the index of the defining instruction.
    unsigned defIndex = getDefIndex(getInstructionIndex(mi));

    unsigned ValNum = interval.getNextValue();
    assert(ValNum == 0 && "First value in interval is not 0?");
    ValNum = 0;  // Clue in the optimizer.

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNum);
        interval.addRange(LR);
        DEBUG(std::cerr << " +" << LR << "\n");
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, is live through some number of blocks, and is
    // then live into the blocks where it gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNum);
    DEBUG(std::cerr << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+InstrSlots::NUM)
    // to the live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock* mbb = mf_->getBlockNumbered(i);
        if (!mbb->empty()) {
          LiveRange LR(getInstructionIndex(&mbb->front()),
                       getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                       ValNum);
          interval.addRange(LR);
          DEBUG(std::cerr << " +" << LR);
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      LiveRange LR(getInstructionIndex(Kill->getParent()->begin()),
                   getUseIndex(getInstructionIndex(Kill))+1,
                   ValNum);
      interval.addRange(LR);
      DEBUG(std::cerr << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two-address elimination.  If this is
    // the result of two-address elimination, then the vreg is the first
    // operand, and is a def-and-use.
    if (mi->getOperand(0).isRegister() &&
        mi->getOperand(0).getReg() == interval.reg &&
        mi->getOperand(0).isDef() && mi->getOperand(0).isUse()) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRange that defines this register and split it
      // into two values.
      unsigned DefIndex = getDefIndex(getInstructionIndex(vi.DefInst));
      unsigned RedefIndex = getDefIndex(getInstructionIndex(mi));

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      LiveRange LR(DefIndex, RedefIndex, interval.getNextValue());
      DEBUG(std::cerr << " replace range with " << LR);
      interval.addRange(LR);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      for (LiveVariables::killed_iterator KI = lv_->dead_begin(mi),
             E = lv_->dead_end(mi); KI != E; ++KI)
        if (KI->second == interval.reg) {
          interval.addRange(LiveRange(RedefIndex, RedefIndex+1, 0));
          break;
        }

      DEBUG(std::cerr << "RESULT: " << interval);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getInstructionIndex(Killer->getParent()->begin());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DEBUG(std::cerr << "Removing [" << Start << "," << End << "] from: "
              << interval << "\n");
        interval.removeRange(Start, End);
        DEBUG(std::cerr << "RESULT: " << interval);

        // Replace the interval with one of a NEW value number.
        LiveRange LR(Start, End, interval.getNextValue());
        DEBUG(std::cerr << " replace range with " << LR);
        interval.addRange(LR);
        DEBUG(std::cerr << "RESULT: " << interval);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(getInstructionIndex(mi));
      LiveRange LR(defIndex,
                   getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                   interval.getNextValue());
      interval.addRange(LR);
      DEBUG(std::cerr << " +" << LR);
    }
  }

  DEBUG(std::cerr << '\n');
}

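/// handlePhysicalRegisterDef - Update the live interval for the physical
/// register defined by the given instruction.  SrcReg/DestReg describe the
/// copy (if any) that performs this definition, so the value number can be
/// reused when the physreg is just getting back a value it was copied from.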
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              LiveInterval& interval,
                                              unsigned SrcReg, unsigned DestReg)
{
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG(std::cerr << "\t\tregister: "; printRegName(interval.reg));
  typedef LiveVariables::killed_iterator KillIter;

  unsigned baseIndex = getInstructionIndex(mi);
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  for (KillIter ki = lv_->dead_begin(mi), ke = lv_->dead_end(mi);
       ki != ke; ++ki) {
    if (interval.reg == ki->second) {
      DEBUG(std::cerr << " dead");
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (true) {
    ++mi;
    assert(mi != MBB->end() && "physreg was not killed in defining block!");
    baseIndex += InstrSlots::NUM;
    for (KillIter ki = lv_->killed_begin(mi), ke = lv_->killed_end(mi);
         ki != ke; ++ki) {
      if (interval.reg == ki->second) {
        DEBUG(std::cerr << " killed");
        end = getUseIndex(baseIndex) + 1;
        goto exit;
      }
    }
  }

exit:
  assert(start < end && "did not find end of interval?");

  // Finally, if this is defining a new range for the physical register, and if
  // that physreg is just a copy from a vreg, and if THAT vreg was a copy from
  // the physreg, then the new fragment has the same value as the one copied
  // into the vreg.
  if (interval.reg == DestReg && !interval.empty() &&
      MRegisterInfo::isVirtualRegister(SrcReg)) {

    // Get the live interval for the vreg, see if it is defined by a copy.
    LiveInterval &SrcInterval = getOrCreateInterval(SrcReg);

    if (SrcInterval.containsOneValue()) {
      assert(!SrcInterval.empty() && "Can't contain a value and be empty!");

      // Get the first index of the first range.  Though the interval may have
      // multiple live ranges in it, we only check the first.
      unsigned StartIdx = SrcInterval.begin()->start;
      MachineInstr *SrcDefMI = getInstructionFromIndex(StartIdx);

      // Check to see if the vreg was defined by a copy instruction, and that
      // the source was this physreg.
      unsigned VRegSrcSrc, VRegSrcDest;
      if (tii_->isMoveInstr(*SrcDefMI, VRegSrcSrc, VRegSrcDest) &&
          SrcReg == VRegSrcDest && VRegSrcSrc == DestReg) {
        // Okay, now we know that the vreg was defined by a copy from this
        // physreg.  Find the value number being copied and use it as the value
        // for this range.
        const LiveRange *DefRange = interval.getLiveRangeContaining(StartIdx-1);
        if (DefRange) {
          LiveRange LR(start, end, DefRange->ValId);
          interval.addRange(LR);
          DEBUG(std::cerr << " +" << LR << '\n');
          return;
        }
      }
    }
  }

  LiveRange LR(start, end, interval.getNextValue());
  interval.addRange(LR);
  DEBUG(std::cerr << " +" << LR << '\n');
}

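/// handleRegisterDef - Dispatch a register definition to the virtual or
/// physical register handler.  Non-allocatable physical registers are skipped
/// since LiveVariables provides no information for them.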
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned reg) {
  if (MRegisterInfo::isVirtualRegister(reg))
    handleVirtualRegisterDef(MBB, MI, getOrCreateInterval(reg));
  else if (allocatableRegs_[reg]) {
    unsigned SrcReg = 0, DestReg = 0;
    bool IsMove = tii_->isMoveInstr(*MI, SrcReg, DestReg);

    handlePhysicalRegisterDef(MBB, MI, getOrCreateInterval(reg),
                              SrcReg, DestReg);
    for (const unsigned* AS = mri_->getAliasSet(reg); *AS; ++AS)
      handlePhysicalRegisterDef(MBB, MI, getOrCreateInterval(*AS),
                                SrcReg, DestReg);
  }
}

/// computeIntervals - Compute live intervals for every virtual and physical
/// register.  For some numbering of the machine instructions [1,N], a live
/// interval is an interval [i, j), 1 <= i <= j < N, over which a variable is
/// live.
void LiveIntervals::computeIntervals()
{
  DEBUG(std::cerr << "********** COMPUTING LIVE INTERVALS **********\n");
  DEBUG(std::cerr << "********** Function: "
        << ((Value*)mf_->getFunction())->getName() << '\n');
  bool IgnoreFirstInstr = mf_->livein_begin() != mf_->livein_end();

  for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();
       I != E; ++I) {
    MachineBasicBlock* mbb = I;
    DEBUG(std::cerr << ((Value*)mbb->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator mi = mbb->begin(), miEnd = mbb->end();
    if (IgnoreFirstInstr) { ++mi; IgnoreFirstInstr = false; }
    for (; mi != miEnd; ++mi) {
      const TargetInstrDescriptor& tid =
        tm_->getInstrInfo()->get(mi->getOpcode());
      DEBUG(std::cerr << getInstructionIndex(mi) << "\t" << *mi);

      // Handle implicit defs.
      for (const unsigned* id = tid.ImplicitDefs; *id; ++id)
        handleRegisterDef(mbb, mi, *id);

      // Handle explicit defs - build intervals.
      for (int i = mi->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand& mop = mi->getOperand(i);
        if (mop.isRegister() && mop.getReg() && mop.isDef())
          handleRegisterDef(mbb, mi, mop.getReg());
      }
    }
  }
}

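/// joinIntervalsInMachineBB - Coalesce copies in the given block: for each
/// move whose source and destination intervals are compatible and do not
/// interfere, merge the two intervals and record the replacement in r2rMap_.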
void LiveIntervals::joinIntervalsInMachineBB(MachineBasicBlock *MBB) {
  DEBUG(std::cerr << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

  for (MachineBasicBlock::iterator mi = MBB->begin(), mie = MBB->end();
       mi != mie; ++mi) {
    DEBUG(std::cerr << getInstructionIndex(mi) << '\t' << *mi);

    // We only join virtual registers with allocatable
    // physical registers since we do not have liveness information
    // on non-allocatable physical registers.
    unsigned regA, regB;
    if (tii_->isMoveInstr(*mi, regA, regB) &&
        (MRegisterInfo::isVirtualRegister(regA) || allocatableRegs_[regA]) &&
        (MRegisterInfo::isVirtualRegister(regB) || allocatableRegs_[regB])) {

      // Get representative registers.
      regA = rep(regA);
      regB = rep(regB);

      // If they are already joined we continue.
      if (regA == regB)
        continue;

      // If they are both physical registers, we cannot join them.
      if (MRegisterInfo::isPhysicalRegister(regA) &&
          MRegisterInfo::isPhysicalRegister(regB))
        continue;

      // If they are not of the same register class, we cannot join them.
      if (differingRegisterClasses(regA, regB))
        continue;

      LiveInterval &IntA = getInterval(regA);
      LiveInterval &IntB = getInterval(regB);
      assert(IntA.reg == regA && IntB.reg == regB &&
             "Register mapping is horribly broken!");

      DEBUG(std::cerr << "\t\tInspecting " << IntA << " and " << IntB << ": ");

      // If two intervals contain a single value and are joined by a copy, it
      // does not matter if the intervals overlap, they can always be joined.
      bool TriviallyJoinable =
        IntA.containsOneValue() && IntB.containsOneValue();

      unsigned MIDefIdx = getDefIndex(getInstructionIndex(mi));
      if ((TriviallyJoinable || IntB.joinable(IntA, MIDefIdx)) &&
          !overlapsAliases(&IntA, &IntB)) {
        IntB.join(IntA, MIDefIdx);
        DEBUG(std::cerr << "Joined.  Result = " << IntB << "\n");

        if (!MRegisterInfo::isPhysicalRegister(regA)) {
          r2iMap_.erase(regA);
          r2rMap_[regA] = regB;
        } else {
          // Otherwise merge the data structures the other way so we don't lose
          // the physreg information.
          r2rMap_[regB] = regA;
          IntB.reg = regA;
          IntA.swap(IntB);
          r2iMap_.erase(regB);
        }
        ++numJoins;
      } else {
        DEBUG(std::cerr << "Interference!\n");
      }
    }
  }
}

namespace {
  // DepthMBBCompare - Comparison predicate that sorts first on the loop
  // depth of the basic block (the unsigned), and then on the MBB number.
  struct DepthMBBCompare {
    typedef std::pair<unsigned, MachineBasicBlock*> DepthMBBPair;
    bool operator()(const DepthMBBPair &LHS, const DepthMBBPair &RHS) const {
      if (LHS.first > RHS.first) return true;   // Deeper loops first
      return LHS.first == RHS.first &&
        LHS.second->getNumber() < RHS.second->getNumber();
    }
  };
}

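/// joinIntervals - Run the copy coalescer over the whole function, visiting
/// basic blocks in order of decreasing loop depth so copies in inner loops
/// are joined first.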
void LiveIntervals::joinIntervals() {
  DEBUG(std::cerr << "********** JOINING INTERVALS ***********\n");

  const LoopInfo &LI = getAnalysis<LoopInfo>();
  if (LI.begin() == LI.end()) {
    // If there are no loops in the function, join intervals in function order.
    for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();
         I != E; ++I)
      joinIntervalsInMachineBB(I);
  } else {
    // Otherwise, join intervals in inner loops before other intervals.
    // Unfortunately we can't just iterate over loop hierarchy here because
    // there may be more MBB's than BB's.  Collect MBB's for sorting.
    std::vector<std::pair<unsigned, MachineBasicBlock*> > MBBs;
    for (MachineFunction::iterator I = mf_->begin(), E = mf_->end();
         I != E; ++I)
      MBBs.push_back(std::make_pair(LI.getLoopDepth(I->getBasicBlock()), I));

    // Sort by loop depth.
    std::sort(MBBs.begin(), MBBs.end(), DepthMBBCompare());

    // Finally, join intervals in loop nest order.
    for (unsigned i = 0, e = MBBs.size(); i != e; ++i)
      joinIntervalsInMachineBB(MBBs[i].second);
  }

  DEBUG(std::cerr << "*** Register mapping ***\n");
  DEBUG(for (int i = 0, e = r2rMap_.size(); i != e; ++i)
          if (r2rMap_[i])
             std::cerr << "  reg " << i << " -> reg " << r2rMap_[i] << "\n");
}

/// Return true if the two specified registers belong to different register
/// classes.  The registers may be either phys or virt regs.
bool LiveIntervals::differingRegisterClasses(unsigned RegA,
                                             unsigned RegB) const {

  // Get the register class for the first reg.
  if (MRegisterInfo::isPhysicalRegister(RegA)) {
    assert(MRegisterInfo::isVirtualRegister(RegB) &&
           "Shouldn't consider two physregs!");
    return !mf_->getSSARegMap()->getRegClass(RegB)->contains(RegA);
  }

  // Compare against the regclass for the second reg.
  const TargetRegisterClass *RegClass = mf_->getSSARegMap()->getRegClass(RegA);
  if (MRegisterInfo::isVirtualRegister(RegB))
    return RegClass != mf_->getSSARegMap()->getRegClass(RegB);
  else
    return !RegClass->contains(RegB);
}

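/// overlapsAliases - Return true if the virtual register interval overlaps an
/// interval of any register aliased to the physical register interval.  A
/// vreg-vreg pair trivially has no aliases.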
bool LiveIntervals::overlapsAliases(const LiveInterval *LHS,
                                    const LiveInterval *RHS) const {
  if (!MRegisterInfo::isPhysicalRegister(LHS->reg)) {
    if (!MRegisterInfo::isPhysicalRegister(RHS->reg))
      return false;   // vreg-vreg merge has no aliases!
    std::swap(LHS, RHS);
  }

  assert(MRegisterInfo::isPhysicalRegister(LHS->reg) &&
         MRegisterInfo::isVirtualRegister(RHS->reg) &&
         "first interval must describe a physical register");

  for (const unsigned *AS = mri_->getAliasSet(LHS->reg); *AS; ++AS)
    if (RHS->overlaps(getInterval(*AS)))
      return true;

  return false;
}

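/// createInterval - Make a new live interval for the given register.
/// Physical register intervals get an infinite spill weight because they can
/// never be spilled.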
LiveInterval LiveIntervals::createInterval(unsigned reg) {
  float Weight = MRegisterInfo::isPhysicalRegister(reg) ?
                       (float)HUGE_VAL : 0.0F;
  return LiveInterval(reg, Weight);
}
