//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "llvm/CodeGen/Passes.h"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "SpillPlacement.h"
#include "Spiller.h"
#include "SplitKit.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveRegMatrix.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/VirtRegMap.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Timer.h"
#include "llvm/Support/raw_ostream.h"
#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static cl::opt<SplitEditor::ComplementSpillMode>
SplitSpillMode("split-spill-mode", cl::Hidden,
  cl::desc("Spill mode for splitting live ranges"),
  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
             clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
             clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
             clEnumValEnd),
  cl::init(SplitEditor::SM_Partition));

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {

  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  MachineBlockFrequencyInfo *MBFI;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  OwningPtr<Spiller> SpillerInstance;
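  // Priority queue of (priority, ~vreg) pairs. The bit-inverted virtual
  // register number acts as a tie breaker so that lower-numbered vregs are
  // dequeued first; see enqueue() and dequeue().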
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress.  This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled.  No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range.  Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

  static const char *const StageName[];

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool isMax() const { return BrokenHints == ~0u; }

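    // Lexicographic comparison: fewer broken hints always wins; the maximum
    // evicted spill weight only breaks ties.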
    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };

  // Splitting state.
  OwningPtr<SplitAnalysis> SA;
  OwningPtr<SplitEditor> SE;

  /// Cached per-block interference maps.
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (int i = LiveBundles.find_first(); i >= 0;
           i = LiveBundles.find_next(i))
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  BlockFrequency calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, BlockFrequency&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  BlockFrequency calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  unsigned canReassign(LiveInterval &VirtReg, unsigned PhysReg);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<LiveInterval*>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = 0.98f;


FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeLiveRegMatrixPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<MachineBlockFrequencyInfo>();
  AU.addPreserved<MachineBlockFrequencyInfo>();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addPreserved<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<LiveRegMatrix>();
  AU.addPreserved<LiveRegMatrix>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}


//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (VRM->hasPhys(VirtReg)) {
    Matrix->unassign(LIS->getInterval(VirtReg));
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  if (!VRM->hasPhys(VirtReg))
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  Matrix->unassign(LI);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet?  Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it to
  // be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned.
  // Reset the parent's stage to RS_Assign and give the clone the same stage
  // as the parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (priority, ~vreg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else {
    if (ExtraRegInfo[Reg].Stage == RS_Assign && !LI->empty() &&
        LIS->intervalIsInOneMBB(*LI)) {
      // Allocate original local ranges in linear instruction order. Since they
      // are singly defined, this produces optimal coloring in the absence of
      // global interference and other constraints.
      Prio = LI->beginIndex().getInstrDistance(Indexes->getLastIndex());
    } else {
      // Allocate global and split ranges in long->short order. Long ranges that
      // don't fit should be spilled (or split) ASAP so they don't create
      // interference.  Mark a bit to prioritize global above local ranges.
      Prio = (1u << 29) + Size;
    }
    // Mark a higher bit to prioritize global and local above RS_Split.
    Prio |= (1u << 31);

    // Boost ranges that have a physical register hint.
    if (VRM->hasKnownPreference(Reg))
      Prio |= (1u << 30);
  }
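  // At this point the priority word looks like this, from most to least
  // significant bit:
  //   bit 31: clear only for deferred RS_Split ranges.
  //   bit 30: set when a non-deferred range has a known physreg preference.
  //   bit 29: set for global and split ranges, clear for local ranges.
  //   low bits: range size, or instruction distance for local ranges.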
  // The virtual register number is a tie breaker for same-sized ranges.
  // Give lower vreg numbers higher priority to assign them first.
  Queue.push(std::make_pair(Prio, ~Reg));
}

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(~Queue.top().second);
  Queue.pop();
  return LI;
}


//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next()))
    if (!Matrix->checkInterference(VirtReg, PhysReg))
      break;
  if (!PhysReg || Order.isHint())
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
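      // Budget for a cheap eviction: canEvictInterference() gives up as soon
      // as the accumulated eviction cost reaches this limit.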
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}


//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

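/// canReassign - Return a physreg from VirtReg's allocation order, other than
/// PrevReg, that VirtReg could use without any interference, or 0 if no such
/// register exists.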
unsigned RAGreedy::canReassign(LiveInterval &VirtReg, unsigned PrevReg) {
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  unsigned PhysReg;
  while ((PhysReg = Order.next())) {
    if (PhysReg == PrevReg)
      continue;

    MCRegUnitIterator Units(PhysReg, TRI);
    for (; Units.isValid(); ++Units) {
      // Instantiate a "subquery", not to be confused with the Queries array.
      LiveIntervalUnion::Query subQ(&VirtReg, &Matrix->getLiveUnions()[*Units]);
      if (subQ.checkInterference())
        break;
    }
    // If no units have interference, break out with the current PhysReg.
    if (!Units.isValid())
      break;
  }
  if (PhysReg)
    DEBUG(dbgs() << "can reassign: " << VirtReg << " from "
          << PrintReg(PrevReg, TRI) << " to " << PrintReg(PhysReg, TRI)
          << '\n');
  return PhysReg;
}

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  return A.weight > B.weight;
}

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // It is only possible to evict virtual register interference.
  if (Matrix->checkInterference(VirtReg, PhysReg) > LiveRegMatrix::IK_VirtReg)
    return false;

  bool IsLocal = LIS->intervalIsInOneMBB(VirtReg);

  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

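  // Walk all register units of PhysReg and accumulate the cost of evicting
  // every interfering virtual register, giving up as soon as the cost reaches
  // MaxCost or an eviction turns out to be illegal.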
  EvictionCost Cost;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      assert(TargetRegisterInfo::isVirtualRegister(Intf->reg) &&
             "Only expecting virtual register interference from query");
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent = !VirtReg.isSpillable() &&
        (Intf->isSpillable() ||
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      if (Urgent)
        continue;
      // If !MaxCost.isMax(), then we're just looking for a cheap register.
      // Evicting another local live range in this case could lead to suboptimal
      // coloring.
      if (!MaxCost.isMax() && IsLocal && LIS->intervalIsInOneMBB(*Intf) &&
          !canReassign(*Intf, PhysReg)) {
        return false;
      }
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');

  // Collect all interfering virtregs first.
  SmallVector<LiveInterval*, 8> Intfs;
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    LiveIntervalUnion::Query &Q = Matrix->query(VirtReg, *Units);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    ArrayRef<LiveInterval*> IVR = Q.interferingVRegs();
    Intfs.append(IVR.begin(), IVR.end());
  }

  // Evict them second. This will invalidate the queries.
  for (unsigned i = 0, e = Intfs.size(); i != e; ++i) {
    LiveInterval *Intf = Intfs[i];
    // The same VirtReg may be present in multiple RegUnits. Skip duplicates.
    if (!VRM->hasPhys(Intf->reg))
      continue;
    Matrix->unassign(*Intf);
    assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
            VirtReg.isSpillable() < Intf->isSpillable()) &&
           "Cannot decrease cascade number, illegal eviction");
    ExtraRegInfo[Intf->reg].Cascade = Cascade;
    ++NumEvicted;
    NewVRegs.push_back(Intf);
  }
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;
  unsigned OrderLimit = Order.getOrder().size();

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;

    // Check if any registers in RC are below CostPerUseLimit.
    const TargetRegisterClass *RC = MRI->getRegClass(VirtReg.reg);
    unsigned MinCost = RegClassInfo.getMinCost(RC);
    if (MinCost >= CostPerUseLimit) {
      DEBUG(dbgs() << RC->getName() << " minimum cost = " << MinCost
                   << ", no cheaper registers to be found.\n");
      return 0;
    }

    // It is normal for register classes to have a long tail of registers with
    // the same cost. We don't need to look at them if they're too expensive.
    if (TRI->getCostPerUse(Order.getOrder().back()) >= CostPerUseLimit) {
      OrderLimit = RegClassInfo.getLastCostChange(RC);
      DEBUG(dbgs() << "Only trying the first " << OrderLimit << " regs.\n");
    }
  }

  Order.rewind();
  while (unsigned PhysReg = Order.nextWithDups(OrderLimit)) {
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint())
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}


//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in PhysReg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   BlockFrequency &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  BlockFrequency StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef.isValid();

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstInstr)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastInstr)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastInstr)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstInstr)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    while (Ins--)
      StaticCost += SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias; it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}


/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}

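/// growRegion - Iteratively grow the split region for Cand by pulling in the
/// live-through blocks around bundles that SpillPlacer reports as preferring
/// a register, until the region stops growing.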
void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions.  Compact
/// regions can be computed without looking at interference.  They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  BlockFrequency Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i>=0;
         i = Cand.LiveBundles.find_next(i))
    dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
BlockFrequency RAGreedy::calcSpillCost() {
  BlockFrequency Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}

/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
BlockFrequency RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  BlockFrequency GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    while (Ins--)
      GlobalCost += SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference()) {
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
        GlobalCost += SpillPlacer->getBlockFrequency(Number);
      }
      continue;
    }
    // live-in / stack-out or stack-in live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles.  The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = *LREdit.get(i);

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safe guard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

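/// tryRegionSplit - Try to split VirtReg around regions where it can remain in
/// a register. One global split candidate is evaluated per physreg in Order,
/// plus an optional compact region; the cheapest candidate found is handed to
/// splitAroundRegion().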
unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  unsigned NumCands = 0;
  unsigned BestCand = NoCand;
  BlockFrequency BestCost;
  SmallVector<unsigned, 8> UsedCands;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = BlockFrequency::getMaxFrequency();
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
    BestCost = calcSpillCost();
    DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    BlockFrequency Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Cost;
    }
    ++NumCands;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
                   << B << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
                   << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}


//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses. This
/// creates a lot of local live ranges that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling; the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = *LREdit.get(i);
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}


1347//===----------------------------------------------------------------------===//
1348//                         Per-Instruction Splitting
1349//===----------------------------------------------------------------------===//
1350
1351/// tryInstructionSplit - Split a live range around individual instructions.
1352/// This is normally not worthwhile since the spiller is doing essentially the
1353/// same thing. However, when the live range is in a constrained register
1354/// class, it may help to insert copies such that parts of the live range can
1355/// be moved to a larger register class.
1356///
1357/// This is similar to spilling to a larger register class.
1358unsigned
1359RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
1360                              SmallVectorImpl<LiveInterval*> &NewVRegs) {
1361  // There is no point to this if there are no larger sub-classes.
1362  if (!RegClassInfo.isProperSubClass(MRI->getRegClass(VirtReg.reg)))
1363    return 0;
1364
1365  // Always enable split spill mode, since we're effectively spilling to a
1366  // register.
1367  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
1368  SE->reset(LREdit, SplitEditor::SM_Size);
1369
1370  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
1371  if (Uses.size() <= 1)
1372    return 0;
1373
1374  DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");
1375
1376  // Split around every non-copy instruction.
1377  for (unsigned i = 0; i != Uses.size(); ++i) {
1378    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
1379      if (MI->isFullCopy()) {
1380        DEBUG(dbgs() << "    skip:\t" << Uses[i] << '\t' << *MI);
1381        continue;
1382      }
1383    SE->openIntv();
1384    SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
1385    SlotIndex SegStop  = SE->leaveIntvAfter(Uses[i]);
1386    SE->useIntv(SegStart, SegStop);
1387  }
1388
1389  if (LREdit.empty()) {
1390    DEBUG(dbgs() << "All uses were copies.\n");
1391    return 0;
1392  }
1393
1394  SmallVector<unsigned, 8> IntvMap;
1395  SE->finish(&IntvMap);
1396  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
1397  ExtraRegInfo.resize(MRI->getNumVirtRegs());
1398
1399  // Assign all new registers to RS_Spill. This was the last chance.
1400  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
1401  return 0;
1402}
1403
1404
1405//===----------------------------------------------------------------------===//
1406//                             Local Splitting
1407//===----------------------------------------------------------------------===//
1408
1409
1410/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
1411/// in order to use PhysReg between two entries in SA->UseSlots.
1412///
1413/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
1414///
1415void RAGreedy::calcGapWeights(unsigned PhysReg,
1416                              SmallVectorImpl<float> &GapWeight) {
1417  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
1418  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
1419  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
1420  const unsigned NumGaps = Uses.size()-1;
1421
1422  // Start and end points for the interference check.
  SlotIndex StartIdx =
    BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
    BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    if (!Matrix->query(const_cast<LiveInterval&>(SA->getParent()), *Units)
          .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI =
      Matrix->getLiveUnions()[*Units].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }

  // Add fixed interference.
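  // Fixed interference comes from the reg-unit live ranges themselves, e.g.
  // instructions with explicit physical register operands. It can never be
  // evicted, so any gap it overlaps is marked as infinitely expensive.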
  for (MCRegUnitIterator Units(PhysReg, TRI); Units.isValid(); ++Units) {
    const LiveInterval &LI = LIS->getRegUnit(*Units);
    LiveInterval::const_iterator I = LI.find(StartIdx);
    LiveInterval::const_iterator E = LI.end();

    // Same loop as above. Mark any overlapped gaps as HUGE_VALF.
    for (unsigned Gap = 0; I != E && I->start < StopIdx; ++I) {
      while (Uses[Gap+1].getBoundaryIndex() < I->start)
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = HUGE_VALF;
        if (Uses[Gap+1].getBaseIndex() >= I->end)
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - a phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case; we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (Matrix->checkRegMaskInterference(VirtReg)) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
                                   Uses.front().getRegSlot()) - RMS.begin();
    unsigned re = RMS.size();
    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
      // Look for Uses[i] <= RMS <= Uses[i+1].
      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
        break;
      DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
      RegMaskGaps.push_back(i);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split into 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need (see the example
  // below). They also prevent excessive splitting and infinite loops.
  //
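  // Worked example (illustrative): a 3-instruction local range below RS_Split2
  // may be split into a 2-instruction and a 3-instruction piece (rule 1). The
  // 3-instruction piece, being no smaller, is tagged RS_Split2 (rule 3); if it
  // comes up for splitting again, rule 2 only accepts splits where every piece
  // has fewer than 3 instructions, so the process terminates.
  //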
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

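  // Block frequency relative to the function entry block (entry == 1.0). It
  // scales the estimated spill weight of the candidate ranges below.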
  const float blockFreq =
    SpillPlacer->getBlockFrequency(BI.MBB->getNumber()).getFrequency() *
    (1.0f / BlockFrequency::getEntryFrequency());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (Matrix->checkRegMaskInterference(VirtReg, PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = HUGE_VALF;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

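    // The search below treats [SplitBefore, SplitAfter] as a window over the
    // uses: extend the window while the estimated weight of the new range
    // still dominates the interference inside it, and shrink it from the left
    // once it no longer does. The best window seen so far is remembered in
    // BestBefore/BestAfter.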
    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
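        // Rough model: the new range executes NewGaps+1 instructions, each
        // touching the register once, over a size equal to the distance
        // between the outermost uses plus one instruction slot for each COPY
        // at a live-in or live-out boundary.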
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps; we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
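        // Only accept the candidate if the estimate beats the interference by
        // the Hysteresis margin; this keeps near-ties from flipping the
        // decision between otherwise equivalent windows.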
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop  = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(*LREdit.get(i), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
    Matrix->invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}


//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the primary
  // queue. The RS_Split ranges already failed to do this, and they should not
  // get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: " << mf.getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(),
                     getAnalysis<LiveIntervals>(),
                     getAnalysis<LiveRegMatrix>());
  Indexes = &getAnalysis<SlotIndexes>();
  MBFI = &getAnalysis<MachineBlockFrequencyInfo>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  DEBUG(LIS->dump());

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree, *MBFI));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, Matrix->getLiveUnions(), Indexes, LIS, TRI);
  GlobalCand.resize(32);  // This will grow as needed.

  allocatePhysRegs();
  releaseMemory();
  return true;
}