RegAllocGreedy.cpp revision eda0fe8d58b0aaff5f18e7f13edfda3022384e70
1//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file defines the RAGreedy function pass for register allocation in 11// optimized builds. 12// 13//===----------------------------------------------------------------------===// 14 15#define DEBUG_TYPE "regalloc" 16#include "AllocationOrder.h" 17#include "InterferenceCache.h" 18#include "LiveRangeEdit.h" 19#include "RegAllocBase.h" 20#include "Spiller.h" 21#include "SpillPlacement.h" 22#include "SplitKit.h" 23#include "VirtRegMap.h" 24#include "llvm/ADT/Statistic.h" 25#include "llvm/Analysis/AliasAnalysis.h" 26#include "llvm/Function.h" 27#include "llvm/PassAnalysisSupport.h" 28#include "llvm/CodeGen/CalcSpillWeights.h" 29#include "llvm/CodeGen/EdgeBundles.h" 30#include "llvm/CodeGen/LiveIntervalAnalysis.h" 31#include "llvm/CodeGen/LiveStackAnalysis.h" 32#include "llvm/CodeGen/MachineDominators.h" 33#include "llvm/CodeGen/MachineFunctionPass.h" 34#include "llvm/CodeGen/MachineLoopInfo.h" 35#include "llvm/CodeGen/MachineLoopRanges.h" 36#include "llvm/CodeGen/MachineRegisterInfo.h" 37#include "llvm/CodeGen/Passes.h" 38#include "llvm/CodeGen/RegAllocRegistry.h" 39#include "llvm/CodeGen/RegisterCoalescer.h" 40#include "llvm/Target/TargetOptions.h" 41#include "llvm/Support/Debug.h" 42#include "llvm/Support/ErrorHandling.h" 43#include "llvm/Support/raw_ostream.h" 44#include "llvm/Support/Timer.h" 45 46#include <queue> 47 48using namespace llvm; 49 50STATISTIC(NumGlobalSplits, "Number of split global live ranges"); 51STATISTIC(NumLocalSplits, "Number of split local live ranges"); 52STATISTIC(NumEvicted, "Number of interferences evicted"); 53 54static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator", 55 
createGreedyRegisterAllocator); 56 57namespace { 58class RAGreedy : public MachineFunctionPass, 59 public RegAllocBase, 60 private LiveRangeEdit::Delegate { 61 62 // context 63 MachineFunction *MF; 64 BitVector ReservedRegs; 65 66 // analyses 67 SlotIndexes *Indexes; 68 LiveStacks *LS; 69 MachineDominatorTree *DomTree; 70 MachineLoopInfo *Loops; 71 MachineLoopRanges *LoopRanges; 72 EdgeBundles *Bundles; 73 SpillPlacement *SpillPlacer; 74 75 // state 76 std::auto_ptr<Spiller> SpillerInstance; 77 std::priority_queue<std::pair<unsigned, unsigned> > Queue; 78 79 // Live ranges pass through a number of stages as we try to allocate them. 80 // Some of the stages may also create new live ranges: 81 // 82 // - Region splitting. 83 // - Per-block splitting. 84 // - Local splitting. 85 // - Spilling. 86 // 87 // Ranges produced by one of the stages skip the previous stages when they are 88 // dequeued. This improves performance because we can skip interference checks 89 // that are unlikely to give any results. It also guarantees that the live 90 // range splitting algorithm terminates, something that is otherwise hard to 91 // ensure. 92 enum LiveRangeStage { 93 RS_New, ///< Never seen before. 94 RS_First, ///< First time in the queue. 95 RS_Second, ///< Second time in the queue. 96 RS_Region, ///< Produced by region splitting. 97 RS_Block, ///< Produced by per-block splitting. 98 RS_Local, ///< Produced by local splitting. 99 RS_Spill ///< Produced by spilling. 100 }; 101 102 IndexedMap<unsigned char, VirtReg2IndexFunctor> LRStage; 103 104 LiveRangeStage getStage(const LiveInterval &VirtReg) const { 105 return LiveRangeStage(LRStage[VirtReg.reg]); 106 } 107 108 template<typename Iterator> 109 void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) { 110 LRStage.resize(MRI->getNumVirtRegs()); 111 for (;Begin != End; ++Begin) { 112 unsigned Reg = (*Begin)->reg; 113 if (LRStage[Reg] == RS_New) 114 LRStage[Reg] = NewStage; 115 } 116 } 117 118 // splitting state. 
119 std::auto_ptr<SplitAnalysis> SA; 120 std::auto_ptr<SplitEditor> SE; 121 122 /// Cached per-block interference maps 123 InterferenceCache IntfCache; 124 125 /// All basic blocks where the current register is live. 126 SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints; 127 128 /// Global live range splitting candidate info. 129 struct GlobalSplitCandidate { 130 unsigned PhysReg; 131 BitVector LiveBundles; 132 }; 133 134 /// Candidate info for for each PhysReg in AllocationOrder. 135 /// This vector never shrinks, but grows to the size of the largest register 136 /// class. 137 SmallVector<GlobalSplitCandidate, 32> GlobalCand; 138 139 /// For every instruction in SA->UseSlots, store the previous non-copy 140 /// instruction. 141 SmallVector<SlotIndex, 8> PrevSlot; 142 143public: 144 RAGreedy(); 145 146 /// Return the pass name. 147 virtual const char* getPassName() const { 148 return "Greedy Register Allocator"; 149 } 150 151 /// RAGreedy analysis usage. 152 virtual void getAnalysisUsage(AnalysisUsage &AU) const; 153 virtual void releaseMemory(); 154 virtual Spiller &spiller() { return *SpillerInstance; } 155 virtual void enqueue(LiveInterval *LI); 156 virtual LiveInterval *dequeue(); 157 virtual unsigned selectOrSplit(LiveInterval&, 158 SmallVectorImpl<LiveInterval*>&); 159 160 /// Perform register allocation. 
161 virtual bool runOnMachineFunction(MachineFunction &mf); 162 163 static char ID; 164 165private: 166 void LRE_WillEraseInstruction(MachineInstr*); 167 bool LRE_CanEraseVirtReg(unsigned); 168 void LRE_WillShrinkVirtReg(unsigned); 169 void LRE_DidCloneVirtReg(unsigned, unsigned); 170 171 float calcSplitConstraints(unsigned); 172 float calcGlobalSplitCost(const BitVector&); 173 void splitAroundRegion(LiveInterval&, unsigned, const BitVector&, 174 SmallVectorImpl<LiveInterval*>&); 175 void calcGapWeights(unsigned, SmallVectorImpl<float>&); 176 SlotIndex getPrevMappedIndex(const MachineInstr*); 177 void calcPrevSlots(); 178 unsigned nextSplitPoint(unsigned); 179 bool canEvictInterference(LiveInterval&, unsigned, float&); 180 181 unsigned tryEvict(LiveInterval&, AllocationOrder&, 182 SmallVectorImpl<LiveInterval*>&); 183 unsigned tryRegionSplit(LiveInterval&, AllocationOrder&, 184 SmallVectorImpl<LiveInterval*>&); 185 unsigned tryLocalSplit(LiveInterval&, AllocationOrder&, 186 SmallVectorImpl<LiveInterval*>&); 187 unsigned trySplit(LiveInterval&, AllocationOrder&, 188 SmallVectorImpl<LiveInterval*>&); 189}; 190} // end anonymous namespace 191 192char RAGreedy::ID = 0; 193 194FunctionPass* llvm::createGreedyRegisterAllocator() { 195 return new RAGreedy(); 196} 197 198RAGreedy::RAGreedy(): MachineFunctionPass(ID), LRStage(RS_New) { 199 initializeSlotIndexesPass(*PassRegistry::getPassRegistry()); 200 initializeLiveIntervalsPass(*PassRegistry::getPassRegistry()); 201 initializeSlotIndexesPass(*PassRegistry::getPassRegistry()); 202 initializeStrongPHIEliminationPass(*PassRegistry::getPassRegistry()); 203 initializeRegisterCoalescerAnalysisGroup(*PassRegistry::getPassRegistry()); 204 initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry()); 205 initializeLiveStacksPass(*PassRegistry::getPassRegistry()); 206 initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry()); 207 initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry()); 208 
initializeMachineLoopRangesPass(*PassRegistry::getPassRegistry()); 209 initializeVirtRegMapPass(*PassRegistry::getPassRegistry()); 210 initializeEdgeBundlesPass(*PassRegistry::getPassRegistry()); 211 initializeSpillPlacementPass(*PassRegistry::getPassRegistry()); 212} 213 214void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const { 215 AU.setPreservesCFG(); 216 AU.addRequired<AliasAnalysis>(); 217 AU.addPreserved<AliasAnalysis>(); 218 AU.addRequired<LiveIntervals>(); 219 AU.addRequired<SlotIndexes>(); 220 AU.addPreserved<SlotIndexes>(); 221 if (StrongPHIElim) 222 AU.addRequiredID(StrongPHIEliminationID); 223 AU.addRequiredTransitive<RegisterCoalescer>(); 224 AU.addRequired<CalculateSpillWeights>(); 225 AU.addRequired<LiveStacks>(); 226 AU.addPreserved<LiveStacks>(); 227 AU.addRequired<MachineDominatorTree>(); 228 AU.addPreserved<MachineDominatorTree>(); 229 AU.addRequired<MachineLoopInfo>(); 230 AU.addPreserved<MachineLoopInfo>(); 231 AU.addRequired<MachineLoopRanges>(); 232 AU.addPreserved<MachineLoopRanges>(); 233 AU.addRequired<VirtRegMap>(); 234 AU.addPreserved<VirtRegMap>(); 235 AU.addRequired<EdgeBundles>(); 236 AU.addRequired<SpillPlacement>(); 237 MachineFunctionPass::getAnalysisUsage(AU); 238} 239 240 241//===----------------------------------------------------------------------===// 242// LiveRangeEdit delegate methods 243//===----------------------------------------------------------------------===// 244 245void RAGreedy::LRE_WillEraseInstruction(MachineInstr *MI) { 246 // LRE itself will remove from SlotIndexes and parent basic block. 247 VRM->RemoveMachineInstrFromMaps(MI); 248} 249 250bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) { 251 if (unsigned PhysReg = VRM->getPhys(VirtReg)) { 252 unassign(LIS->getInterval(VirtReg), PhysReg); 253 return true; 254 } 255 // Unassigned virtreg is probably in the priority queue. 256 // RegAllocBase will erase it after dequeueing. 
257 return false; 258} 259 260void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) { 261 unsigned PhysReg = VRM->getPhys(VirtReg); 262 if (!PhysReg) 263 return; 264 265 // Register is assigned, put it back on the queue for reassignment. 266 LiveInterval &LI = LIS->getInterval(VirtReg); 267 unassign(LI, PhysReg); 268 enqueue(&LI); 269} 270 271void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) { 272 // LRE may clone a virtual register because dead code elimination causes it to 273 // be split into connected components. Ensure that the new register gets the 274 // same stage as the parent. 275 LRStage.grow(New); 276 LRStage[New] = LRStage[Old]; 277} 278 279void RAGreedy::releaseMemory() { 280 SpillerInstance.reset(0); 281 LRStage.clear(); 282 RegAllocBase::releaseMemory(); 283} 284 285void RAGreedy::enqueue(LiveInterval *LI) { 286 // Prioritize live ranges by size, assigning larger ranges first. 287 // The queue holds (size, reg) pairs. 288 const unsigned Size = LI->getSize(); 289 const unsigned Reg = LI->reg; 290 assert(TargetRegisterInfo::isVirtualRegister(Reg) && 291 "Can only enqueue virtual registers"); 292 unsigned Prio; 293 294 LRStage.grow(Reg); 295 if (LRStage[Reg] == RS_New) 296 LRStage[Reg] = RS_First; 297 298 if (LRStage[Reg] == RS_Second) 299 // Unsplit ranges that couldn't be allocated immediately are deferred until 300 // everything else has been allocated. Long ranges are allocated last so 301 // they are split against realistic interference. 302 Prio = (1u << 31) - Size; 303 else { 304 // Everything else is allocated in long->short order. Long ranges that don't 305 // fit should be spilled ASAP so they don't create interference. 306 Prio = (1u << 31) + Size; 307 308 // Boost ranges that have a physical register hint. 
309 if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg))) 310 Prio |= (1u << 30); 311 } 312 313 Queue.push(std::make_pair(Prio, Reg)); 314} 315 316LiveInterval *RAGreedy::dequeue() { 317 if (Queue.empty()) 318 return 0; 319 LiveInterval *LI = &LIS->getInterval(Queue.top().second); 320 Queue.pop(); 321 return LI; 322} 323 324//===----------------------------------------------------------------------===// 325// Interference eviction 326//===----------------------------------------------------------------------===// 327 328/// canEvict - Return true if all interferences between VirtReg and PhysReg can 329/// be evicted. Set maxWeight to the maximal spill weight of an interference. 330bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg, 331 float &MaxWeight) { 332 float Weight = 0; 333 for (const unsigned *AliasI = TRI->getOverlaps(PhysReg); *AliasI; ++AliasI) { 334 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI); 335 // If there is 10 or more interferences, chances are one is smaller. 336 if (Q.collectInterferingVRegs(10) >= 10) 337 return false; 338 339 // Check if any interfering live range is heavier than VirtReg. 340 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) { 341 LiveInterval *Intf = Q.interferingVRegs()[i]; 342 if (TargetRegisterInfo::isPhysicalRegister(Intf->reg)) 343 return false; 344 if (Intf->weight >= VirtReg.weight) 345 return false; 346 Weight = std::max(Weight, Intf->weight); 347 } 348 } 349 MaxWeight = Weight; 350 return true; 351} 352 353/// tryEvict - Try to evict all interferences for a physreg. 354/// @param VirtReg Currently unassigned virtual register. 355/// @param Order Physregs to try. 356/// @return Physreg to assign VirtReg, or 0. 
357unsigned RAGreedy::tryEvict(LiveInterval &VirtReg, 358 AllocationOrder &Order, 359 SmallVectorImpl<LiveInterval*> &NewVRegs){ 360 NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled); 361 362 // Keep track of the lightest single interference seen so far. 363 float BestWeight = 0; 364 unsigned BestPhys = 0; 365 366 Order.rewind(); 367 while (unsigned PhysReg = Order.next()) { 368 float Weight = 0; 369 if (!canEvictInterference(VirtReg, PhysReg, Weight)) 370 continue; 371 372 // This is an eviction candidate. 373 DEBUG(dbgs() << "max " << PrintReg(PhysReg, TRI) << " interference = " 374 << Weight << '\n'); 375 if (BestPhys && Weight >= BestWeight) 376 continue; 377 378 // Best so far. 379 BestPhys = PhysReg; 380 BestWeight = Weight; 381 // Stop if the hint can be used. 382 if (Order.isHint(PhysReg)) 383 break; 384 } 385 386 if (!BestPhys) 387 return 0; 388 389 DEBUG(dbgs() << "evicting " << PrintReg(BestPhys, TRI) << " interference\n"); 390 for (const unsigned *AliasI = TRI->getOverlaps(BestPhys); *AliasI; ++AliasI) { 391 LiveIntervalUnion::Query &Q = query(VirtReg, *AliasI); 392 assert(Q.seenAllInterferences() && "Didn't check all interfererences."); 393 for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) { 394 LiveInterval *Intf = Q.interferingVRegs()[i]; 395 unassign(*Intf, VRM->getPhys(Intf->reg)); 396 ++NumEvicted; 397 NewVRegs.push_back(Intf); 398 } 399 } 400 return BestPhys; 401} 402 403 404//===----------------------------------------------------------------------===// 405// Region Splitting 406//===----------------------------------------------------------------------===// 407 408/// calcSplitConstraints - Fill out the SplitConstraints vector based on the 409/// interference pattern in Physreg and its aliases. Return the static cost of 410/// this split, assuming that all preferences in SplitConstraints are met. 
float RAGreedy::calcSplitConstraints(unsigned PhysReg) {
  InterferenceCache::Cursor Intf(IntfCache, PhysReg);

  // Reset interference dependent info.
  SplitConstraints.resize(SA->LiveBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
    SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    // Default: prefer a register at a boundary only when the value is live
    // across it and the block actually uses the register.
    BC.Entry = (BI.Uses && BI.LiveIn) ?
      SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = (BI.Uses && BI.LiveOut) ?
      SpillPlacement::PrefReg : SpillPlacement::DontCare;

    // No interference from PhysReg in this block; defaults stand, no spill
    // code needed here.
    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      // Interference begins at or before the block entry: the value cannot
      // enter in PhysReg at all.
      if (Intf.first() <= BI.Start)
        BC.Entry = SpillPlacement::MustSpill, Ins += BI.Uses;
      else if (!BI.Uses)
        BC.Entry = SpillPlacement::PrefSpill;
      // Interference hits before the first use: entering in a register would
      // force an extra reload.
      else if (Intf.first() < BI.FirstUse)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      // Interference strikes inside the used portion of the block.
      else if (Intf.first() < (BI.LiveThrough ? BI.LastUse : BI.Kill))
        ++Ins;
    }

    // Interference for the live-out value (mirror image of the above).
    if (BI.LiveOut) {
      if (Intf.last() >= BI.LastSplitPoint)
        BC.Exit = SpillPlacement::MustSpill, Ins += BI.Uses;
      else if (!BI.Uses)
        BC.Exit = SpillPlacement::PrefSpill;
      else if (Intf.last() > BI.LastUse)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > (BI.LiveThrough ? BI.FirstUse : BI.Def))
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  return StaticCost;
}


/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles.
/// This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(const BitVector &LiveBundles) {
  float GlobalCost = 0;
  for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
    SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    // Bundle 0 is the block's ingoing side, bundle 1 the outgoing side.
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (!BI.Uses)
      // Live-through block without uses: a copy is needed only when the value
      // changes location (register on one side, stack on the other).
      Ins += RegIn != RegOut;
    else {
      // A copy is needed at each boundary where the placement disagrees with
      // the preference computed in calcSplitConstraints.
      if (BI.LiveIn)
        Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
      if (BI.LiveOut)
        Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    }
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split VirtReg around the region determined by
/// LiveBundles. Make an effort to avoid interference from PhysReg.
///
/// The 'register' interval is going to contain as many uses as possible while
/// avoiding interference. The 'stack' interval is the complement constructed by
/// SplitEditor. It will contain the rest.
///
void RAGreedy::splitAroundRegion(LiveInterval &VirtReg, unsigned PhysReg,
                                 const BitVector &LiveBundles,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  DEBUG({
    dbgs() << "Splitting around region for " << PrintReg(PhysReg, TRI)
           << " with bundles";
    for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });

  InterferenceCache::Cursor Intf(IntfCache, PhysReg);
  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  // Create the main cross-block interval.
  SE->openIntv();

  // First add all defs that are live out of a block.
  for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
    SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];

    // Should the register be live out?
    if (!BI.LiveOut || !RegOut)
      continue;

    Intf.moveToBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " -> EB#"
                 << Bundles->getBundle(BI.MBB->getNumber(), 1)
                 << " [" << BI.Start << ';' << BI.LastSplitPoint << '-'
                 << BI.Stop << ") intf [" << Intf.first() << ';' << Intf.last()
                 << ')');

    // The interference interval should either be invalid or overlap MBB.
    assert((!Intf.hasInterference() || Intf.first() < BI.Stop)
           && "Bad interference");
    assert((!Intf.hasInterference() || Intf.last() > BI.Start)
           && "Bad interference");

    // Check interference leaving the block.
    if (!Intf.hasInterference()) {
      // Block is interference-free.
      DEBUG(dbgs() << ", no interference");
      if (!BI.Uses) {
        assert(BI.LiveThrough && "No uses, but not live through block?");
        // Block is live-through without interference.
        DEBUG(dbgs() << ", no uses"
                     << (RegIn ? ", live-through.\n" : ", stack in.\n"));
        if (!RegIn)
          SE->enterIntvAtEnd(*BI.MBB);
        continue;
      }
      if (!BI.LiveThrough) {
        // Value is defined in this block; put it in the register from its def.
        DEBUG(dbgs() << ", not live-through.\n");
        SE->useIntv(SE->enterIntvBefore(BI.Def), BI.Stop);
        continue;
      }
      if (!RegIn) {
        // Block is live-through, but entry bundle is on the stack.
        // Reload just before the first use.
        DEBUG(dbgs() << ", not live-in, enter before first use.\n");
        SE->useIntv(SE->enterIntvBefore(BI.FirstUse), BI.Stop);
        continue;
      }
      DEBUG(dbgs() << ", live-through.\n");
      continue;
    }

    // Block has interference.
    DEBUG(dbgs() << ", interference to " << Intf.last());

    if (!BI.LiveThrough && Intf.last() <= BI.Def) {
      // The interference doesn't reach the outgoing segment.
      DEBUG(dbgs() << " doesn't affect def from " << BI.Def << '\n');
      SE->useIntv(BI.Def, BI.Stop);
      continue;
    }


    if (!BI.Uses) {
      // No uses in block, avoid interference by reloading as late as possible.
      DEBUG(dbgs() << ", no uses.\n");
      SlotIndex SegStart = SE->enterIntvAtEnd(*BI.MBB);
      assert(SegStart >= Intf.last() && "Couldn't avoid interference");
      continue;
    }

    if (Intf.last().getBoundaryIndex() < BI.LastUse) {
      // There are interference-free uses at the end of the block.
      // Find the first use that can get the live-out register.
      SmallVectorImpl<SlotIndex>::const_iterator UI =
        std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
                         Intf.last().getBoundaryIndex());
      assert(UI != SA->UseSlots.end() && "Couldn't find last use");
      SlotIndex Use = *UI;
      assert(Use <= BI.LastUse && "Couldn't find last use");
      // Only attempt a split before the last split point.
      if (Use.getBaseIndex() <= BI.LastSplitPoint) {
        DEBUG(dbgs() << ", free use at " << Use << ".\n");
        SlotIndex SegStart = SE->enterIntvBefore(Use);
        assert(SegStart >= Intf.last() && "Couldn't avoid interference");
        assert(SegStart < BI.LastSplitPoint && "Impossible split point");
        SE->useIntv(SegStart, BI.Stop);
        continue;
      }
    }

    // Interference is after the last use.
    DEBUG(dbgs() << " after last use.\n");
    SlotIndex SegStart = SE->enterIntvAtEnd(*BI.MBB);
    assert(SegStart >= Intf.last() && "Couldn't avoid interference");
  }

  // Now all defs leading to live bundles are handled, do everything else.
  for (unsigned i = 0, e = SA->LiveBlocks.size(); i != e; ++i) {
    SplitAnalysis::BlockInfo &BI = SA->LiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BI.MBB->getNumber(), 1)];

    // Is the register live-in?
    if (!BI.LiveIn || !RegIn)
      continue;

    // We have an incoming register. Check for interference.
    Intf.moveToBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << "EB#" << Bundles->getBundle(BI.MBB->getNumber(), 0)
                 << " -> BB#" << BI.MBB->getNumber() << " [" << BI.Start << ';'
                 << BI.LastSplitPoint << '-' << BI.Stop << ')');

    // Check interference entering the block.
    if (!Intf.hasInterference()) {
      // Block is interference-free.
      DEBUG(dbgs() << ", no interference");
      if (!BI.Uses) {
        assert(BI.LiveThrough && "No uses, but not live through block?");
        // Block is live-through without interference.
        if (RegOut) {
          DEBUG(dbgs() << ", no uses, live-through.\n");
          SE->useIntv(BI.Start, BI.Stop);
        } else {
          DEBUG(dbgs() << ", no uses, stack-out.\n");
          SE->leaveIntvAtTop(*BI.MBB);
        }
        continue;
      }
      if (!BI.LiveThrough) {
        // Value dies in this block; keep it in the register until its kill.
        DEBUG(dbgs() << ", killed in block.\n");
        SE->useIntv(BI.Start, SE->leaveIntvAfter(BI.Kill));
        continue;
      }
      if (!RegOut) {
        // Block is live-through, but exit bundle is on the stack.
        // Spill immediately after the last use.
        if (BI.LastUse < BI.LastSplitPoint) {
          DEBUG(dbgs() << ", uses, stack-out.\n");
          SE->useIntv(BI.Start, SE->leaveIntvAfter(BI.LastUse));
          continue;
        }
        // The last use is after the last split point, it is probably an
        // indirect jump.
        DEBUG(dbgs() << ", uses at " << BI.LastUse << " after split point "
                     << BI.LastSplitPoint << ", stack-out.\n");
        SlotIndex SegEnd = SE->leaveIntvBefore(BI.LastSplitPoint);
        SE->useIntv(BI.Start, SegEnd);
        // Run a double interval from the split to the last use.
        // This makes it possible to spill the complement without affecting the
        // indirect branch.
        SE->overlapIntv(SegEnd, BI.LastUse);
        continue;
      }
      // Register is live-through.
      DEBUG(dbgs() << ", uses, live-through.\n");
      SE->useIntv(BI.Start, BI.Stop);
      continue;
    }

    // Block has interference.
    DEBUG(dbgs() << ", interference from " << Intf.first());

    if (!BI.LiveThrough && Intf.first() >= BI.Kill) {
      // The interference doesn't reach the outgoing segment.
      DEBUG(dbgs() << " doesn't affect kill at " << BI.Kill << '\n');
      SE->useIntv(BI.Start, BI.Kill);
      continue;
    }

    if (!BI.Uses) {
      // No uses in block, avoid interference by spilling as soon as possible.
      DEBUG(dbgs() << ", no uses.\n");
      SlotIndex SegEnd = SE->leaveIntvAtTop(*BI.MBB);
      assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
      continue;
    }
    if (Intf.first().getBaseIndex() > BI.FirstUse) {
      // There are interference-free uses at the beginning of the block.
      // Find the last use that can get the register.
      SmallVectorImpl<SlotIndex>::const_iterator UI =
        std::lower_bound(SA->UseSlots.begin(), SA->UseSlots.end(),
                         Intf.first().getBaseIndex());
      assert(UI != SA->UseSlots.begin() && "Couldn't find first use");
      SlotIndex Use = (--UI)->getBoundaryIndex();
      DEBUG(dbgs() << ", free use at " << *UI << ".\n");
      SlotIndex SegEnd = SE->leaveIntvAfter(Use);
      assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
      SE->useIntv(BI.Start, SegEnd);
      continue;
    }

    // Interference is before the first use.
    DEBUG(dbgs() << " before first use.\n");
    SlotIndex SegEnd = SE->leaveIntvAtTop(*BI.MBB);
    assert(SegEnd <= Intf.first() && "Couldn't avoid interference");
  }

  SE->closeIntv();

  // FIXME: Should we be more aggressive about splitting the stack region into
  // per-block segments? The current approach allows the stack region to
  // separate into connected components. Some components may be allocatable.
  SE->finish();
  ++NumGlobalSplits;

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

// tryRegionSplit - Try to split VirtReg around a region of blocks for each
// candidate physreg, pick the cheapest placement, and perform the split.
// Always returns 0: the split products are requeued via NewVRegs rather than
// assigned directly.
unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  BitVector LiveBundles, BestBundles;
  float BestCost = 0;
  unsigned BestReg = 0;

  Order.rewind();
  for (unsigned Cand = 0; unsigned PhysReg = Order.next(); ++Cand) {
    if (GlobalCand.size() <= Cand)
      GlobalCand.resize(Cand+1);
    GlobalCand[Cand].PhysReg = PhysReg;

    float Cost = calcSplitConstraints(PhysReg);
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    // The static cost alone already exceeds the best total cost; skip the
    // more expensive spill placement computation.
    if (BestReg && Cost >= BestCost) {
      DEBUG(dbgs() << " higher.\n");
      continue;
    }

    SpillPlacer->placeSpills(SplitConstraints, LiveBundles);
    // No live bundles, defer to splitSingleBlocks().
    if (!LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(LiveBundles);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = LiveBundles.find_first(); i>=0; i = LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (!BestReg || Cost < BestCost) {
      BestReg = PhysReg;
      // Record a slightly discounted cost so a later candidate must beat this
      // one by a real margin, not by floating-point noise.
      BestCost = 0.98f * Cost; // Prevent rounding effects.
      BestBundles.swap(LiveBundles);
    }
  }

  if (!BestReg)
    return 0;

  splitAroundRegion(VirtReg, BestReg, BestBundles, NewVRegs);
  // Mark the new ranges so they skip region splitting when requeued.
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Region);
  return 0;
}


//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//


/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->LiveBlocks.size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks.front();
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx = BI.LiveIn ? BI.FirstUse.getBaseIndex() : BI.FirstUse;
  SlotIndex StopIdx = BI.LiveOut ? BI.LastUse.getBoundaryIndex() : BI.LastUse;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (const unsigned *AI = TRI->getOverlaps(PhysReg); *AI; ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
           .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstUse to LastUse,
    // so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = PhysReg2LiveUnion[*AI].find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// getPrevMappedIndex - Return the slot index of the last non-copy instruction
/// before MI that has a slot index. If MI is the first mapped instruction in
/// its block, return the block start index instead.
///
SlotIndex RAGreedy::getPrevMappedIndex(const MachineInstr *MI) {
  assert(MI && "Missing MachineInstr");
  const MachineBasicBlock *MBB = MI->getParent();
  MachineBasicBlock::const_iterator B = MBB->begin(), I = MI;
  // Walk backwards, skipping debug values and copies.
  while (I != B)
    if (!(--I)->isDebugValue() && !I->isCopy())
      return Indexes->getInstructionIndex(I);
  return Indexes->getMBBStartIdx(MBB);
}

/// calcPrevSlots - Fill in the PrevSlot array with the index of the previous
/// real non-copy instruction for each instruction in SA->UseSlots.
///
void RAGreedy::calcPrevSlots() {
  const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots;
  PrevSlot.clear();
  PrevSlot.reserve(Uses.size());
  for (unsigned i = 0, e = Uses.size(); i != e; ++i) {
    const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]);
    PrevSlot.push_back(getPrevMappedIndex(MI).getDefIndex());
  }
}

/// nextSplitPoint - Find the next index into SA->UseSlots > i such that it may
/// be beneficial to split before UseSlots[i].
864/// 865/// 0 is always a valid split point 866unsigned RAGreedy::nextSplitPoint(unsigned i) { 867 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots; 868 const unsigned Size = Uses.size(); 869 assert(i != Size && "No split points after the end"); 870 // Allow split before i when Uses[i] is not adjacent to the previous use. 871 while (++i != Size && PrevSlot[i].getBaseIndex() <= Uses[i-1].getBaseIndex()) 872 ; 873 return i; 874} 875 876/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only 877/// basic block. 878/// 879unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order, 880 SmallVectorImpl<LiveInterval*> &NewVRegs) { 881 assert(SA->LiveBlocks.size() == 1 && "Not a local interval"); 882 const SplitAnalysis::BlockInfo &BI = SA->LiveBlocks.front(); 883 884 // Note that it is possible to have an interval that is live-in or live-out 885 // while only covering a single block - A phi-def can use undef values from 886 // predecessors, and the block could be a single-block loop. 887 // We don't bother doing anything clever about such a case, we simply assume 888 // that the interval is continuous from FirstUse to LastUse. We should make 889 // sure that we don't do anything illegal to such an interval, though. 890 891 const SmallVectorImpl<SlotIndex> &Uses = SA->UseSlots; 892 if (Uses.size() <= 2) 893 return 0; 894 const unsigned NumGaps = Uses.size()-1; 895 896 DEBUG({ 897 dbgs() << "tryLocalSplit: "; 898 for (unsigned i = 0, e = Uses.size(); i != e; ++i) 899 dbgs() << ' ' << SA->UseSlots[i]; 900 dbgs() << '\n'; 901 }); 902 903 // For every use, find the previous mapped non-copy instruction. 904 // We use this to detect valid split points, and to estimate new interval 905 // sizes. 
  calcPrevSlots();

  // Best candidate found so far. BestBefore == NumGaps means "none yet".
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  // Consider each allocatable physreg in turn.
  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = nextSplitPoint(1) - 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];
    for (unsigned i = 1; i != SplitAfter; ++i)
      MaxGap = std::max(MaxGap, GapWeight[i]);

    // Grow or shrink the [SplitBefore, SplitAfter] window until no candidate
    // window remains for this physreg.
    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;
      // MaxGap == HUGE_VALF means some covered gap has unevictable
      // interference, so this window can never be allocated - shrink it.
      if (MaxGap < HUGE_VALF) {
        // Estimate the new spill weight.
        //
        // Each instruction reads and writes the register, except the first
        // instr doesn't read when !FirstLive, and the last instr doesn't write
        // when !LastLive.
        //
        // We will be inserting copies before and after, so the total number of
        // reads and writes is 2 * EstUses.
        //
        const unsigned EstUses = 2*(SplitAfter - SplitBefore) +
                                 2*(LiveBefore + LiveAfter);

        // Try to guess the size of the new interval. This should be trivial,
        // but the slot index of an inserted copy can be a lot smaller than the
        // instruction it is inserted before if there are many dead indexes
        // between them.
        //
        // We measure the distance from the instruction before SplitBefore to
        // get a conservative estimate.
        //
        // The final distance can still be different if inserting copies
        // triggers a slot index renumbering.
        //
        const float EstWeight = normalizeSpillWeight(blockFreq * EstUses,
                              PrevSlot[SplitBefore].distance(Uses[SplitAfter]));
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        float Diff = EstWeight - MaxGap;
        DEBUG(dbgs() << " w=" << EstWeight << " d=" << Diff);
        if (Diff > 0) {
          Shrink = false;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        SplitBefore = nextSplitPoint(SplitBefore);
        if (SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          // Recompute the max when necessary.
          // NOTE(review): only the gap just before the new SplitBefore is
          // tested here; if an earlier dropped gap held the max, MaxGap stays
          // conservatively high - verify that is intended.
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        // Window is now empty of gaps; restart accumulating while extending.
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      // Extend to the next split point, folding the newly covered gaps into
      // MaxGap as we go.
      for (unsigned e = nextSplitPoint(SplitAfter + 1) - 1;
           SplitAfter != e; ++SplitAfter)
        MaxGap = std::max(MaxGap, GapWeight[SplitAfter]);
      continue;
    }
  }

  // Didn't find any candidates?
  // Didn't find any candidates? (BestBefore is still its "none" sentinel.)
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
  SE->reset(LREdit);

  // Carve out one new interval covering Uses[BestBefore] .. Uses[BestAfter].
  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SE->closeIntv();
  SE->finish();
  // Tag the new ranges as products of local splitting.
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Local);
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*>&NewVRegs) {
  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    return tryLocalSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  // Don't iterate global splitting.
  // Move straight to spilling if this range was produced by a global split.
  LiveRangeStage Stage = getStage(VirtReg);
  if (Stage >= RS_Block)
    return 0;

  SA->analyze(&VirtReg);

  // First try to split around a region spanning multiple blocks.
  if (Stage < RS_Region) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    // A region split either assigned a register or produced new vregs to
    // allocate; either way we are done here.
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks with multiple uses.
  if (Stage < RS_Block) {
    SplitAnalysis::BlockPtrSet Blocks;
    if (SA->getMultiUseBlocks(Blocks)) {
      LiveRangeEdit LREdit(VirtReg, NewVRegs, this);
      SE->reset(LREdit);
      SE->splitSingleBlocks(Blocks);
      // Mark the products so this range isn't block-split again.
      setStage(NewVRegs.begin(), NewVRegs.end(), RS_Block);
      if (VerifyEnabled)
        MF->verify(this, "After splitting live range around basic blocks");
    }
  }

  // Don't assign any physregs.
  return 0;
}


//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, ReservedRegs);
  while (unsigned PhysReg = Order.next()) {
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      return PhysReg;
  }

  // No free register; try evicting cheaper interfering live ranges.
  if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
    return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  LiveRangeStage Stage = getStage(VirtReg);
  if (Stage == RS_First) {
    LRStage[VirtReg.reg] = RS_Second;
    DEBUG(dbgs() << "wait for second round\n");
    // Re-queue VirtReg itself for the next round.
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  assert(Stage < RS_Spill && "Cannot allocate after spilling");

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(VirtReg, NewVRegs, this);
  spiller().spill(LRE);
  // Spill products must never be spilled again.
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Spill);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  // Wire up the analyses this pass depends on.
  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  ReservedRegs = TRI->getReservedRegs(*MF);
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  LoopRanges = &getAnalysis<MachineLoopRanges>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();

  // Per-function splitting helpers and live range stage tracking.
  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  LRStage.clear();
  LRStage.resize(MRI->getNumVirtRegs());
  IntfCache.init(MF, &PhysReg2LiveUnion[0], Indexes, TRI);

  allocatePhysRegs();
  addMBBLiveIns(MF);
  LIS->addKillFlags();

  // Run rewriter
  {
    NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
    VRM->rewrite(Indexes);
  }

  // The pass output is in VirtRegMap. Release all the transient data.
  releaseMemory();

  // Register allocation always modifies the function.
  return true;
}