RegAllocGreedy.cpp revision 95a9d937728ca9cf2bf44f86ff1184df318b3bd7
//===-- RegAllocGreedy.cpp - greedy register allocator --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the RAGreedy function pass for register allocation in
// optimized builds.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "regalloc"
#include "AllocationOrder.h"
#include "InterferenceCache.h"
#include "LiveDebugVariables.h"
#include "RegAllocBase.h"
#include "Spiller.h"
#include "SpillPlacement.h"
#include "SplitKit.h"
#include "VirtRegMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Function.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/EdgeBundles.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveRangeEdit.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Support/Timer.h"

#include <queue>

using namespace llvm;

STATISTIC(NumGlobalSplits, "Number of split global live ranges");
STATISTIC(NumLocalSplits,  "Number of split local live ranges");
STATISTIC(NumEvicted,      "Number of interferences evicted");

static cl::opt<SplitEditor::ComplementSpillMode>
SplitSpillMode("split-spill-mode", cl::Hidden,
  cl::desc("Spill mode for splitting live ranges"),
  cl::values(clEnumValN(SplitEditor::SM_Partition, "default", "Default"),
             clEnumValN(SplitEditor::SM_Size,  "size",  "Optimize for size"),
             clEnumValN(SplitEditor::SM_Speed, "speed", "Optimize for speed"),
             clEnumValEnd),
  cl::init(SplitEditor::SM_Partition));

static RegisterRegAlloc greedyRegAlloc("greedy", "greedy register allocator",
                                       createGreedyRegisterAllocator);

namespace {
class RAGreedy : public MachineFunctionPass,
                 public RegAllocBase,
                 private LiveRangeEdit::Delegate {

  // context
  MachineFunction *MF;

  // analyses
  SlotIndexes *Indexes;
  MachineDominatorTree *DomTree;
  MachineLoopInfo *Loops;
  EdgeBundles *Bundles;
  SpillPlacement *SpillPlacer;
  LiveDebugVariables *DebugVars;

  // state
  std::auto_ptr<Spiller> SpillerInstance;
  std::priority_queue<std::pair<unsigned, unsigned> > Queue;
  unsigned NextCascade;

  // Live ranges pass through a number of stages as we try to allocate them.
  // Some of the stages may also create new live ranges:
  //
  // - Region splitting.
  // - Per-block splitting.
  // - Local splitting.
  // - Spilling.
  //
  // Ranges produced by one of the stages skip the previous stages when they are
  // dequeued. This improves performance because we can skip interference checks
  // that are unlikely to give any results. It also guarantees that the live
  // range splitting algorithm terminates, something that is otherwise hard to
  // ensure.
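  //
  // Note that the order of this enum matters: the code below compares stages
  // with < and >= (e.g. getStage(VirtReg) >= RS_Split2), so any new stage must
  // be inserted where it belongs in this progression.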
  enum LiveRangeStage {
    /// Newly created live range that has never been queued.
    RS_New,

    /// Only attempt assignment and eviction. Then requeue as RS_Split.
    RS_Assign,

    /// Attempt live range splitting if assignment is impossible.
    RS_Split,

    /// Attempt more aggressive live range splitting that is guaranteed to make
    /// progress. This is used for split products that may not be making
    /// progress.
    RS_Split2,

    /// Live range will be spilled. No more splitting will be attempted.
    RS_Spill,

    /// There is nothing more we can do to this live range. Abort compilation
    /// if it can't be assigned.
    RS_Done
  };

  static const char *const StageName[];

  // RegInfo - Keep additional information about each live range.
  struct RegInfo {
    LiveRangeStage Stage;

    // Cascade - Eviction loop prevention. See canEvictInterference().
    unsigned Cascade;

    RegInfo() : Stage(RS_New), Cascade(0) {}
  };

  IndexedMap<RegInfo, VirtReg2IndexFunctor> ExtraRegInfo;

  LiveRangeStage getStage(const LiveInterval &VirtReg) const {
    return ExtraRegInfo[VirtReg.reg].Stage;
  }

  void setStage(const LiveInterval &VirtReg, LiveRangeStage Stage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    ExtraRegInfo[VirtReg.reg].Stage = Stage;
  }

  template<typename Iterator>
  void setStage(Iterator Begin, Iterator End, LiveRangeStage NewStage) {
    ExtraRegInfo.resize(MRI->getNumVirtRegs());
    for (;Begin != End; ++Begin) {
      unsigned Reg = (*Begin)->reg;
      if (ExtraRegInfo[Reg].Stage == RS_New)
        ExtraRegInfo[Reg].Stage = NewStage;
    }
  }

  /// Cost of evicting interference.
  struct EvictionCost {
    unsigned BrokenHints; ///< Total number of broken hints.
    float MaxWeight;      ///< Maximum spill weight evicted.

    EvictionCost(unsigned B = 0) : BrokenHints(B), MaxWeight(0) {}

    bool operator<(const EvictionCost &O) const {
      if (BrokenHints != O.BrokenHints)
        return BrokenHints < O.BrokenHints;
      return MaxWeight < O.MaxWeight;
    }
  };
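
  // Note that eviction costs compare lexicographically: an eviction that
  // breaks fewer hints always wins, and MaxWeight only breaks ties between
  // evictions breaking the same number of hints.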

  // Register mask interference. The current VirtReg is checked for register
  // mask interference on entry to selectOrSplit(). If there is no
  // interference, UsableRegs is left empty. If there is interference,
  // UsableRegs has a bit mask of registers that can be used without register
  // mask interference.
  BitVector UsableRegs;

  /// clobberedByRegMask - Returns true if PhysReg is not directly usable
  /// because of register mask clobbers.
  bool clobberedByRegMask(unsigned PhysReg) const {
    return !UsableRegs.empty() && !UsableRegs.test(PhysReg);
  }

  // splitting state.
  std::auto_ptr<SplitAnalysis> SA;
  std::auto_ptr<SplitEditor> SE;

  /// Cached per-block interference maps
  InterferenceCache IntfCache;

  /// All basic blocks where the current register has uses.
  SmallVector<SpillPlacement::BlockConstraint, 8> SplitConstraints;

  /// Global live range splitting candidate info.
  struct GlobalSplitCandidate {
    // Register intended for assignment, or 0.
    unsigned PhysReg;

    // SplitKit interval index for this candidate.
    unsigned IntvIdx;

    // Interference for PhysReg.
    InterferenceCache::Cursor Intf;

    // Bundles where this candidate should be live.
    BitVector LiveBundles;
    SmallVector<unsigned, 8> ActiveBlocks;

    void reset(InterferenceCache &Cache, unsigned Reg) {
      PhysReg = Reg;
      IntvIdx = 0;
      Intf.setPhysReg(Cache, Reg);
      LiveBundles.clear();
      ActiveBlocks.clear();
    }

    // Set B[i] = C for every live bundle where B[i] was NoCand.
    unsigned getBundles(SmallVectorImpl<unsigned> &B, unsigned C) {
      unsigned Count = 0;
      for (int i = LiveBundles.find_first(); i >= 0;
           i = LiveBundles.find_next(i))
        if (B[i] == NoCand) {
          B[i] = C;
          Count++;
        }
      return Count;
    }
  };

  /// Candidate info for each PhysReg in AllocationOrder.
  /// This vector never shrinks, but grows to the size of the largest register
  /// class.
  SmallVector<GlobalSplitCandidate, 32> GlobalCand;

  enum { NoCand = ~0u };

  /// Candidate map. Each edge bundle is assigned to a GlobalCand entry, or to
  /// NoCand which indicates the stack interval.
  SmallVector<unsigned, 32> BundleCand;

public:
  RAGreedy();

  /// Return the pass name.
  virtual const char* getPassName() const {
    return "Greedy Register Allocator";
  }

  /// RAGreedy analysis usage.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const;
  virtual void releaseMemory();
  virtual Spiller &spiller() { return *SpillerInstance; }
  virtual void enqueue(LiveInterval *LI);
  virtual LiveInterval *dequeue();
  virtual unsigned selectOrSplit(LiveInterval&,
                                 SmallVectorImpl<LiveInterval*>&);

  /// Perform register allocation.
  virtual bool runOnMachineFunction(MachineFunction &mf);

  static char ID;

private:
  bool LRE_CanEraseVirtReg(unsigned);
  void LRE_WillShrinkVirtReg(unsigned);
  void LRE_DidCloneVirtReg(unsigned, unsigned);

  float calcSpillCost();
  bool addSplitConstraints(InterferenceCache::Cursor, float&);
  void addThroughConstraints(InterferenceCache::Cursor, ArrayRef<unsigned>);
  void growRegion(GlobalSplitCandidate &Cand);
  float calcGlobalSplitCost(GlobalSplitCandidate&);
  bool calcCompactRegion(GlobalSplitCandidate&);
  void splitAroundRegion(LiveRangeEdit&, ArrayRef<unsigned>);
  void calcGapWeights(unsigned, SmallVectorImpl<float>&);
  bool shouldEvict(LiveInterval &A, bool, LiveInterval &B, bool);
  bool canEvictInterference(LiveInterval&, unsigned, bool, EvictionCost&);
  void evictInterference(LiveInterval&, unsigned,
                         SmallVectorImpl<LiveInterval*>&);

  unsigned tryAssign(LiveInterval&, AllocationOrder&,
                     SmallVectorImpl<LiveInterval*>&);
  unsigned tryEvict(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&, unsigned = ~0u);
  unsigned tryRegionSplit(LiveInterval&, AllocationOrder&,
                          SmallVectorImpl<LiveInterval*>&);
  unsigned tryBlockSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned tryInstructionSplit(LiveInterval&, AllocationOrder&,
                               SmallVectorImpl<LiveInterval*>&);
  unsigned tryLocalSplit(LiveInterval&, AllocationOrder&,
                         SmallVectorImpl<LiveInterval*>&);
  unsigned trySplit(LiveInterval&, AllocationOrder&,
                    SmallVectorImpl<LiveInterval*>&);
};
} // end anonymous namespace

char RAGreedy::ID = 0;

#ifndef NDEBUG
const char *const RAGreedy::StageName[] = {
    "RS_New",
    "RS_Assign",
    "RS_Split",
    "RS_Split2",
    "RS_Spill",
    "RS_Done"
};
#endif

// Hysteresis to use when comparing floats.
// This helps stabilize decisions based on float comparisons.
const float Hysteresis = 0.98f;
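// For example, tryRegionSplit records BestCost = Hysteresis * Cost after
// accepting a split candidate, so the next candidate must be roughly 2%
// cheaper before it can displace the current best. This keeps tiny rounding
// differences from flip-flopping the decision.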

FunctionPass* llvm::createGreedyRegisterAllocator() {
  return new RAGreedy();
}

RAGreedy::RAGreedy(): MachineFunctionPass(ID) {
  initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
  initializeSlotIndexesPass(*PassRegistry::getPassRegistry());
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
  initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
  initializeMachineSchedulerPass(*PassRegistry::getPassRegistry());
  initializeCalculateSpillWeightsPass(*PassRegistry::getPassRegistry());
  initializeLiveStacksPass(*PassRegistry::getPassRegistry());
  initializeMachineDominatorTreePass(*PassRegistry::getPassRegistry());
  initializeMachineLoopInfoPass(*PassRegistry::getPassRegistry());
  initializeVirtRegMapPass(*PassRegistry::getPassRegistry());
  initializeEdgeBundlesPass(*PassRegistry::getPassRegistry());
  initializeSpillPlacementPass(*PassRegistry::getPassRegistry());
}

void RAGreedy::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addRequired<LiveIntervals>();
  AU.addRequired<SlotIndexes>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequired<LiveDebugVariables>();
  AU.addPreserved<LiveDebugVariables>();
  AU.addRequired<CalculateSpillWeights>();
  AU.addRequired<LiveStacks>();
  AU.addPreserved<LiveStacks>();
  AU.addRequired<MachineDominatorTree>();
  AU.addPreserved<MachineDominatorTree>();
  AU.addRequired<MachineLoopInfo>();
  AU.addPreserved<MachineLoopInfo>();
  AU.addRequired<VirtRegMap>();
  AU.addPreserved<VirtRegMap>();
  AU.addRequired<EdgeBundles>();
  AU.addRequired<SpillPlacement>();
  MachineFunctionPass::getAnalysisUsage(AU);
}


//===----------------------------------------------------------------------===//
//                     LiveRangeEdit delegate methods
//===----------------------------------------------------------------------===//
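
// LiveRangeEdit invokes these callbacks while it deletes dead instructions and
// clones virtual registers, letting the allocator keep its queue, assignments,
// and per-register state in sync with the edits.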

bool RAGreedy::LRE_CanEraseVirtReg(unsigned VirtReg) {
  if (unsigned PhysReg = VRM->getPhys(VirtReg)) {
    unassign(LIS->getInterval(VirtReg), PhysReg);
    return true;
  }
  // Unassigned virtreg is probably in the priority queue.
  // RegAllocBase will erase it after dequeueing.
  return false;
}

void RAGreedy::LRE_WillShrinkVirtReg(unsigned VirtReg) {
  unsigned PhysReg = VRM->getPhys(VirtReg);
  if (!PhysReg)
    return;

  // Register is assigned, put it back on the queue for reassignment.
  LiveInterval &LI = LIS->getInterval(VirtReg);
  unassign(LI, PhysReg);
  enqueue(&LI);
}

void RAGreedy::LRE_DidCloneVirtReg(unsigned New, unsigned Old) {
  // Cloning a register we haven't even heard about yet? Just ignore it.
  if (!ExtraRegInfo.inBounds(Old))
    return;

  // LRE may clone a virtual register because dead code elimination causes it
  // to be split into connected components. The new components are much smaller
  // than the original, so they should get a new chance at being assigned.
  // Reset the parent to RS_Assign; the clone copies its info and so gets the
  // same stage as the parent.
  ExtraRegInfo[Old].Stage = RS_Assign;
  ExtraRegInfo.grow(New);
  ExtraRegInfo[New] = ExtraRegInfo[Old];
}

void RAGreedy::releaseMemory() {
  SpillerInstance.reset(0);
  ExtraRegInfo.clear();
  GlobalCand.clear();
  RegAllocBase::releaseMemory();
}

void RAGreedy::enqueue(LiveInterval *LI) {
  // Prioritize live ranges by size, assigning larger ranges first.
  // The queue holds (size, reg) pairs.
  const unsigned Size = LI->getSize();
  const unsigned Reg = LI->reg;
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "Can only enqueue virtual registers");
  unsigned Prio;

  ExtraRegInfo.grow(Reg);
  if (ExtraRegInfo[Reg].Stage == RS_New)
    ExtraRegInfo[Reg].Stage = RS_Assign;

  if (ExtraRegInfo[Reg].Stage == RS_Split) {
    // Unsplit ranges that couldn't be allocated immediately are deferred until
    // everything else has been allocated.
    Prio = Size;
  } else {
    // Everything is allocated in long->short order. Long ranges that don't fit
    // should be spilled (or split) ASAP so they don't create interference.
    Prio = (1u << 31) + Size;

    // Boost ranges that have a physical register hint.
    if (TargetRegisterInfo::isPhysicalRegister(VRM->getRegAllocPref(Reg)))
      Prio |= (1u << 30);
  }

  Queue.push(std::make_pair(Prio, ~Reg));
}

LiveInterval *RAGreedy::dequeue() {
  if (Queue.empty())
    return 0;
  LiveInterval *LI = &LIS->getInterval(~Queue.top().second);
  Queue.pop();
  return LI;
}
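
// About the queue encoding above: bit 31 of Prio separates first-round ranges
// from deferred RS_Split ranges, and bit 30 boosts ranges with a physreg hint.
// The register is pushed as ~Reg so that, among equal priorities, the max-heap
// pops lower-numbered virtual registers first; dequeue() undoes the complement.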


//===----------------------------------------------------------------------===//
//                            Direct Assignment
//===----------------------------------------------------------------------===//

/// tryAssign - Try to assign VirtReg to an available register.
unsigned RAGreedy::tryAssign(LiveInterval &VirtReg,
                             AllocationOrder &Order,
                             SmallVectorImpl<LiveInterval*> &NewVRegs) {
  Order.rewind();
  unsigned PhysReg;
  while ((PhysReg = Order.next())) {
    if (clobberedByRegMask(PhysReg))
      continue;
    if (!checkPhysRegInterference(VirtReg, PhysReg))
      break;
  }
  if (!PhysReg || Order.isHint(PhysReg))
    return PhysReg;

  // PhysReg is available, but there may be a better choice.

  // If we missed a simple hint, try to cheaply evict interference from the
  // preferred register.
  if (unsigned Hint = MRI->getSimpleHint(VirtReg.reg))
    if (Order.isHint(Hint) && !clobberedByRegMask(Hint)) {
      DEBUG(dbgs() << "missed hint " << PrintReg(Hint, TRI) << '\n');
      EvictionCost MaxCost(1);
      if (canEvictInterference(VirtReg, Hint, true, MaxCost)) {
        evictInterference(VirtReg, Hint, NewVRegs);
        return Hint;
      }
    }

  // Try to evict interference from a cheaper alternative.
  unsigned Cost = TRI->getCostPerUse(PhysReg);

  // Most registers have 0 additional cost.
  if (!Cost)
    return PhysReg;

  DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " is available at cost " << Cost
               << '\n');
  unsigned CheapReg = tryEvict(VirtReg, Order, NewVRegs, Cost);
  return CheapReg ? CheapReg : PhysReg;
}


//===----------------------------------------------------------------------===//
//                         Interference eviction
//===----------------------------------------------------------------------===//

/// shouldEvict - determine if A should evict the assigned live range B. The
/// eviction policy defined by this function together with the allocation order
/// defined by enqueue() decides which registers ultimately end up being split
/// and spilled.
///
/// Cascade numbers are used to prevent infinite loops if this function is a
/// cyclic relation.
///
/// @param A          The live range to be assigned.
/// @param IsHint     True when A is about to be assigned to its preferred
///                   register.
/// @param B          The live range to be evicted.
/// @param BreaksHint True when B is already assigned to its preferred register.
bool RAGreedy::shouldEvict(LiveInterval &A, bool IsHint,
                           LiveInterval &B, bool BreaksHint) {
  bool CanSplit = getStage(B) < RS_Spill;

  // Be fairly aggressive about following hints as long as the evictee can be
  // split.
  if (CanSplit && IsHint && !BreaksHint)
    return true;

  return A.weight > B.weight;
}

/// canEvictInterference - Return true if all interferences between VirtReg and
/// PhysReg can be evicted.
///
/// @param VirtReg Live range that is about to be assigned.
/// @param PhysReg Desired register for assignment.
/// @param IsHint  True when PhysReg is VirtReg's preferred register.
/// @param MaxCost Only look for cheaper candidates and update with new cost
///                when returning true.
/// @returns True when interference can be evicted cheaper than MaxCost.
bool RAGreedy::canEvictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                    bool IsHint, EvictionCost &MaxCost) {
  // Find VirtReg's cascade number. This will be unassigned if VirtReg was never
  // involved in an eviction before. If a cascade number was assigned, deny
  // evicting anything with the same or a newer cascade number. This prevents
  // infinite eviction loops.
  //
  // This works out so a register without a cascade number is allowed to evict
  // anything, and it can be evicted by anything.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = NextCascade;

  EvictionCost Cost;
  for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
    // If there are 10 or more interferences, chances are one is heavier.
    if (Q.collectInterferingVRegs(10) >= 10)
      return false;

    // Check if any interfering live range is heavier than MaxWeight.
    for (unsigned i = Q.interferingVRegs().size(); i; --i) {
      LiveInterval *Intf = Q.interferingVRegs()[i - 1];
      if (TargetRegisterInfo::isPhysicalRegister(Intf->reg))
        return false;
      // Never evict spill products. They cannot split or spill.
      if (getStage(*Intf) == RS_Done)
        return false;
      // Once a live range becomes small enough, it is urgent that we find a
      // register for it. This is indicated by an infinite spill weight. These
      // urgent live ranges get to evict almost anything.
      //
      // Also allow urgent evictions of unspillable ranges from a strictly
      // larger allocation order.
      bool Urgent = !VirtReg.isSpillable() &&
        (Intf->isSpillable() ||
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(VirtReg.reg)) <
         RegClassInfo.getNumAllocatableRegs(MRI->getRegClass(Intf->reg)));
      // Only evict older cascades or live ranges without a cascade.
      unsigned IntfCascade = ExtraRegInfo[Intf->reg].Cascade;
      if (Cascade <= IntfCascade) {
        if (!Urgent)
          return false;
        // We permit breaking cascades for urgent evictions. It should be the
        // last resort, though, so make it really expensive.
        Cost.BrokenHints += 10;
      }
      // Would this break a satisfied hint?
      bool BreaksHint = VRM->hasPreferredPhys(Intf->reg);
      // Update eviction cost.
      Cost.BrokenHints += BreaksHint;
      Cost.MaxWeight = std::max(Cost.MaxWeight, Intf->weight);
      // Abort if this would be too expensive.
      if (!(Cost < MaxCost))
        return false;
      // Finally, apply the eviction policy for non-urgent evictions.
      if (!Urgent && !shouldEvict(VirtReg, IsHint, *Intf, BreaksHint))
        return false;
    }
  }
  MaxCost = Cost;
  return true;
}

/// evictInterference - Evict any interfering registers that prevent VirtReg
/// from being assigned to PhysReg. This assumes that canEvictInterference
/// returned true.
void RAGreedy::evictInterference(LiveInterval &VirtReg, unsigned PhysReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Make sure that VirtReg has a cascade number, and assign that cascade
  // number to every evicted register. These live ranges can then only be
  // evicted by a newer cascade, preventing infinite loops.
  unsigned Cascade = ExtraRegInfo[VirtReg.reg].Cascade;
  if (!Cascade)
    Cascade = ExtraRegInfo[VirtReg.reg].Cascade = NextCascade++;

  DEBUG(dbgs() << "evicting " << PrintReg(PhysReg, TRI)
               << " interference: Cascade " << Cascade << '\n');
  for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI) {
    LiveIntervalUnion::Query &Q = query(VirtReg, *AI);
    assert(Q.seenAllInterferences() && "Didn't check all interferences.");
    for (unsigned i = 0, e = Q.interferingVRegs().size(); i != e; ++i) {
      LiveInterval *Intf = Q.interferingVRegs()[i];
      unassign(*Intf, VRM->getPhys(Intf->reg));
      assert((ExtraRegInfo[Intf->reg].Cascade < Cascade ||
              VirtReg.isSpillable() < Intf->isSpillable()) &&
             "Cannot decrease cascade number, illegal eviction");
      ExtraRegInfo[Intf->reg].Cascade = Cascade;
      ++NumEvicted;
      NewVRegs.push_back(Intf);
    }
  }
}

/// tryEvict - Try to evict all interferences for a physreg.
/// @param  VirtReg Currently unassigned virtual register.
/// @param  Order   Physregs to try.
/// @return         Physreg to assign VirtReg, or 0.
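/// @param  CostPerUseLimit Only consider physregs whose cost-per-use is below
///                 this limit; tryAssign passes a limit when looking for a
///                 cheaper alternative to a costly free register.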
unsigned RAGreedy::tryEvict(LiveInterval &VirtReg,
                            AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs,
                            unsigned CostPerUseLimit) {
  NamedRegionTimer T("Evict", TimerGroupName, TimePassesIsEnabled);

  // Keep track of the cheapest interference seen so far.
  EvictionCost BestCost(~0u);
  unsigned BestPhys = 0;

  // When we are just looking for a reduced cost per use, don't break any
  // hints, and only evict smaller spill weights.
  if (CostPerUseLimit < ~0u) {
    BestCost.BrokenHints = 0;
    BestCost.MaxWeight = VirtReg.weight;
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    if (clobberedByRegMask(PhysReg))
      continue;
    if (TRI->getCostPerUse(PhysReg) >= CostPerUseLimit)
      continue;
    // The first use of a callee-saved register in a function has cost 1.
    // Don't start using a CSR when the CostPerUseLimit is low.
    if (CostPerUseLimit == 1)
      if (unsigned CSR = RegClassInfo.getLastCalleeSavedAlias(PhysReg))
        if (!MRI->isPhysRegUsed(CSR)) {
          DEBUG(dbgs() << PrintReg(PhysReg, TRI) << " would clobber CSR "
                       << PrintReg(CSR, TRI) << '\n');
          continue;
        }

    if (!canEvictInterference(VirtReg, PhysReg, false, BestCost))
      continue;

    // Best so far.
    BestPhys = PhysReg;

    // Stop if the hint can be used.
    if (Order.isHint(PhysReg))
      break;
  }

  if (!BestPhys)
    return 0;

  evictInterference(VirtReg, BestPhys, NewVRegs);
  return BestPhys;
}


//===----------------------------------------------------------------------===//
//                              Region Splitting
//===----------------------------------------------------------------------===//

/// addSplitConstraints - Fill out the SplitConstraints vector based on the
/// interference pattern in Physreg and its aliases. Add the constraints to
/// SpillPlacement and return the static cost of this split in Cost, assuming
/// that all preferences in SplitConstraints are met.
/// Return false if there are no bundles with positive bias.
bool RAGreedy::addSplitConstraints(InterferenceCache::Cursor Intf,
                                   float &Cost) {
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();

  // Reset interference dependent info.
  SplitConstraints.resize(UseBlocks.size());
  float StaticCost = 0;
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];

    BC.Number = BI.MBB->getNumber();
    Intf.moveToBlock(BC.Number);
    BC.Entry = BI.LiveIn ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.Exit = BI.LiveOut ? SpillPlacement::PrefReg : SpillPlacement::DontCare;
    BC.ChangesValue = BI.FirstDef;

    if (!Intf.hasInterference())
      continue;

    // Number of spill code instructions to insert.
    unsigned Ins = 0;

    // Interference for the live-in value.
    if (BI.LiveIn) {
      if (Intf.first() <= Indexes->getMBBStartIdx(BC.Number))
        BC.Entry = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.first() < BI.FirstInstr)
        BC.Entry = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.first() < BI.LastInstr)
        ++Ins;
    }

    // Interference for the live-out value.
    if (BI.LiveOut) {
      if (Intf.last() >= SA->getLastSplitPoint(BC.Number))
        BC.Exit = SpillPlacement::MustSpill, ++Ins;
      else if (Intf.last() > BI.LastInstr)
        BC.Exit = SpillPlacement::PrefSpill, ++Ins;
      else if (Intf.last() > BI.FirstInstr)
        ++Ins;
    }

    // Accumulate the total frequency of inserted spill code.
    if (Ins)
      StaticCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }
  Cost = StaticCost;

  // Add constraints for use-blocks. Note that these are the only constraints
  // that may add a positive bias, it is downhill from here.
  SpillPlacer->addConstraints(SplitConstraints);
  return SpillPlacer->scanActiveBundles();
}

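// The Entry/Exit constraints used above and in addThroughConstraints come from
// SpillPlacement: DontCare (no preference), PrefReg (prefer the value live in
// a register), PrefSpill (prefer it on the stack), and MustSpill (interference
// leaves no choice). See SpillPlacement.h.
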
/// addThroughConstraints - Add constraints and links to SpillPlacer from the
/// live-through blocks in Blocks.
void RAGreedy::addThroughConstraints(InterferenceCache::Cursor Intf,
                                     ArrayRef<unsigned> Blocks) {
  const unsigned GroupSize = 8;
  SpillPlacement::BlockConstraint BCS[GroupSize];
  unsigned TBS[GroupSize];
  unsigned B = 0, T = 0;

  for (unsigned i = 0; i != Blocks.size(); ++i) {
    unsigned Number = Blocks[i];
    Intf.moveToBlock(Number);

    if (!Intf.hasInterference()) {
      assert(T < GroupSize && "Array overflow");
      TBS[T] = Number;
      if (++T == GroupSize) {
        SpillPlacer->addLinks(makeArrayRef(TBS, T));
        T = 0;
      }
      continue;
    }

    assert(B < GroupSize && "Array overflow");
    BCS[B].Number = Number;

    // Interference for the live-in value.
    if (Intf.first() <= Indexes->getMBBStartIdx(Number))
      BCS[B].Entry = SpillPlacement::MustSpill;
    else
      BCS[B].Entry = SpillPlacement::PrefSpill;

    // Interference for the live-out value.
    if (Intf.last() >= SA->getLastSplitPoint(Number))
      BCS[B].Exit = SpillPlacement::MustSpill;
    else
      BCS[B].Exit = SpillPlacement::PrefSpill;

    if (++B == GroupSize) {
      ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
      SpillPlacer->addConstraints(Array);
      B = 0;
    }
  }

  ArrayRef<SpillPlacement::BlockConstraint> Array(BCS, B);
  SpillPlacer->addConstraints(Array);
  SpillPlacer->addLinks(makeArrayRef(TBS, T));
}
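
// growRegion feeds blocks to SpillPlacement's Hopfield network in waves: after
// each iterate() the network settles, getRecentPositive() reports the bundles
// that flipped to prefer a register, and the blocks around those bundles are
// added next, until the region stops growing.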
void RAGreedy::growRegion(GlobalSplitCandidate &Cand) {
  // Keep track of through blocks that have not been added to SpillPlacer.
  BitVector Todo = SA->getThroughBlocks();
  SmallVectorImpl<unsigned> &ActiveBlocks = Cand.ActiveBlocks;
  unsigned AddedTo = 0;
#ifndef NDEBUG
  unsigned Visited = 0;
#endif

  for (;;) {
    ArrayRef<unsigned> NewBundles = SpillPlacer->getRecentPositive();
    // Find new through blocks in the periphery of PrefRegBundles.
    for (int i = 0, e = NewBundles.size(); i != e; ++i) {
      unsigned Bundle = NewBundles[i];
      // Look at all blocks connected to Bundle in the full graph.
      ArrayRef<unsigned> Blocks = Bundles->getBlocks(Bundle);
      for (ArrayRef<unsigned>::iterator I = Blocks.begin(), E = Blocks.end();
           I != E; ++I) {
        unsigned Block = *I;
        if (!Todo.test(Block))
          continue;
        Todo.reset(Block);
        // This is a new through block. Add it to SpillPlacer later.
        ActiveBlocks.push_back(Block);
#ifndef NDEBUG
        ++Visited;
#endif
      }
    }
    // Any new blocks to add?
    if (ActiveBlocks.size() == AddedTo)
      break;

    // Compute through constraints from the interference, or assume that all
    // through blocks prefer spilling when forming compact regions.
    ArrayRef<unsigned> NewBlocks = makeArrayRef(ActiveBlocks).slice(AddedTo);
    if (Cand.PhysReg)
      addThroughConstraints(Cand.Intf, NewBlocks);
    else
      // Provide a strong negative bias on through blocks to prevent unwanted
      // liveness on loop backedges.
      SpillPlacer->addPrefSpill(NewBlocks, /* Strong= */ true);
    AddedTo = ActiveBlocks.size();

    // Perhaps iterating can enable more bundles?
    SpillPlacer->iterate();
  }
  DEBUG(dbgs() << ", v=" << Visited);
}

/// calcCompactRegion - Compute the set of edge bundles that should be live
/// when splitting the current live range into compact regions. Compact
/// regions can be computed without looking at interference. They are the
/// regions formed by removing all the live-through blocks from the live range.
///
/// Returns false if the current live range is already compact, or if the
/// compact regions would form single block regions anyway.
bool RAGreedy::calcCompactRegion(GlobalSplitCandidate &Cand) {
  // Without any through blocks, the live range is already compact.
  if (!SA->getNumThroughBlocks())
    return false;

  // Compact regions don't correspond to any physreg.
  Cand.reset(IntfCache, 0);

  DEBUG(dbgs() << "Compact region bundles");

  // Use the spill placer to determine the live bundles. GrowRegion pretends
  // that all the through blocks have interference when PhysReg is unset.
  SpillPlacer->prepare(Cand.LiveBundles);

  // The static split cost will be zero since Cand.Intf reports no interference.
  float Cost;
  if (!addSplitConstraints(Cand.Intf, Cost)) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  growRegion(Cand);
  SpillPlacer->finish();

  if (!Cand.LiveBundles.any()) {
    DEBUG(dbgs() << ", none.\n");
    return false;
  }

  DEBUG({
    for (int i = Cand.LiveBundles.find_first(); i>=0;
         i = Cand.LiveBundles.find_next(i))
      dbgs() << " EB#" << i;
    dbgs() << ".\n";
  });
  return true;
}

/// calcSpillCost - Compute how expensive it would be to split the live range in
/// SA around all use blocks instead of forming bundle regions.
float RAGreedy::calcSpillCost() {
  float Cost = 0;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    // We normally only need one spill instruction - a load or a store.
    Cost += SpillPlacer->getBlockFrequency(Number);

    // Unless the value is redefined in the block.
    if (BI.LiveIn && BI.LiveOut && BI.FirstDef)
      Cost += SpillPlacer->getBlockFrequency(Number);
  }
  return Cost;
}
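
// In the cost and split computations below, Bundles->getBundle(N, 0) is the
// edge bundle at the entry of block N and Bundles->getBundle(N, 1) the bundle
// at its exit; a set bit in LiveBundles means the candidate should be live
// across that bundle.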
/// calcGlobalSplitCost - Return the global split cost of following the split
/// pattern in LiveBundles. This cost should be added to the local cost of the
/// interference pattern in SplitConstraints.
///
float RAGreedy::calcGlobalSplitCost(GlobalSplitCandidate &Cand) {
  float GlobalCost = 0;
  const BitVector &LiveBundles = Cand.LiveBundles;
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    SpillPlacement::BlockConstraint &BC = SplitConstraints[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(BC.Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(BC.Number, 1)];
    unsigned Ins = 0;

    if (BI.LiveIn)
      Ins += RegIn != (BC.Entry == SpillPlacement::PrefReg);
    if (BI.LiveOut)
      Ins += RegOut != (BC.Exit == SpillPlacement::PrefReg);
    if (Ins)
      GlobalCost += Ins * SpillPlacer->getBlockFrequency(BC.Number);
  }

  for (unsigned i = 0, e = Cand.ActiveBlocks.size(); i != e; ++i) {
    unsigned Number = Cand.ActiveBlocks[i];
    bool RegIn  = LiveBundles[Bundles->getBundle(Number, 0)];
    bool RegOut = LiveBundles[Bundles->getBundle(Number, 1)];
    if (!RegIn && !RegOut)
      continue;
    if (RegIn && RegOut) {
      // We need double spill code if this block has interference.
      Cand.Intf.moveToBlock(Number);
      if (Cand.Intf.hasInterference())
        GlobalCost += 2*SpillPlacer->getBlockFrequency(Number);
      continue;
    }
    // live-in / stack-out or stack-in live-out.
    GlobalCost += SpillPlacer->getBlockFrequency(Number);
  }
  return GlobalCost;
}

/// splitAroundRegion - Split the current live range around the regions
/// determined by BundleCand and GlobalCand.
///
/// Before calling this function, GlobalCand and BundleCand must be initialized
/// so each bundle is assigned to a valid candidate, or NoCand for the
/// stack-bound bundles. The shared SA/SE SplitAnalysis and SplitEditor
/// objects must be initialized for the current live range, and intervals
/// created for the used candidates.
///
/// @param LREdit    The LiveRangeEdit object handling the current split.
/// @param UsedCands List of used GlobalCand entries. Every BundleCand value
///                  must appear in this list.
void RAGreedy::splitAroundRegion(LiveRangeEdit &LREdit,
                                 ArrayRef<unsigned> UsedCands) {
  // These are the intervals created for new global ranges. We may create more
  // intervals for local ranges.
  const unsigned NumGlobalIntvs = LREdit.size();
  DEBUG(dbgs() << "splitAroundRegion with " << NumGlobalIntvs << " globals.\n");
  assert(NumGlobalIntvs && "No global intervals configured");

  // Isolate even single instructions when dealing with a proper sub-class.
  // That guarantees register class inflation for the stack interval because it
  // is all copies.
  unsigned Reg = SA->getParent().reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));

  // First handle all the blocks with uses.
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    unsigned Number = BI.MBB->getNumber();
    unsigned IntvIn = 0, IntvOut = 0;
    SlotIndex IntfIn, IntfOut;
    if (BI.LiveIn) {
      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }
    }
    if (BI.LiveOut) {
      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
    }

    // Create separate intervals for isolated blocks with multiple uses.
    if (!IntvIn && !IntvOut) {
      DEBUG(dbgs() << "BB#" << BI.MBB->getNumber() << " isolated.\n");
      if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
        SE->splitSingleBlock(BI);
      continue;
    }

    if (IntvIn && IntvOut)
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    else if (IntvIn)
      SE->splitRegInBlock(BI, IntvIn, IntfIn);
    else
      SE->splitRegOutBlock(BI, IntvOut, IntfOut);
  }

  // Handle live-through blocks. The relevant live-through blocks are stored in
  // the ActiveBlocks list with each candidate. We need to filter out
  // duplicates.
  BitVector Todo = SA->getThroughBlocks();
  for (unsigned c = 0; c != UsedCands.size(); ++c) {
    ArrayRef<unsigned> Blocks = GlobalCand[UsedCands[c]].ActiveBlocks;
    for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
      unsigned Number = Blocks[i];
      if (!Todo.test(Number))
        continue;
      Todo.reset(Number);

      unsigned IntvIn = 0, IntvOut = 0;
      SlotIndex IntfIn, IntfOut;

      unsigned CandIn = BundleCand[Bundles->getBundle(Number, 0)];
      if (CandIn != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandIn];
        IntvIn = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfIn = Cand.Intf.first();
      }

      unsigned CandOut = BundleCand[Bundles->getBundle(Number, 1)];
      if (CandOut != NoCand) {
        GlobalSplitCandidate &Cand = GlobalCand[CandOut];
        IntvOut = Cand.IntvIdx;
        Cand.Intf.moveToBlock(Number);
        IntfOut = Cand.Intf.last();
      }
      if (!IntvIn && !IntvOut)
        continue;
      SE->splitLiveThroughBlock(Number, IntvIn, IntfIn, IntvOut, IntfOut);
    }
  }

  ++NumGlobalSplits;

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  unsigned OrigBlocks = SA->getNumLiveBlocks();

  // Sort out the new intervals created by splitting. We get four kinds:
  // - Remainder intervals should not be split again.
  // - Candidate intervals can be assigned to Cand.PhysReg.
  // - Block-local splits are candidates for local splitting.
  // - DCE leftovers should go back on the queue.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &Reg = *LREdit.get(i);

    // Ignore old intervals from DCE.
    if (getStage(Reg) != RS_New)
      continue;

    // Remainder interval. Don't try splitting again, spill if it doesn't
    // allocate.
    if (IntvMap[i] == 0) {
      setStage(Reg, RS_Spill);
      continue;
    }

    // Global intervals. Allow repeated splitting as long as the number of live
    // blocks is strictly decreasing.
    if (IntvMap[i] < NumGlobalIntvs) {
      if (SA->countLiveBlocks(&Reg) >= OrigBlocks) {
        DEBUG(dbgs() << "Main interval covers the same " << OrigBlocks
                     << " blocks as original.\n");
        // Don't allow repeated splitting as a safeguard against looping.
        setStage(Reg, RS_Split2);
      }
      continue;
    }

    // Other intervals are treated as new. This includes local intervals created
    // for blocks with multiple uses, and anything created by DCE.
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around region");
}

unsigned RAGreedy::tryRegionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                  SmallVectorImpl<LiveInterval*> &NewVRegs) {
  unsigned NumCands = 0;
  unsigned BestCand = NoCand;
  float BestCost;
  SmallVector<unsigned, 8> UsedCands;

  // Check if we can split this live range around a compact region.
  bool HasCompact = calcCompactRegion(GlobalCand.front());
  if (HasCompact) {
    // Yes, keep GlobalCand[0] as the compact region candidate.
    NumCands = 1;
    BestCost = HUGE_VALF;
  } else {
    // No benefit from the compact region, our fallback will be per-block
    // splitting. Make sure we find a solution that is cheaper than spilling.
    BestCost = Hysteresis * calcSpillCost();
    DEBUG(dbgs() << "Cost of isolating all blocks = " << BestCost << '\n');
  }

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Discard bad candidates before we run out of interference cache cursors.
    // This will only affect register classes with a lot of registers (>32).
    if (NumCands == IntfCache.getMaxCursors()) {
      unsigned WorstCount = ~0u;
      unsigned Worst = 0;
      for (unsigned i = 0; i != NumCands; ++i) {
        if (i == BestCand || !GlobalCand[i].PhysReg)
          continue;
        unsigned Count = GlobalCand[i].LiveBundles.count();
        if (Count < WorstCount)
          Worst = i, WorstCount = Count;
      }
      --NumCands;
      GlobalCand[Worst] = GlobalCand[NumCands];
      if (BestCand == NumCands)
        BestCand = Worst;
    }

    if (GlobalCand.size() <= NumCands)
      GlobalCand.resize(NumCands+1);
    GlobalSplitCandidate &Cand = GlobalCand[NumCands];
    Cand.reset(IntfCache, PhysReg);

    SpillPlacer->prepare(Cand.LiveBundles);
    float Cost;
    if (!addSplitConstraints(Cand.Intf, Cost)) {
      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tno positive bundles\n");
      continue;
    }
    DEBUG(dbgs() << PrintReg(PhysReg, TRI) << "\tstatic = " << Cost);
    if (Cost >= BestCost) {
      DEBUG({
        if (BestCand == NoCand)
          dbgs() << " worse than no bundles\n";
        else
          dbgs() << " worse than "
                 << PrintReg(GlobalCand[BestCand].PhysReg, TRI) << '\n';
      });
      continue;
    }
    growRegion(Cand);

    SpillPlacer->finish();

    // No live bundles, defer to splitSingleBlocks().
    if (!Cand.LiveBundles.any()) {
      DEBUG(dbgs() << " no bundles.\n");
      continue;
    }

    Cost += calcGlobalSplitCost(Cand);
    DEBUG({
      dbgs() << ", total = " << Cost << " with bundles";
      for (int i = Cand.LiveBundles.find_first(); i>=0;
           i = Cand.LiveBundles.find_next(i))
        dbgs() << " EB#" << i;
      dbgs() << ".\n";
    });
    if (Cost < BestCost) {
      BestCand = NumCands;
      BestCost = Hysteresis * Cost; // Prevent rounding effects.
    }
    ++NumCands;
  }

  // No solutions found, fall back to single block splitting.
  if (!HasCompact && BestCand == NoCand)
    return 0;

  // Prepare split editor.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);

  // Assign all edge bundles to the preferred candidate, or NoCand.
  BundleCand.assign(Bundles->getNumBundles(), NoCand);

  // Assign bundles for the best candidate region.
  if (BestCand != NoCand) {
    GlobalSplitCandidate &Cand = GlobalCand[BestCand];
    if (unsigned B = Cand.getBundles(BundleCand, BestCand)) {
      UsedCands.push_back(BestCand);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for " << PrintReg(Cand.PhysReg, TRI) << " in "
                   << B << " bundles, intv " << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

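  // getBundles never overwrites an entry that already has a candidate, so the
  // compact region below only picks up bundles the best candidate left as
  // NoCand, and anything still unclaimed stays on the stack interval.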
  // Assign bundles for the compact region.
  if (HasCompact) {
    GlobalSplitCandidate &Cand = GlobalCand.front();
    assert(!Cand.PhysReg && "Compact region has no physreg");
    if (unsigned B = Cand.getBundles(BundleCand, 0)) {
      UsedCands.push_back(0);
      Cand.IntvIdx = SE->openIntv();
      DEBUG(dbgs() << "Split for compact region in " << B << " bundles, intv "
                   << Cand.IntvIdx << ".\n");
      (void)B;
    }
  }

  splitAroundRegion(LREdit, UsedCands);
  return 0;
}


//===----------------------------------------------------------------------===//
//                            Per-Block Splitting
//===----------------------------------------------------------------------===//

/// tryBlockSplit - Split a global live range around every block with uses. This
/// creates a lot of local live ranges that will be split by tryLocalSplit if
/// they don't allocate.
unsigned RAGreedy::tryBlockSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(&SA->getParent() == &VirtReg && "Live range wasn't analyzed");
  unsigned Reg = VirtReg.reg;
  bool SingleInstrs = RegClassInfo.isProperSubClass(MRI->getRegClass(Reg));
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitSpillMode);
  ArrayRef<SplitAnalysis::BlockInfo> UseBlocks = SA->getUseBlocks();
  for (unsigned i = 0; i != UseBlocks.size(); ++i) {
    const SplitAnalysis::BlockInfo &BI = UseBlocks[i];
    if (SA->shouldSplitSingleBlock(BI, SingleInstrs))
      SE->splitSingleBlock(BI);
  }
  // No blocks were split.
  if (LREdit.empty())
    return 0;

  // We did split for some blocks.
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);

  // Tell LiveDebugVariables about the new ranges.
  DebugVars->splitRegister(Reg, LREdit.regs());

  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Sort out the new intervals created by splitting. The remainder interval
  // goes straight to spilling, the new local ranges get to stay RS_New.
  for (unsigned i = 0, e = LREdit.size(); i != e; ++i) {
    LiveInterval &LI = *LREdit.get(i);
    if (getStage(LI) == RS_New && IntvMap[i] == 0)
      setStage(LI, RS_Spill);
  }

  if (VerifyEnabled)
    MF->verify(this, "After splitting live range around basic blocks");
  return 0;
}


//===----------------------------------------------------------------------===//
//                         Per-Instruction Splitting
//===----------------------------------------------------------------------===//

/// tryInstructionSplit - Split a live range around individual instructions.
/// This is normally not worthwhile since the spiller is doing essentially the
/// same thing. However, when the live range is in a constrained register
/// class, it may help to insert copies such that parts of the live range can
/// be moved to a larger register class.
///
/// This is similar to spilling to a larger register class.
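/// For example (on x86, hypothetically), a range constrained to GR32_ABCD by a
/// single instruction can be split so only a tiny range needs the narrow
/// class, while the surrounding copy-connected pieces can inflate to GR32 and
/// become easier to allocate.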
unsigned
RAGreedy::tryInstructionSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                              SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // There is no point to this if there are no larger sub-classes.
  if (!RegClassInfo.isProperSubClass(MRI->getRegClass(VirtReg.reg)))
    return 0;

  // Always enable split spill mode, since we're effectively spilling to a
  // register.
  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit, SplitEditor::SM_Size);

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 1)
    return 0;

  DEBUG(dbgs() << "Split around " << Uses.size() << " individual instrs.\n");

  // Split around every non-copy instruction.
  for (unsigned i = 0; i != Uses.size(); ++i) {
    if (const MachineInstr *MI = Indexes->getInstructionFromIndex(Uses[i]))
      if (MI->isFullCopy()) {
        DEBUG(dbgs() << "    skip:\t" << Uses[i] << '\t' << *MI);
        continue;
      }
    SE->openIntv();
    SlotIndex SegStart = SE->enterIntvBefore(Uses[i]);
    SlotIndex SegStop = SE->leaveIntvAfter(Uses[i]);
    SE->useIntv(SegStart, SegStop);
  }

  if (LREdit.empty()) {
    DEBUG(dbgs() << "All uses were copies.\n");
    return 0;
  }

  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());
  ExtraRegInfo.resize(MRI->getNumVirtRegs());

  // Assign all new registers to RS_Spill. This was the last chance.
  setStage(LREdit.begin(), LREdit.end(), RS_Spill);
  return 0;
}


//===----------------------------------------------------------------------===//
//                             Local Splitting
//===----------------------------------------------------------------------===//


/// calcGapWeights - Compute the maximum spill weight that needs to be evicted
/// in order to use PhysReg between two entries in SA->UseSlots.
///
/// GapWeight[i] represents the gap between UseSlots[i] and UseSlots[i+1].
///
void RAGreedy::calcGapWeights(unsigned PhysReg,
                              SmallVectorImpl<float> &GapWeight) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();
  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  const unsigned NumGaps = Uses.size()-1;

  // Start and end points for the interference check.
  SlotIndex StartIdx =
    BI.LiveIn ? BI.FirstInstr.getBaseIndex() : BI.FirstInstr;
  SlotIndex StopIdx =
    BI.LiveOut ? BI.LastInstr.getBoundaryIndex() : BI.LastInstr;

  GapWeight.assign(NumGaps, 0.0f);

  // Add interference from each overlapping register.
  for (MCRegAliasIterator AI(PhysReg, TRI, true); AI.isValid(); ++AI) {
    if (!query(const_cast<LiveInterval&>(SA->getParent()), *AI)
          .checkInterference())
      continue;

    // We know that VirtReg is a continuous interval from FirstInstr to
    // LastInstr, so we don't need InterferenceQuery.
    //
    // Interference that overlaps an instruction is counted in both gaps
    // surrounding the instruction. The exception is interference before
    // StartIdx and after StopIdx.
    //
    LiveIntervalUnion::SegmentIter IntI = getLiveUnion(*AI).find(StartIdx);
    for (unsigned Gap = 0; IntI.valid() && IntI.start() < StopIdx; ++IntI) {
      // Skip the gaps before IntI.
      while (Uses[Gap+1].getBoundaryIndex() < IntI.start())
        if (++Gap == NumGaps)
          break;
      if (Gap == NumGaps)
        break;

      // Update the gaps covered by IntI.
      const float weight = IntI.value()->weight;
      for (; Gap != NumGaps; ++Gap) {
        GapWeight[Gap] = std::max(GapWeight[Gap], weight);
        if (Uses[Gap+1].getBaseIndex() >= IntI.stop())
          break;
      }
      if (Gap == NumGaps)
        break;
    }
  }
}

/// tryLocalSplit - Try to split VirtReg into smaller intervals inside its only
/// basic block.
///
unsigned RAGreedy::tryLocalSplit(LiveInterval &VirtReg, AllocationOrder &Order,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  assert(SA->getUseBlocks().size() == 1 && "Not a local interval");
  const SplitAnalysis::BlockInfo &BI = SA->getUseBlocks().front();

  // Note that it is possible to have an interval that is live-in or live-out
  // while only covering a single block - A phi-def can use undef values from
  // predecessors, and the block could be a single-block loop.
  // We don't bother doing anything clever about such a case, we simply assume
  // that the interval is continuous from FirstInstr to LastInstr. We should
  // make sure that we don't do anything illegal to such an interval, though.

  ArrayRef<SlotIndex> Uses = SA->getUseSlots();
  if (Uses.size() <= 2)
    return 0;
  const unsigned NumGaps = Uses.size()-1;

  DEBUG({
    dbgs() << "tryLocalSplit: ";
    for (unsigned i = 0, e = Uses.size(); i != e; ++i)
      dbgs() << ' ' << Uses[i];
    dbgs() << '\n';
  });

  // If VirtReg is live across any register mask operands, compute a list of
  // gaps with register masks.
  SmallVector<unsigned, 8> RegMaskGaps;
  if (!UsableRegs.empty()) {
    // Get regmask slots for the whole block.
    ArrayRef<SlotIndex> RMS = LIS->getRegMaskSlotsInBlock(BI.MBB->getNumber());
    DEBUG(dbgs() << RMS.size() << " regmasks in block:");
    // Constrain to VirtReg's live range.
    unsigned ri = std::lower_bound(RMS.begin(), RMS.end(),
                                   Uses.front().getRegSlot()) - RMS.begin();
    unsigned re = RMS.size();
    for (unsigned i = 0; i != NumGaps && ri != re; ++i) {
      // Look for Uses[i] <= RMS <= Uses[i+1].
      assert(!SlotIndex::isEarlierInstr(RMS[ri], Uses[i]));
      if (SlotIndex::isEarlierInstr(Uses[i+1], RMS[ri]))
        continue;
      // Skip a regmask on the same instruction as the last use. It doesn't
      // overlap the live range.
      if (SlotIndex::isSameInstr(Uses[i+1], RMS[ri]) && i+1 == NumGaps)
        break;
      DEBUG(dbgs() << ' ' << RMS[ri] << ':' << Uses[i] << '-' << Uses[i+1]);
      RegMaskGaps.push_back(i);
      // Advance ri to the next gap. A regmask on one of the uses counts in
      // both gaps.
      while (ri != re && SlotIndex::isEarlierInstr(RMS[ri], Uses[i+1]))
        ++ri;
    }
    DEBUG(dbgs() << '\n');
  }

  // Since we allow local split results to be split again, there is a risk of
  // creating infinite loops. It is tempting to require that the new live
  // ranges have fewer instructions than the original. That would guarantee
  // convergence, but it is too strict. A live range with 3 instructions can be
  // split into 2+3 (including the COPY), and we want to allow that.
  //
  // Instead we use these rules:
  //
  // 1. Allow any split for ranges with getStage() < RS_Split2. (Except for the
  //    noop split, of course).
  // 2. Require progress be made for ranges with getStage() == RS_Split2. All
  //    the new ranges must have fewer instructions than before the split.
  // 3. New ranges with the same number of instructions are marked RS_Split2,
  //    smaller ranges are marked RS_New.
  //
  // These rules allow a 3 -> 2+3 split once, which we need. They also prevent
  // excessive splitting and infinite loops.
  //
  bool ProgressRequired = getStage(VirtReg) >= RS_Split2;

  // Best split candidate.
  unsigned BestBefore = NumGaps;
  unsigned BestAfter = 0;
  float BestDiff = 0;

  const float blockFreq = SpillPlacer->getBlockFrequency(BI.MBB->getNumber());
  SmallVector<float, 8> GapWeight;

  Order.rewind();
  while (unsigned PhysReg = Order.next()) {
    // Keep track of the largest spill weight that would need to be evicted in
    // order to make use of PhysReg between UseSlots[i] and UseSlots[i+1].
    calcGapWeights(PhysReg, GapWeight);

    // Remove any gaps with regmask clobbers.
    if (clobberedByRegMask(PhysReg))
      for (unsigned i = 0, e = RegMaskGaps.size(); i != e; ++i)
        GapWeight[RegMaskGaps[i]] = HUGE_VALF;

    // Try to find the best sequence of gaps to close.
    // The new spill weight must be larger than any gap interference.

    // We will split before Uses[SplitBefore] and after Uses[SplitAfter].
    unsigned SplitBefore = 0, SplitAfter = 1;

    // MaxGap should always be max(GapWeight[SplitBefore..SplitAfter-1]).
    // It is the spill weight that needs to be evicted.
    float MaxGap = GapWeight[0];

    for (;;) {
      // Live before/after split?
      const bool LiveBefore = SplitBefore != 0 || BI.LiveIn;
      const bool LiveAfter = SplitAfter != NumGaps || BI.LiveOut;

      DEBUG(dbgs() << PrintReg(PhysReg, TRI) << ' '
                   << Uses[SplitBefore] << '-' << Uses[SplitAfter]
                   << " i=" << MaxGap);

      // Stop before the interval gets so big we wouldn't be making progress.
      if (!LiveBefore && !LiveAfter) {
        DEBUG(dbgs() << " all\n");
        break;
      }
      // Should the interval be extended or shrunk?
      bool Shrink = true;

      // How many gaps would the new range have?
      unsigned NewGaps = LiveBefore + SplitAfter - SplitBefore + LiveAfter;

      // Legally, without causing looping?
      bool Legal = !ProgressRequired || NewGaps < NumGaps;

      if (Legal && MaxGap < HUGE_VALF) {
        // Estimate the new spill weight. Each instruction reads or writes the
        // register. Conservatively assume there are no read-modify-write
        // instructions.
        //
        // Try to guess the size of the new interval.
        const float EstWeight = normalizeSpillWeight(blockFreq * (NewGaps + 1),
                                 Uses[SplitBefore].distance(Uses[SplitAfter]) +
                                 (LiveBefore + LiveAfter)*SlotIndex::InstrDist);
        // Would this split be possible to allocate?
        // Never allocate all gaps, we wouldn't be making progress.
        DEBUG(dbgs() << " w=" << EstWeight);
        if (EstWeight * Hysteresis >= MaxGap) {
          Shrink = false;
          float Diff = EstWeight - MaxGap;
          if (Diff > BestDiff) {
            DEBUG(dbgs() << " (best)");
            BestDiff = Hysteresis * Diff;
            BestBefore = SplitBefore;
            BestAfter = SplitAfter;
          }
        }
      }

      // Try to shrink.
      if (Shrink) {
        if (++SplitBefore < SplitAfter) {
          DEBUG(dbgs() << " shrink\n");
          if (GapWeight[SplitBefore - 1] >= MaxGap) {
            MaxGap = GapWeight[SplitBefore];
            for (unsigned i = SplitBefore + 1; i != SplitAfter; ++i)
              MaxGap = std::max(MaxGap, GapWeight[i]);
          }
          continue;
        }
        MaxGap = 0;
      }

      // Try to extend the interval.
      if (SplitAfter >= NumGaps) {
        DEBUG(dbgs() << " end\n");
        break;
      }

      DEBUG(dbgs() << " extend\n");
      MaxGap = std::max(MaxGap, GapWeight[SplitAfter++]);
    }
  }

  // Didn't find any candidates?
  if (BestBefore == NumGaps)
    return 0;

  DEBUG(dbgs() << "Best local split range: " << Uses[BestBefore]
               << '-' << Uses[BestAfter] << ", " << BestDiff
               << ", " << (BestAfter - BestBefore + 1) << " instrs\n");

  LiveRangeEdit LREdit(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  SE->reset(LREdit);

  SE->openIntv();
  SlotIndex SegStart = SE->enterIntvBefore(Uses[BestBefore]);
  SlotIndex SegStop = SE->leaveIntvAfter(Uses[BestAfter]);
  SE->useIntv(SegStart, SegStop);
  SmallVector<unsigned, 8> IntvMap;
  SE->finish(&IntvMap);
  DebugVars->splitRegister(VirtReg.reg, LREdit.regs());

  // If the new range has the same number of instructions as before, mark it as
  // RS_Split2 so the next split will be forced to make progress. Otherwise,
  // leave the new intervals as RS_New so they can compete.
  bool LiveBefore = BestBefore != 0 || BI.LiveIn;
  bool LiveAfter = BestAfter != NumGaps || BI.LiveOut;
  unsigned NewGaps = LiveBefore + BestAfter - BestBefore + LiveAfter;
  if (NewGaps >= NumGaps) {
    DEBUG(dbgs() << "Tagging non-progress ranges: ");
    assert(!ProgressRequired && "Didn't make progress when it was required.");
    for (unsigned i = 0, e = IntvMap.size(); i != e; ++i)
      if (IntvMap[i] == 1) {
        setStage(*LREdit.get(i), RS_Split2);
        DEBUG(dbgs() << PrintReg(LREdit.get(i)->reg));
      }
    DEBUG(dbgs() << '\n');
  }
  ++NumLocalSplits;

  return 0;
}

//===----------------------------------------------------------------------===//
//                          Live Range Splitting
//===----------------------------------------------------------------------===//

/// trySplit - Try to split VirtReg or one of its interferences, making it
/// assignable.
/// @return Physreg when VirtReg may be assigned and/or new NewVRegs.
unsigned RAGreedy::trySplit(LiveInterval &VirtReg, AllocationOrder &Order,
                            SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Ranges must be Split2 or less.
  if (getStage(VirtReg) >= RS_Spill)
    return 0;

  // Local intervals are handled separately.
  if (LIS->intervalIsInOneMBB(VirtReg)) {
    NamedRegionTimer T("Local Splitting", TimerGroupName, TimePassesIsEnabled);
    SA->analyze(&VirtReg);
    unsigned PhysReg = tryLocalSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
    return tryInstructionSplit(VirtReg, Order, NewVRegs);
  }

  NamedRegionTimer T("Global Splitting", TimerGroupName, TimePassesIsEnabled);

  SA->analyze(&VirtReg);

  // FIXME: SplitAnalysis may repair broken live ranges coming from the
  // coalescer. That may cause the range to become allocatable which means that
  // tryRegionSplit won't be making progress. This check should be replaced with
  // an assertion when the coalescer is fixed.
  if (SA->didRepairRange()) {
    // VirtReg has changed, so all cached queries are invalid.
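    // Discard the stale cached queries, then retry plain assignment: the
    // repair may have made the range allocatable (see the FIXME above), in
    // which case no splitting is needed.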
    invalidateVirtRegs();
    if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
      return PhysReg;
  }

  // First try to split around a region spanning multiple blocks. RS_Split2
  // ranges already made dubious progress with region splitting, so they go
  // straight to single block splitting.
  if (getStage(VirtReg) < RS_Split2) {
    unsigned PhysReg = tryRegionSplit(VirtReg, Order, NewVRegs);
    if (PhysReg || !NewVRegs.empty())
      return PhysReg;
  }

  // Then isolate blocks.
  return tryBlockSplit(VirtReg, Order, NewVRegs);
}


//===----------------------------------------------------------------------===//
//                            Main Entry Point
//===----------------------------------------------------------------------===//

unsigned RAGreedy::selectOrSplit(LiveInterval &VirtReg,
                                 SmallVectorImpl<LiveInterval*> &NewVRegs) {
  // Check if VirtReg is live across any calls.
  UsableRegs.clear();
  if (LIS->checkRegMaskInterference(VirtReg, UsableRegs))
    DEBUG(dbgs() << "Live across regmasks.\n");

  // First try assigning a free register.
  AllocationOrder Order(VirtReg.reg, *VRM, RegClassInfo);
  if (unsigned PhysReg = tryAssign(VirtReg, Order, NewVRegs))
    return PhysReg;

  LiveRangeStage Stage = getStage(VirtReg);
  DEBUG(dbgs() << StageName[Stage]
               << " Cascade " << ExtraRegInfo[VirtReg.reg].Cascade << '\n');

  // Try to evict a less worthy live range, but only for ranges from the
  // primary queue. The RS_Split ranges already failed to do this, and they
  // should not get a second chance until they have been split.
  if (Stage != RS_Split)
    if (unsigned PhysReg = tryEvict(VirtReg, Order, NewVRegs))
      return PhysReg;

  assert(NewVRegs.empty() && "Cannot append to existing NewVRegs");

  // The first time we see a live range, don't try to split or spill.
  // Wait until the second time, when all smaller ranges have been allocated.
  // This gives a better picture of the interference to split around.
  if (Stage < RS_Split) {
    setStage(VirtReg, RS_Split);
    DEBUG(dbgs() << "wait for second round\n");
    NewVRegs.push_back(&VirtReg);
    return 0;
  }

  // If we couldn't allocate a register from spilling, there is probably some
  // invalid inline assembly. The base class will report it.
  if (Stage >= RS_Done || !VirtReg.isSpillable())
    return ~0u;

  // Try splitting VirtReg or interferences.
  unsigned PhysReg = trySplit(VirtReg, Order, NewVRegs);
  if (PhysReg || !NewVRegs.empty())
    return PhysReg;

  // Finally spill VirtReg itself.
  NamedRegionTimer T("Spiller", TimerGroupName, TimePassesIsEnabled);
  LiveRangeEdit LRE(&VirtReg, NewVRegs, *MF, *LIS, VRM, this);
  spiller().spill(LRE);
  setStage(NewVRegs.begin(), NewVRegs.end(), RS_Done);

  if (VerifyEnabled)
    MF->verify(this, "After spilling");

  // The live virtual register requesting allocation was spilled, so tell
  // the caller not to allocate anything during this round.
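  // Returning 0 with the spiller's new intervals still in NewVRegs lets the
  // base class enqueue them, so they are allocated in later rounds.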
  return 0;
}

bool RAGreedy::runOnMachineFunction(MachineFunction &mf) {
  DEBUG(dbgs() << "********** GREEDY REGISTER ALLOCATION **********\n"
               << "********** Function: "
               << ((Value*)mf.getFunction())->getName() << '\n');

  MF = &mf;
  if (VerifyEnabled)
    MF->verify(this, "Before greedy register allocator");

  RegAllocBase::init(getAnalysis<VirtRegMap>(), getAnalysis<LiveIntervals>());
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();
  SpillerInstance.reset(createInlineSpiller(*this, *MF, *VRM));
  Loops = &getAnalysis<MachineLoopInfo>();
  Bundles = &getAnalysis<EdgeBundles>();
  SpillPlacer = &getAnalysis<SpillPlacement>();
  DebugVars = &getAnalysis<LiveDebugVariables>();

  SA.reset(new SplitAnalysis(*VRM, *LIS, *Loops));
  SE.reset(new SplitEditor(*SA, *LIS, *VRM, *DomTree));
  ExtraRegInfo.clear();
  ExtraRegInfo.resize(MRI->getNumVirtRegs());
  NextCascade = 1;
  IntfCache.init(MF, &getLiveUnion(0), Indexes, LIS, TRI);
  GlobalCand.resize(32);  // This will grow as needed.

  allocatePhysRegs();
  addMBBLiveIns(MF);
  LIS->addKillFlags();

  // Run rewriter
  {
    NamedRegionTimer T("Rewriter", TimerGroupName, TimePassesIsEnabled);
    VRM->rewrite(Indexes);
  }

  // Write out new DBG_VALUE instructions.
  {
    NamedRegionTimer T("Emit Debug Info", TimerGroupName, TimePassesIsEnabled);
    DebugVars->emitDebugValues(VRM);
  }

  // All machine operands and other references to virtual registers have been
  // replaced. Remove the virtual registers and release all the transient data.
  VRM->clearAllVirt();
  MRI->clearVirtRegs();
  releaseMemory();

  return true;
}