VirtRegMap.cpp revision 8f1d6402ba73b96993ffd1eb9434b28c828d8856
//===-- llvm/CodeGen/VirtRegMap.cpp - Virtual Register Map ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the VirtRegMap class.
//
// It also contains implementations of the Spiller interface, which, given a
// virtual register map and a machine function, eliminates all virtual
// references by replacing them with physical register references - adding spill
// code as necessary.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "VirtRegMap.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SSARegMap.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
using namespace llvm;

namespace {
  Statistic<> NumSpills("spiller", "Number of register spills");
  Statistic<> NumStores("spiller", "Number of stores added");
  Statistic<> NumLoads ("spiller", "Number of loads added");
  Statistic<> NumReused("spiller", "Number of values reused");
  Statistic<> NumDSE   ("spiller", "Number of dead stores elided");

  enum SpillerName { simple, local };

  cl::opt<SpillerName>
  SpillerOpt("spiller",
             cl::desc("Spiller to use: (default: local)"),
             cl::Prefix,
             cl::values(clEnumVal(simple, "  simple spiller"),
                        clEnumVal(local,  "  local spiller"),
                        clEnumValEnd),
             cl::init(local));
}

//===----------------------------------------------------------------------===//
//  VirtRegMap implementation
//===----------------------------------------------------------------------===//

void VirtRegMap::grow() {
  Virt2PhysMap.grow(MF.getSSARegMap()->getLastVirtReg());
  Virt2StackSlotMap.grow(MF.getSSARegMap()->getLastVirtReg());
}

int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
  assert(MRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  const TargetRegisterClass* RC = MF.getSSARegMap()->getRegClass(virtReg);
  int frameIndex = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                        RC->getAlignment());
  Virt2StackSlotMap[virtReg] = frameIndex;
  ++NumSpills;
  return frameIndex;
}

void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int frameIndex) {
  assert(MRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  Virt2StackSlotMap[virtReg] = frameIndex;
}

void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
                            unsigned OpNo, MachineInstr *NewMI) {
  // Move previous memory references folded to new instruction.
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
  for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
         E = MI2VirtMap.end(); I != E && I->first == OldMI; ) {
    MI2VirtMap.insert(IP, std::make_pair(NewMI, I->second));
    MI2VirtMap.erase(I++);
  }

  ModRef MRInfo;
  if (!OldMI->getOperand(OpNo).isDef()) {
    assert(OldMI->getOperand(OpNo).isUse() && "Operand is not use or def?");
    MRInfo = isRef;
  } else {
    MRInfo = OldMI->getOperand(OpNo).isUse() ? isModRef : isMod;
  }

  // Add the new memory reference.
  MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
}

void VirtRegMap::print(std::ostream &OS) const {
  const MRegisterInfo* MRI = MF.getTarget().getRegisterInfo();

  OS << "********** REGISTER MAP **********\n";
  for (unsigned i = MRegisterInfo::FirstVirtualRegister,
         e = MF.getSSARegMap()->getLastVirtReg(); i <= e; ++i) {
    if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG)
      OS << "[reg" << i << " -> " << MRI->getName(Virt2PhysMap[i]) << "]\n";
  }

  for (unsigned i = MRegisterInfo::FirstVirtualRegister,
         e = MF.getSSARegMap()->getLastVirtReg(); i <= e; ++i)
    if (Virt2StackSlotMap[i] != VirtRegMap::NO_STACK_SLOT)
      OS << "[reg" << i << " -> fi#" << Virt2StackSlotMap[i] << "]\n";
  OS << '\n';
}

void VirtRegMap::dump() const { print(std::cerr); }
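
// Illustrative sketch, not part of the original file: how an allocator might
// drive the map above. The constructor signature is assumed; the member
// calls (assignVirt2StackSlot, hasStackSlot, getStackSlot, print) are all
// defined in this file.
//
//   VirtRegMap VRM(MF);                       // assumed ctor over the function
//   int FI = VRM.assignVirt2StackSlot(VReg);  // spill VReg, get its frame index
//   if (VRM.hasStackSlot(VReg))               // query it back later
//     assert(VRM.getStackSlot(VReg) == FI);
//   VRM.print(std::cerr);                     // dump the resulting map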

//===----------------------------------------------------------------------===//
//  Simple Spiller Implementation
//===----------------------------------------------------------------------===//

Spiller::~Spiller() {}

namespace {
  struct SimpleSpiller : public Spiller {
    bool runOnMachineFunction(MachineFunction& mf, const VirtRegMap &VRM);
  };
}

bool SimpleSpiller::runOnMachineFunction(MachineFunction& MF,
                                         const VirtRegMap& VRM) {
  DEBUG(std::cerr << "********** REWRITE MACHINE CODE **********\n");
  DEBUG(std::cerr << "********** Function: "
                  << MF.getFunction()->getName() << '\n');
  const TargetMachine& TM = MF.getTarget();
  const MRegisterInfo& MRI = *TM.getRegisterInfo();

  // LoadedRegs - Keep track of which vregs are loaded, so that we only load
  // each vreg once (in the case where a spilled vreg is used by multiple
  // operands).  This is always smaller than the number of operands to the
  // current machine instr, so it should be small.
  std::vector<unsigned> LoadedRegs;

  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    DEBUG(std::cerr << MBBI->getBasicBlock()->getName() << ":\n");
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator MII = MBB.begin(),
           E = MBB.end(); MII != E; ++MII) {
      MachineInstr &MI = *MII;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (MO.isRegister() && MO.getReg() &&
            MRegisterInfo::isVirtualRegister(MO.getReg())) {
          unsigned VirtReg = MO.getReg();
          unsigned PhysReg = VRM.getPhys(VirtReg);
          if (VRM.hasStackSlot(VirtReg)) {
            int StackSlot = VRM.getStackSlot(VirtReg);

            if (MO.isUse() &&
                std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
                  == LoadedRegs.end()) {
              MRI.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot);
              LoadedRegs.push_back(VirtReg);
              ++NumLoads;
              DEBUG(std::cerr << '\t' << *prior(MII));
            }

            if (MO.isDef()) {
              MRI.storeRegToStackSlot(MBB, next(MII), PhysReg, StackSlot);
              ++NumStores;
            }
          }
          MI.SetMachineOperandReg(i, PhysReg);
        }
      }
      DEBUG(std::cerr << '\t' << MI);
      LoadedRegs.clear();
    }
  }
  return true;
}
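
// Illustrative example, not part of the original file: given %V1 spilled
// (assigned R0, slot SS#0) and %V2 assigned R1 but not spilled, SimpleSpiller
// rewrites
//
//     %V1 = op %V1, %V2        ; %V1 is both used and redefined
//
// into roughly
//
//     R0 = load <SS#0>         ; reload inserted before the use
//     R0 = op R0, R1           ; operands rewritten to physregs
//     store R0 -> <SS#0>       ; spill inserted after the def
//
// The register and slot names are hypothetical; the load and store come from
// loadRegFromStackSlot/storeRegToStackSlot above.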

//===----------------------------------------------------------------------===//
//  Local Spiller Implementation
//===----------------------------------------------------------------------===//

namespace {
  /// LocalSpiller - This spiller does a simple pass over the machine basic
  /// block to attempt to keep spills in registers as much as possible for
  /// blocks that have low register pressure (the vreg may be spilled due to
  /// register pressure in other blocks).
  class LocalSpiller : public Spiller {
    const MRegisterInfo *MRI;
    const TargetInstrInfo *TII;
  public:
    bool runOnMachineFunction(MachineFunction &MF, const VirtRegMap &VRM) {
      MRI = MF.getTarget().getRegisterInfo();
      TII = MF.getTarget().getInstrInfo();
      DEBUG(std::cerr << "\n**** Local spiller rewriting function '"
                      << MF.getFunction()->getName() << "':\n");

      for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
           MBB != E; ++MBB)
        RewriteMBB(*MBB, VRM);
      return true;
    }
  private:
    void RewriteMBB(MachineBasicBlock &MBB, const VirtRegMap &VRM);
    void ClobberPhysReg(unsigned PR, std::map<int, unsigned> &SpillSlots,
                        std::map<unsigned, int> &PhysRegs);
    void ClobberPhysRegOnly(unsigned PR, std::map<int, unsigned> &SpillSlots,
                            std::map<unsigned, int> &PhysRegs);
  };
}

void LocalSpiller::ClobberPhysRegOnly(unsigned PhysReg,
                                      std::map<int, unsigned> &SpillSlots,
                                      std::map<unsigned, int> &PhysRegs) {
  std::map<unsigned, int>::iterator I = PhysRegs.find(PhysReg);
  if (I != PhysRegs.end()) {
    int Slot = I->second;
    PhysRegs.erase(I);
    assert(SpillSlots[Slot] == PhysReg && "Bidirectional map mismatch!");
    SpillSlots.erase(Slot);
    DEBUG(std::cerr << "PhysReg " << MRI->getName(PhysReg)
                    << " clobbered, invalidating SS#" << Slot << "\n");
  }
}

void LocalSpiller::ClobberPhysReg(unsigned PhysReg,
                                  std::map<int, unsigned> &SpillSlots,
                                  std::map<unsigned, int> &PhysRegs) {
  for (const unsigned *AS = MRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS, SpillSlots, PhysRegs);
  ClobberPhysRegOnly(PhysReg, SpillSlots, PhysRegs);
}
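
// Illustrative example, not part of the original file: the SpillSlots and
// PhysRegs maps are maintained as exact inverses of each other.  If SS#2 is
// known to live in R3, then SpillSlots[2] == R3 and PhysRegs[R3] == 2 (the
// assert above checks this invariant).  Clobbering R3, or any register
// aliasing it, must drop both entries, which is what ClobberPhysReg and
// ClobberPhysRegOnly do.  The register and slot numbers are hypothetical.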

// ReusedOp - For each reused operand, we keep track of a bit of information,
// in case we need to rollback upon processing a new operand.  See comments
// below.
namespace {
  struct ReusedOp {
    // The MachineInstr operand that reused an available value.
    unsigned Operand;

    // StackSlot - The spill slot of the value being reused.
    unsigned StackSlot;

    // PhysRegReused - The physical register the value was available in.
    unsigned PhysRegReused;

    // AssignedPhysReg - The physreg that was assigned for use by the reload.
    unsigned AssignedPhysReg;

    ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr)
      : Operand(o), StackSlot(ss), PhysRegReused(prr), AssignedPhysReg(apr) {}
  };
}


/// RewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them.  If possible, avoid reloading vregs.
void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, const VirtRegMap &VRM) {

  // SpillSlotsAvailable - This map keeps track of all of the spilled virtual
  // register values that are still available, due to being loaded or stored
  // to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsAvailable, indicating
  // which physregs are in use holding a stack slot value.
  std::map<unsigned, int> PhysRegsAvailable;

  DEBUG(std::cerr << MBB.getBasicBlock()->getName() << ":\n");

  std::vector<ReusedOp> ReusedOperands;

  // DefAndUseVReg - When we see a def&use operand that is spilled, keep track
  // of it.  ".first" is the machine operand index (should always be 0 for
  // now), and ".second" is the virtual register that is spilled.
  std::vector<std::pair<unsigned, unsigned> > DefAndUseVReg;

  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store.  If the stack slot value is never read
  // (because the value was used from some available register, for example),
  // and subsequently stored to, the original store is dead.  This map keeps
  // track of inserted stores that are not used.  If we see a subsequent store
  // to the same stack slot, the original store is deleted.
  std::map<int, MachineInstr*> MaybeDeadStores;
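
  // Illustrative example, not part of the original file: if we emit
  //     store R0 -> <SS#1>       ; recorded in MaybeDeadStores[1]
  // and later process another def of the same stack slot with no intervening
  // read of SS#1, the second
  //     store R2 -> <SS#1>
  // makes the first store dead, and it is erased (counted by NumDSE).  The
  // registers and slot number here are hypothetical.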

  for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
       MII != E; ) {
    MachineInstr &MI = *MII;
    MachineBasicBlock::iterator NextMII = MII; ++NextMII;

    ReusedOperands.clear();
    DefAndUseVReg.clear();

    // Process all of the spilled uses and all non-spilled reg references.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isRegister() && MO.getReg() &&
          MRegisterInfo::isVirtualRegister(MO.getReg())) {
        unsigned VirtReg = MO.getReg();

        if (!VRM.hasStackSlot(VirtReg)) {
          // This virtual register was assigned a physreg!
          MI.SetMachineOperandReg(i, VRM.getPhys(VirtReg));
        } else {
          // Is this virtual register a spilled value?
          if (MO.isUse()) {
            int StackSlot = VRM.getStackSlot(VirtReg);
            unsigned PhysReg;

            // Check to see if this stack slot is available.
            std::map<int, unsigned>::iterator SSI =
              SpillSlotsAvailable.find(StackSlot);
            if (SSI != SpillSlotsAvailable.end()) {
              DEBUG(std::cerr << "Reusing SS#" << StackSlot << " from physreg "
                              << MRI->getName(SSI->second) << " for vreg"
                              << VirtReg << " instead of reloading into physreg "
                              << MRI->getName(VRM.getPhys(VirtReg)) << "\n");
              // If this stack slot value is already available, reuse it!
              PhysReg = SSI->second;
              MI.SetMachineOperandReg(i, PhysReg);

              // The only technical detail is that we don't know that PhysReg
              // won't be clobbered by a reloaded stack slot that occurs later
              // in the instruction.  In particular, consider 'op V1, V2'.  If
              // V1 is available in physreg R0, we would choose to reuse it
              // here, instead of reloading it into the register the allocator
              // indicated (say R1).  However, V2 might have to be reloaded
              // later, and it might indicate that it needs to live in R0.
              // When this occurs, we need to have information available that
              // indicates it is safe to use R1 for the reload instead of R0.
              //
              // To further complicate matters, we might conflict with an
              // alias, or R0 and R1 might not be compatible with each other.
              // In this case, we actually insert a reload for V1 in R1,
              // ensuring that we can get at R0 or its alias.
              ReusedOperands.push_back(ReusedOp(i, StackSlot, PhysReg,
                                                VRM.getPhys(VirtReg)));
              ++NumReused;
            } else {
              // Otherwise, reload it and remember that we have it.
              PhysReg = VRM.getPhys(VirtReg);

            RecheckRegister:
              // Note that, if we reused a register for a previous operand, the
              // register we want to reload into might not actually be
              // available.  If this occurs, use the register indicated by the
              // reuser.
              if (!ReusedOperands.empty())     // This is most often empty.
                for (unsigned ro = 0, e = ReusedOperands.size(); ro != e; ++ro)
                  if (ReusedOperands[ro].PhysRegReused == PhysReg) {
                    // Yup, use the reload register that we didn't use before.
                    PhysReg = ReusedOperands[ro].AssignedPhysReg;
                    goto RecheckRegister;
                  } else {
                    ReusedOp &Op = ReusedOperands[ro];
                    unsigned PRRU = Op.PhysRegReused;
                    for (const unsigned *AS = MRI->getAliasSet(PRRU); *AS; ++AS)
                      if (*AS == PhysReg) {
                        // Okay, we found out that an alias of a reused register
                        // was used.  This isn't good because it means we have
                        // to undo a previous reuse.
                        MRI->loadRegFromStackSlot(MBB, &MI, Op.AssignedPhysReg,
                                                  Op.StackSlot);
                        ClobberPhysReg(Op.AssignedPhysReg, SpillSlotsAvailable,
                                       PhysRegsAvailable);

                        // Any stores to this stack slot are not dead anymore.
                        MaybeDeadStores.erase(Op.StackSlot);

                        MI.SetMachineOperandReg(Op.Operand, Op.AssignedPhysReg);
                        PhysRegsAvailable[Op.AssignedPhysReg] = Op.StackSlot;
                        SpillSlotsAvailable[Op.StackSlot] = Op.AssignedPhysReg;
                        PhysRegsAvailable.erase(Op.PhysRegReused);
                        DEBUG(std::cerr << "Remembering SS#" << Op.StackSlot
                                        << " in physreg "
                                        << MRI->getName(Op.AssignedPhysReg)
                                        << "\n");
                        ++NumLoads;
                        DEBUG(std::cerr << '\t' << *prior(MII));

                        DEBUG(std::cerr << "Reuse undone!\n");
                        ReusedOperands.erase(ReusedOperands.begin()+ro);
                        --NumReused;
                        goto ContinueReload;
                      }
                  }
            ContinueReload:

              MRI->loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot);
              // This invalidates PhysReg.
              ClobberPhysReg(PhysReg, SpillSlotsAvailable, PhysRegsAvailable);

              // Any stores to this stack slot are not dead anymore.
              MaybeDeadStores.erase(StackSlot);

              MI.SetMachineOperandReg(i, PhysReg);
              PhysRegsAvailable[PhysReg] = StackSlot;
              SpillSlotsAvailable[StackSlot] = PhysReg;
              DEBUG(std::cerr << "Remembering SS#" << StackSlot
                              << " in physreg " << MRI->getName(PhysReg)
                              << "\n");
              ++NumLoads;
              DEBUG(std::cerr << '\t' << *prior(MII));
            }

            // If this is both a def and a use, we need to emit a store to the
            // stack slot after the instruction.  Keep track of D&U operands
            // because we have already changed them to physregs here.
            if (MO.isDef()) {
              // Remember that this was a def-and-use operand, and that the
              // stack slot is live after this instruction executes.
              DefAndUseVReg.push_back(std::make_pair(i, VirtReg));
            }
          }
        }
      }
    }

    // Loop over all of the implicit defs, clearing them from our available
    // sets.
    const TargetInstrDescriptor &InstrDesc = TII->get(MI.getOpcode());
    for (const unsigned* ImpDef = InstrDesc.ImplicitDefs; *ImpDef; ++ImpDef)
      ClobberPhysReg(*ImpDef, SpillSlotsAvailable, PhysRegsAvailable);

    DEBUG(std::cerr << '\t' << MI);

    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.
    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
      DEBUG(std::cerr << "Folded vreg: " << I->second.first << "  MR: "
                      << I->second.second);
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      if (VRM.hasStackSlot(VirtReg)) {
        int SS = VRM.getStackSlot(VirtReg);
        DEBUG(std::cerr << " - StackSlot: " << SS << "\n");

        // If this reference is not a use, any previous store is now dead.
        // Otherwise, the store to this stack slot is not dead anymore.
        std::map<int, MachineInstr*>::iterator MDSI = MaybeDeadStores.find(SS);
        if (MDSI != MaybeDeadStores.end()) {
          if (MR & VirtRegMap::isRef)   // Previous store is not dead.
            MaybeDeadStores.erase(MDSI);
          else {
            // If we get here, the store is dead, nuke it now.
            assert(MR == VirtRegMap::isMod && "Can't be modref!");
            MBB.erase(MDSI->second);
            MaybeDeadStores.erase(MDSI);
            ++NumDSE;
          }
        }

        // If the spill slot value is available, and this is a new definition
        // of the value, the value is not available anymore.
        if (MR & VirtRegMap::isMod) {
          std::map<int, unsigned>::iterator It = SpillSlotsAvailable.find(SS);
          if (It != SpillSlotsAvailable.end()) {
            PhysRegsAvailable.erase(It->second);
            SpillSlotsAvailable.erase(It);
          }
        }
      } else {
        DEBUG(std::cerr << ": No stack slot!\n");
      }
    }
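
    // Illustrative note, not part of the original file: a def&use operand
    // typically arises from a two-address instruction, e.g. an x86-style
    // 'add' where operand 0 is both read and written (which is why the
    // DefAndUseVReg comment above says the index should always be 0 for
    // now).  The use half was already rewritten to a physreg above; the
    // store for the def half is emitted by the loop below using the
    // recorded (operand index, vreg) pairs.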

    // Process all of the spilled defs.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (MO.isRegister() && MO.getReg() && MO.isDef()) {
        unsigned VirtReg = MO.getReg();

        bool TakenCareOf = false;
        if (!MRegisterInfo::isVirtualRegister(VirtReg)) {
          // Check to see if this is a def-and-use vreg operand that we do need
          // to insert a store for.
          bool OpTakenCareOf = false;
          if (MO.isUse() && !DefAndUseVReg.empty()) {
            for (unsigned dau = 0, e = DefAndUseVReg.size(); dau != e; ++dau)
              if (DefAndUseVReg[dau].first == i) {
                VirtReg = DefAndUseVReg[dau].second;
                OpTakenCareOf = true;
                break;
              }
          }

          if (!OpTakenCareOf) {
            ClobberPhysReg(VirtReg, SpillSlotsAvailable, PhysRegsAvailable);
            TakenCareOf = true;
          }
        }

        if (!TakenCareOf) {
          // The only vregs left are stack slot definitions.
          int StackSlot = VRM.getStackSlot(VirtReg);
          unsigned PhysReg;

          // If this is a def&use operand, and we used a different physreg for
          // it than the one assigned, make sure to execute the store from the
          // correct physical register.
          if (MO.getReg() == VirtReg)
            PhysReg = VRM.getPhys(VirtReg);
          else
            PhysReg = MO.getReg();

          MRI->storeRegToStackSlot(MBB, next(MII), PhysReg, StackSlot);
          DEBUG(std::cerr << "Store:\t" << *next(MII));
          MI.SetMachineOperandReg(i, PhysReg);

          // If there is a dead store to this stack slot, nuke it now.
          MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
          if (LastStore) {
            DEBUG(std::cerr << " Killed store:\t" << *LastStore);
            ++NumDSE;
            MBB.erase(LastStore);
          }
          LastStore = next(MII);

          // If the stack slot value was previously available in some other
          // register, change it now.  Otherwise, make the register available,
          // in PhysReg.
          std::map<int, unsigned>::iterator SSA =
            SpillSlotsAvailable.find(StackSlot);
          if (SSA != SpillSlotsAvailable.end()) {
            // Remove the record for physreg.
            PhysRegsAvailable.erase(SSA->second);
            SpillSlotsAvailable.erase(SSA);
          }
          ClobberPhysReg(PhysReg, SpillSlotsAvailable, PhysRegsAvailable);

          PhysRegsAvailable[PhysReg] = StackSlot;
          SpillSlotsAvailable[StackSlot] = PhysReg;
          DEBUG(std::cerr << "Updating SS#" << StackSlot << " in physreg "
                          << MRI->getName(PhysReg) << " for virtreg #"
                          << VirtReg << "\n");

          ++NumStores;
          VirtReg = PhysReg;
        }
      }
    }
    MII = NextMII;
  }
}



llvm::Spiller* llvm::createSpiller() {
  switch (SpillerOpt) {
  default: assert(0 && "Unreachable!");
  case local:
    return new LocalSpiller();
  case simple:
    return new SimpleSpiller();
  }
}
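
// Illustrative sketch, not part of the original file: how a register
// allocator might invoke the spiller selected on the command line.  The
// surrounding pass boilerplate is assumed; createSpiller() and
// runOnMachineFunction() are the entry points defined in this file.
//
//   std::auto_ptr<Spiller> S(createSpiller()); // honors -spiller=<name>
//   S->runOnMachineFunction(MF, VRM);          // rewrite vregs, insert spills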