VirtRegMap.cpp revision 8c33368479221bbf80ef8f1fca85a74a8aebb2c2
//===-- llvm/CodeGen/VirtRegMap.cpp - Virtual Register Map ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the VirtRegMap class.
//
// It also contains implementations of the Spiller interface, which, given a
// virtual register map and a machine function, eliminates all virtual
// references by replacing them with physical register references - adding spill
// code as necessary.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "VirtRegMap.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumSpills, "Number of register spills");
STATISTIC(NumPSpills, "Number of physical register spills");
STATISTIC(NumReMats, "Number of re-materializations");
STATISTIC(NumDRM   , "Number of re-materializable defs elided");
STATISTIC(NumStores, "Number of stores added");
STATISTIC(NumLoads , "Number of loads added");
STATISTIC(NumReused, "Number of values reused");
STATISTIC(NumDSE   , "Number of dead stores elided");
STATISTIC(NumDCE   , "Number of copies elided");
STATISTIC(NumDSS   , "Number of dead spill slots removed");

namespace {
  enum SpillerName { simple, local };
}

static cl::opt<SpillerName>
SpillerOpt("spiller",
           cl::desc("Spiller to use: (default: local)"),
           cl::Prefix,
           cl::values(clEnumVal(simple, "  simple spiller"),
                      clEnumVal(local,  "  local spiller"),
                      clEnumValEnd),
           cl::init(local));

//===----------------------------------------------------------------------===//
//  VirtRegMap implementation
//===----------------------------------------------------------------------===//

VirtRegMap::VirtRegMap(MachineFunction &mf)
  : TII(*mf.getTarget().getInstrInfo()), MF(mf),
    Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT),
    Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
    Virt2SplitKillMap(0), ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1),
    LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) {
  SpillSlotToUsesMap.resize(8);
  ImplicitDefed.resize(MF.getRegInfo().getLastVirtReg()+1-
                       TargetRegisterInfo::FirstVirtualRegister);
  grow();
}

void VirtRegMap::grow() {
  unsigned LastVirtReg = MF.getRegInfo().getLastVirtReg();
  Virt2PhysMap.grow(LastVirtReg);
  Virt2StackSlotMap.grow(LastVirtReg);
  Virt2ReMatIdMap.grow(LastVirtReg);
  Virt2SplitMap.grow(LastVirtReg);
  Virt2SplitKillMap.grow(LastVirtReg);
  ReMatMap.grow(LastVirtReg);
  ImplicitDefed.resize(LastVirtReg-TargetRegisterInfo::FirstVirtualRegister+1);
}

int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(virtReg);
  int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                RC->getAlignment());
  if (LowSpillSlot == NO_STACK_SLOT)
    LowSpillSlot = SS;
  if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
    HighSpillSlot = SS;
  unsigned Idx = SS-LowSpillSlot;
  while (Idx >= SpillSlotToUsesMap.size())
    SpillSlotToUsesMap.resize(SpillSlotToUsesMap.size()*2);
  Virt2StackSlotMap[virtReg] = SS;
  ++NumSpills;
  return SS;
}

void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int SS) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  assert((SS >= 0 ||
          (SS >= MF.getFrameInfo()->getObjectIndexBegin())) &&
         "illegal fixed frame index");
  Virt2StackSlotMap[virtReg] = SS;
}

int VirtRegMap::assignVirtReMatId(unsigned virtReg) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign re-mat id to already spilled register");
  Virt2ReMatIdMap[virtReg] = ReMatId;
  return ReMatId++;
}

void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign re-mat id to already spilled register");
  Virt2ReMatIdMap[virtReg] = id;
}

int VirtRegMap::getEmergencySpillSlot(const TargetRegisterClass *RC) {
  std::map<const TargetRegisterClass*, int>::iterator I =
    EmergencySpillSlots.find(RC);
  if (I != EmergencySpillSlots.end())
    return I->second;
  int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                RC->getAlignment());
  if (LowSpillSlot == NO_STACK_SLOT)
    LowSpillSlot = SS;
  if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
    HighSpillSlot = SS;
  // Record the new slot; assigning through the end() iterator returned by the
  // failed find above would be undefined behavior.
  EmergencySpillSlots[RC] = SS;
  return SS;
}

void VirtRegMap::addSpillSlotUse(int FI, MachineInstr *MI) {
  if (!MF.getFrameInfo()->isFixedObjectIndex(FI)) {
    // If FI < LowSpillSlot, this stack reference was produced by
    // instruction selection and is not a spill
    if (FI >= LowSpillSlot) {
      assert(FI >= 0 && "Spill slot index should not be negative!");
      assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
             && "Invalid spill slot");
      SpillSlotToUsesMap[FI-LowSpillSlot].insert(MI);
    }
  }
}

void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
                            MachineInstr *NewMI, ModRef MRInfo) {
  // Move previous memory references folded to new instruction.
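  // (Illustrative note, not in the original source: MI2VirtMap is a multimap,
  // so OldMI may carry several (VirtReg, ModRef) entries, e.g. when two
  // different vregs were folded into the same instruction, and every one of
  // them is re-keyed to NewMI below.)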
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
  for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
         E = MI2VirtMap.end(); I != E && I->first == OldMI; ) {
    MI2VirtMap.insert(IP, std::make_pair(NewMI, I->second));
    MI2VirtMap.erase(I++);
  }

  // add new memory reference
  MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
}

void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo) {
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(MI);
  MI2VirtMap.insert(IP, std::make_pair(MI, std::make_pair(VirtReg, MRInfo)));
}

void VirtRegMap::RemoveMachineInstrFromMaps(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFrameIndex())
      continue;
    int FI = MO.getIndex();
    if (MF.getFrameInfo()->isFixedObjectIndex(FI))
      continue;
    // This stack reference was produced by instruction selection and
    // is not a spill
    if (FI < LowSpillSlot)
      continue;
    assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
           && "Invalid spill slot");
    SpillSlotToUsesMap[FI-LowSpillSlot].erase(MI);
  }
  MI2VirtMap.erase(MI);
  SpillPt2VirtMap.erase(MI);
  RestorePt2VirtMap.erase(MI);
  EmergencySpillMap.erase(MI);
}

void VirtRegMap::print(std::ostream &OS) const {
  const TargetRegisterInfo* TRI = MF.getTarget().getRegisterInfo();

  OS << "********** REGISTER MAP **********\n";
  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
         e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i) {
    if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG)
      OS << "[reg" << i << " -> " << TRI->getName(Virt2PhysMap[i])
         << "]\n";
  }

  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
         e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i)
    if (Virt2StackSlotMap[i] != VirtRegMap::NO_STACK_SLOT)
      OS << "[reg" << i << " -> fi#" << Virt2StackSlotMap[i] << "]\n";
  OS << '\n';
}

void VirtRegMap::dump() const {
  print(cerr);
}


//===----------------------------------------------------------------------===//
// Simple Spiller Implementation
//===----------------------------------------------------------------------===//

Spiller::~Spiller() {}

namespace {
  struct VISIBILITY_HIDDEN SimpleSpiller : public Spiller {
    bool runOnMachineFunction(MachineFunction& mf, VirtRegMap &VRM);
  };
}

bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
  DOUT << "********** REWRITE MACHINE CODE **********\n";
  DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();


  // LoadedRegs - Keep track of which vregs are loaded, so that we only load
  // each vreg once (in the case where a spilled vreg is used by multiple
  // operands).  This is always smaller than the number of operands to the
  // current machine instr, so it should be small.
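  //
  // Illustrative sketch (not part of the original code): for an instruction
  // such as "v1 = ADD v1, v2" where both vregs live on the stack, the loop
  // below emits roughly:
  //   R0 = load [ss#1]        ; reload v1 (only once, thanks to LoadedRegs)
  //   R1 = load [ss#2]        ; reload v2
  //   R0 = ADD R0, R1
  //   store R0 -> [ss#1]      ; write the def back to its slot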
  std::vector<unsigned> LoadedRegs;

  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    DOUT << MBBI->getBasicBlock()->getName() << ":\n";
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator MII = MBB.begin(),
           E = MBB.end(); MII != E; ++MII) {
      MachineInstr &MI = *MII;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (MO.isRegister() && MO.getReg()) {
          if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
            unsigned VirtReg = MO.getReg();
            unsigned PhysReg = VRM.getPhys(VirtReg);
            if (!VRM.isAssignedReg(VirtReg)) {
              int StackSlot = VRM.getStackSlot(VirtReg);
              const TargetRegisterClass* RC =
                MF.getRegInfo().getRegClass(VirtReg);

              if (MO.isUse() &&
                  std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
                  == LoadedRegs.end()) {
                TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
                MachineInstr *LoadMI = prior(MII);
                VRM.addSpillSlotUse(StackSlot, LoadMI);
                LoadedRegs.push_back(VirtReg);
                ++NumLoads;
                DOUT << '\t' << *LoadMI;
              }

              if (MO.isDef()) {
                TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
                                        StackSlot, RC);
                MachineInstr *StoreMI = next(MII);
                VRM.addSpillSlotUse(StackSlot, StoreMI);
                ++NumStores;
              }
            }
            MF.getRegInfo().setPhysRegUsed(PhysReg);
            MI.getOperand(i).setReg(PhysReg);
          } else {
            MF.getRegInfo().setPhysRegUsed(MO.getReg());
          }
        }
      }

      DOUT << '\t' << MI;
      LoadedRegs.clear();
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
// Local Spiller Implementation
//===----------------------------------------------------------------------===//

namespace {
  class AvailableSpills;

  /// LocalSpiller - This spiller does a simple pass over the machine basic
  /// block to attempt to keep spills in registers as much as possible for
  /// blocks that have low register pressure (the vreg may be spilled due to
  /// register pressure in other blocks).
  class VISIBILITY_HIDDEN LocalSpiller : public Spiller {
    MachineRegisterInfo *RegInfo;
    const TargetRegisterInfo *TRI;
    const TargetInstrInfo *TII;
    DenseMap<MachineInstr*, unsigned> DistanceMap;
  public:
    bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
      RegInfo = &MF.getRegInfo();
      TRI = MF.getTarget().getRegisterInfo();
      TII = MF.getTarget().getInstrInfo();
      DOUT << "\n**** Local spiller rewriting function '"
           << MF.getFunction()->getName() << "':\n";
      DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
              " ****\n";
      DEBUG(MF.dump());

      for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
           MBB != E; ++MBB)
        RewriteMBB(*MBB, VRM);

      // Mark unused spill slots.
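      // (Illustrative note: a slot counts as unused when every load or store
      // of it was deleted as dead, leaving its SpillSlotToUsesMap entry empty;
      // the frame object can then be dropped from the frame info below.)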
      MachineFrameInfo *MFI = MF.getFrameInfo();
      int SS = VRM.getLowSpillSlot();
      if (SS != VirtRegMap::NO_STACK_SLOT)
        for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
          if (!VRM.isSpillSlotUsed(SS)) {
            MFI->RemoveStackObject(SS);
            ++NumDSS;
          }

      DOUT << "**** Post Machine Instrs ****\n";
      DEBUG(MF.dump());

      return true;
    }
  private:
    void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                          unsigned Reg, BitVector &RegKills,
                          std::vector<MachineOperand*> &KillOps);
    bool PrepForUnfoldOpti(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           AvailableSpills &Spills, BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);
    void SpillRegToStackSlot(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator &MII,
                             int Idx, unsigned PhysReg, int StackSlot,
                             const TargetRegisterClass *RC,
                             bool isAvailable, MachineInstr *&LastStore,
                             AvailableSpills &Spills,
                             SmallSet<MachineInstr*, 4> &ReMatDefs,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM);
    void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM);
  };
}

/// AvailableSpills - As the local spiller is scanning and rewriting an MBB from
/// top down, keep track of which spill slots or remat ids are available in each
/// register.
///
/// Note that not all physregs are created equal here.  In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register allocated program is using that
/// we cannot CHANGE, but we can read if we like.  We keep track of this on a
/// per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsAvailable entries.  The predicate 'canClobberPhysReg()' checks
/// this bit, and addAvailable sets it when clobbering is allowed.
namespace {
class VISIBILITY_HIDDEN AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to being
  // loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg.  This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);
public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }

  const TargetRegisterInfo *getRegInfo() const { return TRI; }

  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }

  /// addAvailable - Mark that the specified stack slot / remat is available in
  /// the specified physreg.  If CanClobber is true, the physreg can be modified
  /// at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, MachineInstr *MI, unsigned Reg,
                    bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat]= (Reg << 1) | (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "Remembering RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1;
    else
      DOUT << "Remembering SS#" << SlotOrReMat;
    DOUT << " in physreg " << TRI->getName(Reg) << "\n";
  }

  /// canClobberPhysReg - Return true if the spiller is allowed to change the
  /// value of the specified stackslot register if it desires.  The specified
  /// stack slot must be available in a physreg for this query to make sense.
  bool canClobberPhysReg(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }

  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register.  The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value.  We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes.  This removes information about which register the previous
  /// value for this slot lives in (as the previous value is dead now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);
};
}

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register.  The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    I++;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n";
  }
}

/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases.  The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}

/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in it.
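/// (Illustrative note, not in the original source: PhysRegsAvailable is a
/// multimap, so one physreg can hold the value of several slots at once.
/// After "store EAX -> SS#1" and "store EAX -> SS#2", for instance, clobbering
/// EAX must erase both SpillSlotsOrReMatsAvailable entries.)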
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " clobbered, invalidating ";
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
    else
      DOUT << "SS#" << SlotOrReMat << "\n";
  }
}

/// ClobberPhysReg - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}

/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes.  This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}



/// InvalidateKills - MI is going to be deleted.  If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isUse() || !MO.isKill())
      continue;
    unsigned Reg = MO.getReg();
    if (KillRegs)
      KillRegs->push_back(Reg);
    if (KillOps[Reg] == &MO) {
      RegKills.reset(Reg);
      KillOps[Reg] = NULL;
    }
  }
}

/// InvalidateKill - A MI that defines the specified register is being deleted,
/// invalidate the register kill information.
static void InvalidateKill(unsigned Reg, BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg]) {
    KillOps[Reg]->setIsKill(false);
    KillOps[Reg] = NULL;
    RegKills.reset(Reg);
  }
}

/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead.  Also checks if
/// the def MI has other definition operands that are not dead.  Returns it by
/// reference.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef) {
  // Due to remat, it's possible this reg isn't being reused.  That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (MO.isRegister() && MO.isDef()) {
      if (MO.getReg() == Reg)
        DefOp = &MO;
      else if (!MO.isDead())
        HasLiveDef = true;
    }
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isRegister() || MO.getReg() != Reg)
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true; // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // Def is dead!
    DefOp->setIsDead();
    return true;
  }
  return false;
}

/// UpdateKills - Track and update kill info.  If a MI reads a register that is
/// marked kill, then it must be due to register reuse.  Transfer the kill info
/// over.
static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps) {
  const TargetInstrDesc &TID = MI.getDesc();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->setIsKill(false);
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      if (i < TID.getNumOperands() &&
          TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
        // Unless it's a two-address operand, this is the new kill.
        MO.setIsKill();
    }
    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
  }
}

/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
///
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg));
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isRegister() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    assert(MO.isUse());
    unsigned SubIdx = MO.getSubReg();
    unsigned Phys = VRM.getPhys(VirtReg);
    assert(Phys);
    unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
    MO.setReg(RReg);
  }
  ++NumReMats;
}


// ReusedOp - For each reused operand, we keep track of a bit of information,
// in case we need to roll back upon processing a new operand.  See comments
// below.
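//
// (Illustrative example, not in the original source: suppose operand 0 reused
// SS#1's value sitting in R0 instead of reloading into its assigned R2.  If a
// later operand's reload is then told to use R0, the ReusedOp record lets
// GetRegForReload either redirect that reload to R2 or undo the reuse and
// emit an explicit load.)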
namespace {
  struct ReusedOp {
    // The MachineInstr operand that reused an available value.
    unsigned Operand;

    // StackSlotOrReMat - The spill slot or remat id of the value being reused.
    unsigned StackSlotOrReMat;

    // PhysRegReused - The physical register the value was available in.
    unsigned PhysRegReused;

    // AssignedPhysReg - The physreg that was assigned for use by the reload.
    unsigned AssignedPhysReg;

    // VirtReg - The virtual register itself.
    unsigned VirtReg;

    ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
             unsigned vreg)
      : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
        AssignedPhysReg(apr), VirtReg(vreg) {}
  };

  /// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
  /// is reused instead of reloaded.
  class VISIBILITY_HIDDEN ReuseInfo {
    MachineInstr &MI;
    std::vector<ReusedOp> Reuses;
    BitVector PhysRegsClobbered;
  public:
    ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
      PhysRegsClobbered.resize(tri->getNumRegs());
    }

    bool hasReuses() const {
      return !Reuses.empty();
    }

    /// addReuse - If we choose to reuse a virtual register that is already
    /// available instead of reloading it, remember that we did so.
    void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
                  unsigned PhysRegReused, unsigned AssignedPhysReg,
                  unsigned VirtReg) {
      // If the reload is to the assigned register anyway, no undo will be
      // required.
      if (PhysRegReused == AssignedPhysReg) return;

      // Otherwise, remember this.
      Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                                AssignedPhysReg, VirtReg));
    }

    void markClobbered(unsigned PhysReg) {
      PhysRegsClobbered.set(PhysReg);
    }

    bool isClobbered(unsigned PhysReg) const {
      return PhysRegsClobbered.test(PhysReg);
    }

    /// GetRegForReload - We are about to emit a reload into PhysReg.  If there
    /// is some other operand that is using the specified register, either pick
    /// a new register to use, or evict the previous reload and use this reg.
    unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                             AvailableSpills &Spills,
                             std::vector<MachineInstr*> &MaybeDeadStores,
                             SmallSet<unsigned, 8> &Rejected,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM) {
      const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()
                                   .getInstrInfo();

      if (Reuses.empty()) return PhysReg;  // This is most often empty.

      for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
        ReusedOp &Op = Reuses[ro];
        // If we find some other reuse that was supposed to use this register
        // exactly for its reload, we can change this reload to use ITS reload
        // register. That is, unless its reload register has already been
        // considered and subsequently rejected because it has also been reused
        // by another operand.
        if (Op.PhysRegReused == PhysReg &&
            Rejected.count(Op.AssignedPhysReg) == 0) {
          // Yup, use the reload register that we didn't use before.
          unsigned NewReg = Op.AssignedPhysReg;
          Rejected.insert(PhysReg);
          return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected,
                                 RegKills, KillOps, VRM);
        } else {
          // Otherwise, we might also have a problem if a previously reused
          // value aliases the new register.  If so, codegen the previous reload
          // and use this one.
          unsigned PRRU = Op.PhysRegReused;
          const TargetRegisterInfo *TRI = Spills.getRegInfo();
          if (TRI->areAliases(PRRU, PhysReg)) {
            // Okay, we found out that an alias of a reused register
            // was used.  This isn't good because it means we have
            // to undo a previous reuse.
            MachineBasicBlock *MBB = MI->getParent();
            const TargetRegisterClass *AliasRC =
              MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

            // Copy Op out of the vector and remove it, we're going to insert an
            // explicit load for it.
            ReusedOp NewOp = Op;
            Reuses.erase(Reuses.begin()+ro);

            // Ok, we're going to try to reload the assigned physreg into the
            // slot that we were supposed to in the first place.  However, that
            // register could hold a reuse.  Check to see if it conflicts or
            // would prefer us to use a different register.
            unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg,
                                                  MI, Spills, MaybeDeadStores,
                                              Rejected, RegKills, KillOps, VRM);

            MachineBasicBlock::iterator MII = MI;
            if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
              ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI,VRM);
            } else {
              TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg,
                                        NewOp.StackSlotOrReMat, AliasRC);
              MachineInstr *LoadMI = prior(MII);
              VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
              // Any stores to this stack slot are not dead anymore.
              MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
              ++NumLoads;
            }
            Spills.ClobberPhysReg(NewPhysReg);
            Spills.ClobberPhysReg(NewOp.PhysRegReused);

            MI->getOperand(NewOp.Operand).setReg(NewPhysReg);

            Spills.addAvailable(NewOp.StackSlotOrReMat, MI, NewPhysReg);
            --MII;
            UpdateKills(*MII, RegKills, KillOps);
            DOUT << '\t' << *MII;

            DOUT << "Reuse undone!\n";
            --NumReused;

            // Finally, PhysReg is now available, go ahead and use it.
            return PhysReg;
          }
        }
      }
      return PhysReg;
    }

    /// GetRegForReload - Helper for the above GetRegForReload(). Add a
    /// 'Rejected' set to remember which registers have been considered and
    /// rejected for the reload. This avoids infinite looping in cases like
    /// this:
    /// t1 := op t2, t3
    /// t2 <- assigned r0 for use by the reload but ended up reusing r1
    /// t3 <- assigned r1 for use by the reload but ended up reusing r0
    /// t1 <- desires r1
    ///       sees r1 is taken by t2, tries t2's reload register r0
    ///       sees r0 is taken by t3, tries t3's reload register r1
    ///       sees r1 is taken by t2, tries t2's reload register r0 ...
    unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                             AvailableSpills &Spills,
                             std::vector<MachineInstr*> &MaybeDeadStores,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM) {
      SmallSet<unsigned, 8> Rejected;
      return GetRegForReload(PhysReg, MI, Spills, MaybeDeadStores, Rejected,
                             RegKills, KillOps, VRM);
    }
  };
}

/// PrepForUnfoldOpti - Turn a store folding instruction into a load folding
/// instruction. e.g.
///     xorl  %edi, %eax
///     movl  %eax, -32(%ebp)
///     movl  -36(%ebp), %eax
///     orl   %eax, -32(%ebp)
/// ==>
///     xorl  %edi, %eax
///     orl   -36(%ebp), %eax
///     mov   %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MII,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                     AvailableSpills &Spills,
                                     BitVector &RegKills,
                                     std::vector<MachineOperand*> &KillOps,
                                     VirtRegMap &VRM) {
  MachineFunction &MF = *MBB.getParent();
  MachineInstr &MI = *MII;
  unsigned UnfoldedOpc = 0;
  unsigned UnfoldPR = 0;
  unsigned UnfoldVR = 0;
  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
    // Only transform a MI that folds a single register.
    if (UnfoldedOpc)
      return false;
    UnfoldVR = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    // MI2VirtMap can be updated, which would invalidate the iterator.
    // Increment the iterator first.
    ++I;
    if (VRM.isAssignedReg(UnfoldVR))
      continue;
    // If this reference is not a use, any previous store is now dead.
    // Otherwise, the store to this stack slot is not dead anymore.
    FoldedSS = VRM.getStackSlot(UnfoldVR);
    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
    if (DeadStore && (MR & VirtRegMap::isModRef)) {
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
      if (!PhysReg || !DeadStore->readsRegister(PhysReg))
        continue;
      UnfoldPR = PhysReg;
      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                    false, true);
    }
  }

  if (!UnfoldedOpc)
    return false;

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || MO.getReg() == 0 || !MO.isUse())
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
      continue;
    if (VRM.isAssignedReg(VirtReg)) {
      unsigned PhysReg = VRM.getPhys(VirtReg);
      if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
    } else if (VRM.isReMaterialized(VirtReg))
      continue;
    int SS = VRM.getStackSlot(VirtReg);
    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
    if (PhysReg) {
      if (TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
      continue;
    }
    PhysReg = VRM.getPhys(VirtReg);
    if (!TRI->regsOverlap(PhysReg, UnfoldPR))
      continue;

    // Ok, we'll need to reload the value into a register which makes
    // it impossible to perform the store unfolding optimization later.
    // Let's see if it is possible to fold the load if the store is
    // unfolded. This allows us to perform the store unfolding
    // optimization.
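    // (Sketch of the idea, assuming x86-style operands: unfold an instruction
    // like "orl %reg, -32(%ebp)" into its pure register form, then re-fold
    // the conflicting value's own stack slot into it, giving something like
    // "orl -36(%ebp), %reg", so UnfoldPR never needs a separate reload.)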
    SmallVector<MachineInstr*, 4> NewMIs;
    if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
      assert(NewMIs.size() == 1);
      MachineInstr *NewMI = NewMIs.back();
      NewMIs.clear();
      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
      assert(Idx != -1);
      SmallVector<unsigned, 2> Ops;
      Ops.push_back(Idx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
      if (FoldedMI) {
        VRM.addSpillSlotUse(SS, FoldedMI);
        if (!VRM.hasPhys(UnfoldVR))
          VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
        VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
        MII = MBB.insert(MII, FoldedMI);
        InvalidateKills(MI, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(&MI);
        MBB.erase(&MI);
        return true;
      }
      delete NewMI;
    }
  }
  return false;
}

/// findSuperReg - Find the SubReg's super-register of given register class
/// where its SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}

/// SpillRegToStackSlot - Spill a register to a specified stack slot.  Check if
/// the last store to the same slot is now dead.  If so, remove the last store.
void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MII,
                                  int Idx, unsigned PhysReg, int StackSlot,
                                  const TargetRegisterClass *RC,
                                  bool isAvailable, MachineInstr *&LastStore,
                                  AvailableSpills &Spills,
                                  SmallSet<MachineInstr*, 4> &ReMatDefs,
                                  BitVector &RegKills,
                                  std::vector<MachineOperand*> &KillOps,
                                  VirtRegMap &VRM) {
  TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
  MachineInstr *StoreMI = next(MII);
  VRM.addSpillSlotUse(StackSlot, StoreMI);
  DOUT << "Store:\t" << *StoreMI;

  // If there is a dead store to this stack slot, nuke it now.
  if (LastStore) {
    DOUT << "Removed dead store:\t" << *LastStore;
    ++NumDSE;
    SmallVector<unsigned, 2> KillRegs;
    InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
    MachineBasicBlock::iterator PrevMII = LastStore;
    bool CheckDef = PrevMII != MBB.begin();
    if (CheckDef)
      --PrevMII;
    VRM.RemoveMachineInstrFromMaps(LastStore);
    MBB.erase(LastStore);
    if (CheckDef) {
      // Look at defs of killed registers on the store. Mark the defs
      // as dead since the store has been deleted and they aren't
      // being reused.
      for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
        bool HasOtherDef = false;
        if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
          MachineInstr *DeadDef = PrevMII;
          if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
            // FIXME: This assumes a remat def does not have side
            // effects.
            VRM.RemoveMachineInstrFromMaps(DeadDef);
            MBB.erase(DeadDef);
            ++NumDRM;
          }
        }
      }
    }
  }

  LastStore = next(MII);

  // If the stack slot value was previously available in some other
  // register, change it now.  Otherwise, make the register available,
  // in PhysReg.
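  // (Descriptive note: the three calls below implement exactly that.
  // ModifyStackSlotOrReMat drops the slot's old physreg mapping,
  // ClobberPhysReg invalidates whatever other slots PhysReg was holding, and
  // addAvailable records the fresh StackSlot -> PhysReg pairing.)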
  Spills.ModifyStackSlotOrReMat(StackSlot);
  Spills.ClobberPhysReg(PhysReg);
  Spills.addAvailable(StackSlot, LastStore, PhysReg, isAvailable);
  ++NumStores;
}

/// TransferDeadness - An identity copy definition is dead and it's being
/// removed.  Find the last def or use and mark it as dead / kill.
void LocalSpiller::TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                                    unsigned Reg, BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps) {
  int LastUDDist = -1;
  MachineInstr *LastUDMI = NULL;
  for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
         RE = RegInfo->reg_end(); RI != RE; ++RI) {
    MachineInstr *UDMI = &*RI;
    if (UDMI->getParent() != MBB)
      continue;
    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
    if (DI == DistanceMap.end() || DI->second > CurDist)
      continue;
    if ((int)DI->second < LastUDDist)
      continue;
    LastUDDist = DI->second;
    LastUDMI = UDMI;
  }

  if (LastUDMI) {
    const TargetInstrDesc &TID = LastUDMI->getDesc();
    MachineOperand *LastUD = NULL;
    for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = LastUDMI->getOperand(i);
      if (!MO.isRegister() || MO.getReg() != Reg)
        continue;
      if (!LastUD || (LastUD->isUse() && MO.isDef()))
        LastUD = &MO;
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1)
        return;
    }
    if (LastUD->isDef())
      LastUD->setIsDead();
    else {
      LastUD->setIsKill();
      RegKills.set(Reg);
      KillOps[Reg] = LastUD;
    }
  }
}

/// RewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them.  If possible, avoid reloading vregs.
void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
  DOUT << MBB.getBasicBlock()->getName() << ":\n";

  MachineFunction &MF = *MBB.getParent();

  // Spills - Keep track of which spilled values are available in physregs so
  // that we can choose to reuse the physregs instead of emitting reloads.
  AvailableSpills Spills(TRI, TII);

  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store.  If the stack slot value is never read
  // (because the value was used from some available register, for example), and
  // subsequently stored to, the original store is dead.  This map keeps track
  // of inserted stores that are not used.  If we see a subsequent store to the
  // same stack slot, the original store is deleted.
  std::vector<MachineInstr*> MaybeDeadStores;
  MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

  // ReMatDefs - These are rematerializable def MIs which are not deleted.
  SmallSet<MachineInstr*, 4> ReMatDefs;

  // Keep track of kill information.
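  // (Descriptive note: RegKills is indexed by physical register number, and
  // KillOps[Reg] points at the operand currently carrying Reg's kill flag so
  // the flag can be cleared in place if the value turns out to be reused.)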
  BitVector RegKills(TRI->getNumRegs());
  std::vector<MachineOperand*> KillOps;
  KillOps.resize(TRI->getNumRegs(), NULL);

  unsigned Dist = 0;
  DistanceMap.clear();
  for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
       MII != E; ) {
    MachineBasicBlock::iterator NextMII = MII; ++NextMII;

    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    bool Erased = false;
    bool BackTracked = false;
    if (PrepForUnfoldOpti(MBB, MII,
                          MaybeDeadStores, Spills, RegKills, KillOps, VRM))
      NextMII = next(MII);

    MachineInstr &MI = *MII;
    const TargetInstrDesc &TID = MI.getDesc();

    if (VRM.hasEmergencySpills(&MI)) {
      // Spill physical register(s) in the rare case the allocator has run out
      // of registers to allocate.
      SmallSet<int, 4> UsedSS;
      std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
      for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
        unsigned PhysReg = EmSpills[i];
        const TargetRegisterClass *RC =
          TRI->getPhysicalRegisterRegClass(PhysReg);
        assert(RC && "Unable to determine register class!");
        int SS = VRM.getEmergencySpillSlot(RC);
        if (UsedSS.count(SS))
          assert(0 && "Need to spill more than one physical register!");
        UsedSS.insert(SS);
        TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
        MachineInstr *StoreMI = prior(MII);
        VRM.addSpillSlotUse(SS, StoreMI);
        TII->loadRegFromStackSlot(MBB, next(MII), PhysReg, SS, RC);
        MachineInstr *LoadMI = next(MII);
        VRM.addSpillSlotUse(SS, LoadMI);
        ++NumPSpills;
      }
      NextMII = next(MII);
    }

    // Insert restores here if asked to.
    if (VRM.isRestorePt(&MI)) {
      std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
      for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
        unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
        if (!VRM.getPreSplitReg(VirtReg))
          continue; // Split interval spilled again.
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);
        if (VRM.isReMaterialized(VirtReg)) {
          ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM);
        } else {
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          int SS = VRM.getStackSlot(VirtReg);
          TII->loadRegFromStackSlot(MBB, &MI, Phys, SS, RC);
          MachineInstr *LoadMI = prior(MII);
          VRM.addSpillSlotUse(SS, LoadMI);
          ++NumLoads;
        }
        // This invalidates Phys.
        Spills.ClobberPhysReg(Phys);
        UpdateKills(*prior(MII), RegKills, KillOps);
        DOUT << '\t' << *prior(MII);
      }
    }

    // Insert spills here if asked to.
    if (VRM.isSpillPt(&MI)) {
      std::vector<std::pair<unsigned,bool> > &SpillRegs =
        VRM.getSpillPtSpills(&MI);
      for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
        unsigned VirtReg = SpillRegs[i].first;
        bool isKill = SpillRegs[i].second;
        if (!VRM.getPreSplitReg(VirtReg))
          continue; // Split interval spilled again.
        const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
        unsigned Phys = VRM.getPhys(VirtReg);
        int StackSlot = VRM.getStackSlot(VirtReg);
        TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
        MachineInstr *StoreMI = next(MII);
        VRM.addSpillSlotUse(StackSlot, StoreMI);
        DOUT << "Store:\t" << *StoreMI;
        VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
      }
      NextMII = next(MII);
    }

    /// ReusedOperands - Keep track of operand reuse in case we need to undo
    /// reuse.
    ReuseInfo ReusedOperands(MI, TRI);
    SmallVector<unsigned, 4> VirtUseOps;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isRegister() || MO.getReg() == 0)
        continue;   // Ignore non-register operands.

      unsigned VirtReg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
        // Ignore physregs for spilling, but remember that it is used by this
        // function.
        RegInfo->setPhysRegUsed(VirtReg);
        continue;
      }

      // We want to process implicit virtual register uses first.
      if (MO.isImplicit())
        // If the virtual register is implicitly defined, emit an implicit_def
        // beforehand so the scavenger knows it's "defined".
        VirtUseOps.insert(VirtUseOps.begin(), i);
      else
        VirtUseOps.push_back(i);
    }

    // Process all of the spilled uses and all non-spilled reg references.
    for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
      unsigned i = VirtUseOps[j];
      MachineOperand &MO = MI.getOperand(i);
      unsigned VirtReg = MO.getReg();
      assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
             "Not a virtual register?");

      unsigned SubIdx = MO.getSubReg();
      if (VRM.isAssignedReg(VirtReg)) {
        // This virtual register was assigned a physreg!
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);
        if (MO.isDef())
          ReusedOperands.markClobbered(Phys);
        unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
        MI.getOperand(i).setReg(RReg);
        if (VRM.isImplicitlyDefined(VirtReg))
          BuildMI(MBB, MI, TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
        continue;
      }

      // This virtual register is now known to be a spilled value.
      if (!MO.isUse())
        continue;  // Handle defs in the loop below (handle use&def here though)

      bool DoReMat = VRM.isReMaterialized(VirtReg);
      int SSorRMId = DoReMat
        ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
      int ReuseSlot = SSorRMId;

      // Check to see if this stack slot is available.
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);

      // If this is a sub-register use, make sure the reuse register is in the
      // right register class. For example, for x86 not all of the 32-bit
      // registers have accessible sub-registers.
      // Similarly so for EXTRACT_SUBREG. Consider this:
      //   EDI = op
      //   MOV32_mr fi#1, EDI
      //   ...
      //       = EXTRACT_SUBREG fi#1
      // fi#1 is available in EDI, but it cannot be reused because it's not in
      // the right register file.
      if (PhysReg &&
          (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        if (!RC->contains(PhysReg))
          PhysReg = 0;
      }

      if (PhysReg) {
        // This spilled operand might be part of a two-address operand.  If this
        // is the case, then changing it will necessarily require changing the
        // def part of the instruction as well.  However, in some cases, we
        // aren't allowed to modify the reused register.  If none of these cases
        // apply, reuse it.
        bool CanReuse = true;
        int ti = TID.getOperandConstraint(i, TOI::TIED_TO);
        if (ti != -1 &&
            MI.getOperand(ti).isRegister() &&
            MI.getOperand(ti).getReg() == VirtReg) {
          // Okay, we have a two address operand.  We can reuse this physreg as
          // long as we are allowed to clobber the value and there isn't an
          // earlier def that has already clobbered the physreg.
          CanReuse = Spills.canClobberPhysReg(ReuseSlot) &&
            !ReusedOperands.isClobbered(PhysReg);
        }

        if (CanReuse) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg "
               << TRI->getName(PhysReg) << " for vreg"
               << VirtReg << " instead of reloading into physreg "
               << TRI->getName(VRM.getPhys(VirtReg)) << "\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);

          // The only technical detail we have is that we don't know that
          // PhysReg won't be clobbered by a reloaded stack slot that occurs
          // later in the instruction.  In particular, consider 'op V1, V2'.
          // If V1 is available in physreg R0, we would choose to reuse it
          // here, instead of reloading it into the register the allocator
          // indicated (say R1).  However, V2 might have to be reloaded
          // later, and it might indicate that it needs to live in R0.  When
          // this occurs, we need to have information available that
          // indicates it is safe to use R1 for the reload instead of R0.
          //
          // To further complicate matters, we might conflict with an alias,
          // or R0 and R1 might not be compatible with each other.  In this
          // case, we actually insert a reload for V1 in R1, ensuring that
          // we can get at R0 or its alias.
          ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                  VRM.getPhys(VirtReg), VirtReg);
          if (ti != -1)
            // Only mark it clobbered if this is a use&def operand.
            ReusedOperands.markClobbered(PhysReg);
          ++NumReused;

          if (MI.getOperand(i).isKill() &&
              ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
            // This was the last use and the spilled value is still available
            // for reuse. That means the spill was unnecessary!
            MachineInstr* DeadStore = MaybeDeadStores[ReuseSlot];
            if (DeadStore) {
              DOUT << "Removed dead store:\t" << *DeadStore;
              InvalidateKills(*DeadStore, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(DeadStore);
              MBB.erase(DeadStore);
              MaybeDeadStores[ReuseSlot] = NULL;
              ++NumDSE;
            }
          }
          continue;
        }  // CanReuse

        // Otherwise we have a situation where we have a two-address instruction
        // whose mod/ref operand needs to be reloaded.  This reload is already
        // available in some register "PhysReg", but if we used PhysReg as the
        // operand to our 2-addr instruction, the instruction would modify
        // PhysReg.  This isn't cool if something later uses PhysReg and expects
        // to get its initial value.
        //
        // To avoid this problem, and to avoid doing a load right after a store,
        // we emit a copy from PhysReg into the designated register for this
        // operand.
        unsigned DesignatedReg = VRM.getPhys(VirtReg);
        assert(DesignatedReg && "Must map virtreg to physreg!");

        // Note that, if we reused a register for a previous operand, the
        // register we want to reload into might not actually be
        // available.  If this occurs, use the register indicated by the
        // reuser.
        if (ReusedOperands.hasReuses())
          DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);

        // If the mapped designated register is actually the physreg we have
        // incoming, we don't need to insert a dead copy.
        if (DesignatedReg == PhysReg) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg " << TRI->getName(PhysReg)
               << " for vreg" << VirtReg
               << " instead of reloading into same physreg.\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
          ReusedOperands.markClobbered(RReg);
          ++NumReused;
          continue;
        }

        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        RegInfo->setPhysRegUsed(DesignatedReg);
        ReusedOperands.markClobbered(DesignatedReg);
        TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);

        MachineInstr *CopyMI = prior(MII);
        UpdateKills(*CopyMI, RegKills, KillOps);

        // This invalidates DesignatedReg.
        Spills.ClobberPhysReg(DesignatedReg);

        Spills.addAvailable(ReuseSlot, &MI, DesignatedReg);
        unsigned RReg =
          SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
        MI.getOperand(i).setReg(RReg);
        DOUT << '\t' << *prior(MII);
        ++NumReused;
        continue;
      } // if (PhysReg)

      // Otherwise, reload it and remember that we have it.
      PhysReg = VRM.getPhys(VirtReg);
      assert(PhysReg && "Must map virtreg to physreg!");

      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
      // available.  If this occurs, use the register indicated by the
      // reuser.
      if (ReusedOperands.hasReuses())
        PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);

      RegInfo->setPhysRegUsed(PhysReg);
      ReusedOperands.markClobbered(PhysReg);
      if (DoReMat) {
        ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM);
      } else {
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
        MachineInstr *LoadMI = prior(MII);
        VRM.addSpillSlotUse(SSorRMId, LoadMI);
        ++NumLoads;
      }
      // This invalidates PhysReg.
      Spills.ClobberPhysReg(PhysReg);

      // Any stores to this stack slot are not dead anymore.
      if (!DoReMat)
        MaybeDeadStores[SSorRMId] = NULL;
      Spills.addAvailable(SSorRMId, &MI, PhysReg);
      // Assumes this is the last use. IsKill will be unset if reg is reused
      // unless it's a two-address operand.
      if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
        MI.getOperand(i).setIsKill();
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      MI.getOperand(i).setReg(RReg);
      UpdateKills(*prior(MII), RegKills, KillOps);
      DOUT << '\t' << *prior(MII);
    }

    DOUT << '\t' << MI;


    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register
    SmallSet<int, 2> FoldedSS;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      DOUT << "Folded vreg: " << VirtReg << "  MR: " << MR;

      // MI2VirtMap can be updated, which would invalidate the iterator.
      // Increment the iterator first.
      ++I;
      int SS = VRM.getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
      DOUT << " - StackSlot: " << SS << "\n";

      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DOUT << "Promoted Load To Copy: " << MI;
            if (DestReg != InReg) {
              const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
              TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              NextMII = &MI;
              --NextMII;  // backtrack to the copy.
              BackTracked = true;
            } else {
              DOUT << "Removing now-noop copy: " << MI;
              // Unset last kill since it's being reused.
              InvalidateKill(InReg, RegKills, KillOps);
            }

            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            goto ProcessNextInst;
          }
        } else {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          if (PhysReg &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
            MBB.insert(MII, NewMIs[0]);
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            goto ProcessNextInst;
          }
        }
      }

      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      if (DeadStore) {
        bool isDead = !(MR & VirtRegMap::isRef);
        MachineInstr *NewStore = NULL;
        if (MR & VirtRegMap::isModRef) {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          // We can reuse this physreg as long as we are allowed to clobber
          // the value and there isn't an earlier def that has already clobbered
          // the physreg.
          if (PhysReg &&
              !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
            MachineOperand *KillOpnd =
              DeadStore->findRegisterUseOperand(PhysReg, true);
            // Note, if the store is storing a sub-register, it's possible the
            // super-register is needed below.
            if (KillOpnd && !KillOpnd->getSubReg() &&
                TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
              MBB.insert(MII, NewMIs[0]);
              NewStore = NewMIs[1];
              MBB.insert(MII, NewStore);
              VRM.addSpillSlotUse(SS, NewStore);
              InvalidateKills(MI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              --NextMII;
              --NextMII;  // backtrack to the unfolded instruction.
              BackTracked = true;
              isDead = true;
            }
          }
        }

        if (isDead) {  // Previous store is dead.
          // If we get here, the store is dead, nuke it now.
          DOUT << "Removed dead store:\t" << *DeadStore;
          InvalidateKills(*DeadStore, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(DeadStore);
          MBB.erase(DeadStore);
          if (!NewStore)
            ++NumDSE;
        }

        MaybeDeadStores[SS] = NULL;
        if (NewStore) {
          // Treat this store as a spill merged into a copy. That makes the
          // stack slot value available.
          VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
          goto ProcessNextInst;
        }
      }

      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy). If
        // so, realize that the vreg is available now, and add the store to the
        // MaybeDeadStore info.
        int StackSlot;
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");
            // Okay, this is certainly a store of SrcReg to [StackSlot].  Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now.  Otherwise, make the register available,
            // in PhysReg.
            Spills.addAvailable(StackSlot, &MI, SrcReg, false/*don't clobber*/);
          }
        }
      }
    }

    // Process all of the spilled defs.
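    // A def of a physical register may leave the instruction a no-op copy we
    // can delete outright; a def of a spilled virtual register is rewritten to
    // its assigned physreg and, unless the def is dead, stored back to its
    // stack slot.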
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isRegister() && MO.getReg() && MO.isDef()))
        continue;

      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy.  If so, eliminate the
        // instruction before considering the dest reg to be changed.
        unsigned Src, Dst;
        if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
          ++NumDCE;
          DOUT << "Removing now-noop copy: " << MI;
          SmallVector<unsigned, 2> KillRegs;
          InvalidateKills(MI, RegKills, KillOps, &KillRegs);
          if (MO.isDead() && !KillRegs.empty()) {
            assert(KillRegs[0] == Dst);
            // Last def is now dead.
            TransferDeadness(&MBB, Dist, Src, RegKills, KillOps);
          }
          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          Erased = true;
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }

        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register.  If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, &MI, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }

      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM.isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM.getStackSlot(VirtReg);
      const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
      unsigned PhysReg;
      int TiedOp = MI.getDesc().findTiedToSrcOperand(i);
      if (TiedOp != -1) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM.getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                      Spills, MaybeDeadStores, RegKills, KillOps, VRM);
        }
      }

      assert(PhysReg && "VR not assigned a physical register?");
      RegInfo->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);
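      // Only a live def needs a store back to the stack slot; a dead def
      // leaves the slot's previous contents in place.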
      if (!MO.isDead()) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
                            LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
        NextMII = next(MII);

        // Check to see if this is a noop copy.  If so, eliminate the
        // instruction before considering the dest reg to be changed.
        {
          unsigned Src, Dst;
          if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
            ++NumDCE;
            DOUT << "Removing now-noop copy: " << MI;
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            UpdateKills(*LastStore, RegKills, KillOps);
            goto ProcessNextInst;
          }
        }
      }
    }
  ProcessNextInst:
    DistanceMap.insert(std::make_pair(&MI, Dist++));
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = MI; II != NextMII; ++II)
        UpdateKills(*II, RegKills, KillOps);
    }
    MII = NextMII;
  }
}

llvm::Spiller* llvm::createSpiller() {
  switch (SpillerOpt) {
  default: assert(0 && "Unreachable!");
  case local:
    return new LocalSpiller();
  case simple:
    return new SimpleSpiller();
  }
}
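
// A minimal usage sketch (not part of this file): register allocators obtain
// a spiller through this factory and run it once allocation has assigned
// physregs and stack slots. The runOnMachineFunction signature is assumed
// from the Spiller interface declared in VirtRegMap.h.
//
//   Spiller *S = createSpiller();               // honors -spiller=simple|local
//   bool Changed = S->runOnMachineFunction(MF, VRM);
//   delete S;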