VirtRegMap.cpp revision 547df1a7004df315a73894f377d31cc5a9fd9b5b
//===-- llvm/CodeGen/VirtRegMap.cpp - Virtual Register Map ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the VirtRegMap class.
//
// It also contains implementations of the Spiller interface, which, given a
// virtual register map and a machine function, eliminates all virtual
// references by replacing them with physical register references - adding spill
// code as necessary.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "VirtRegMap.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumSpills  , "Number of register spills");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDCE     , "Number of copies elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");

namespace {
  enum SpillerName { simple, local };
}

static cl::opt<SpillerName>
SpillerOpt("spiller",
           cl::desc("Spiller to use: (default: local)"),
           cl::Prefix,
           cl::values(clEnumVal(simple, "  simple spiller"),
                      clEnumVal(local,  "  local spiller"),
                      clEnumValEnd),
           cl::init(local));

//===----------------------------------------------------------------------===//
//  VirtRegMap implementation
//===----------------------------------------------------------------------===//

VirtRegMap::VirtRegMap(MachineFunction &mf)
  : TII(*mf.getTarget().getInstrInfo()), MF(mf),
    Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT),
    Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
    Virt2SplitKillMap(0), ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1),
    LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) {
  SpillSlotToUsesMap.resize(8);
  ImplicitDefed.resize(MF.getRegInfo().getLastVirtReg()+1-
                       TargetRegisterInfo::FirstVirtualRegister);
  grow();
}

void VirtRegMap::grow() {
  unsigned LastVirtReg = MF.getRegInfo().getLastVirtReg();
  Virt2PhysMap.grow(LastVirtReg);
  Virt2StackSlotMap.grow(LastVirtReg);
  Virt2ReMatIdMap.grow(LastVirtReg);
  Virt2SplitMap.grow(LastVirtReg);
  Virt2SplitKillMap.grow(LastVirtReg);
  ReMatMap.grow(LastVirtReg);
  ImplicitDefed.resize(LastVirtReg-TargetRegisterInfo::FirstVirtualRegister+1);
}

int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
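  // Create a fresh stack object for virtReg, track the low/high spill slot
  // water marks, and grow SpillSlotToUsesMap so the new slot has a use set.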
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(virtReg);
  int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                RC->getAlignment());
  if (LowSpillSlot == NO_STACK_SLOT)
    LowSpillSlot = SS;
  if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
    HighSpillSlot = SS;
  unsigned Idx = SS-LowSpillSlot;
  while (Idx >= SpillSlotToUsesMap.size())
    SpillSlotToUsesMap.resize(SpillSlotToUsesMap.size()*2);
  Virt2StackSlotMap[virtReg] = SS;
  ++NumSpills;
  return SS;
}

void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int SS) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  assert((SS >= 0 ||
          (SS >= MF.getFrameInfo()->getObjectIndexBegin())) &&
         "illegal fixed frame index");
  Virt2StackSlotMap[virtReg] = SS;
}

int VirtRegMap::assignVirtReMatId(unsigned virtReg) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign re-mat id to already spilled register");
  Virt2ReMatIdMap[virtReg] = ReMatId;
  return ReMatId++;
}

void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign re-mat id to already spilled register");
  Virt2ReMatIdMap[virtReg] = id;
}

int VirtRegMap::getEmergencySpillSlot(const TargetRegisterClass *RC) {
  std::map<const TargetRegisterClass*, int>::iterator I =
    EmergencySpillSlots.find(RC);
  if (I != EmergencySpillSlots.end())
    return I->second;
  int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                RC->getAlignment());
  if (LowSpillSlot == NO_STACK_SLOT)
    LowSpillSlot = SS;
  if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
    HighSpillSlot = SS;
  // Writing through I here would dereference end(); record the new slot in
  // the map directly.
  EmergencySpillSlots[RC] = SS;
  return SS;
}

void VirtRegMap::addSpillSlotUse(int FI, MachineInstr *MI) {
  if (!MF.getFrameInfo()->isFixedObjectIndex(FI)) {
    // If FI < LowSpillSlot, this stack reference was produced by
    // instruction selection and is not a spill.
    if (FI >= LowSpillSlot) {
      assert(FI >= 0 && "Spill slot index should not be negative!");
      assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
             && "Invalid spill slot");
      SpillSlotToUsesMap[FI-LowSpillSlot].insert(MI);
    }
  }
}

void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
                            MachineInstr *NewMI, ModRef MRInfo) {
  // Move previous memory references folded to new instruction.
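  // All folded-operand records for OldMI are adjacent in MI2VirtMap (it is
  // keyed by MachineInstr*), so walk that range and re-insert each record
  // under NewMI.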
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
  for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
         E = MI2VirtMap.end(); I != E && I->first == OldMI; ) {
    MI2VirtMap.insert(IP, std::make_pair(NewMI, I->second));
    MI2VirtMap.erase(I++);
  }

  // Add the new memory reference.
  MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
}

void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo) {
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(MI);
  MI2VirtMap.insert(IP, std::make_pair(MI, std::make_pair(VirtReg, MRInfo)));
}

void VirtRegMap::RemoveMachineInstrFromMaps(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFrameIndex())
      continue;
    int FI = MO.getIndex();
    if (MF.getFrameInfo()->isFixedObjectIndex(FI))
      continue;
    // This stack reference was produced by instruction selection and
    // is not a spill.
    if (FI < LowSpillSlot)
      continue;
    assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
           && "Invalid spill slot");
    SpillSlotToUsesMap[FI-LowSpillSlot].erase(MI);
  }
  MI2VirtMap.erase(MI);
  SpillPt2VirtMap.erase(MI);
  RestorePt2VirtMap.erase(MI);
  EmergencySpillMap.erase(MI);
}

void VirtRegMap::print(std::ostream &OS) const {
  const TargetRegisterInfo* TRI = MF.getTarget().getRegisterInfo();

  OS << "********** REGISTER MAP **********\n";
  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
         e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i) {
    if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG)
      OS << "[reg" << i << " -> " << TRI->getName(Virt2PhysMap[i])
         << "]\n";
  }

  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
         e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i)
    if (Virt2StackSlotMap[i] != VirtRegMap::NO_STACK_SLOT)
      OS << "[reg" << i << " -> fi#" << Virt2StackSlotMap[i] << "]\n";
  OS << '\n';
}

void VirtRegMap::dump() const {
  print(cerr);
}


//===----------------------------------------------------------------------===//
// Simple Spiller Implementation
//===----------------------------------------------------------------------===//

Spiller::~Spiller() {}

namespace {
  struct VISIBILITY_HIDDEN SimpleSpiller : public Spiller {
    bool runOnMachineFunction(MachineFunction& mf, VirtRegMap &VRM);
  };
}

bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
  DOUT << "********** REWRITE MACHINE CODE **********\n";
  DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();

  // LoadedRegs - Keep track of which vregs are loaded, so that we only load
  // each vreg once (in the case where a spilled vreg is used by multiple
  // operands).  This is always smaller than the number of operands to the
  // current machine instr, so it should be small.
  std::vector<unsigned> LoadedRegs;

  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    DOUT << MBBI->getBasicBlock()->getName() << ":\n";
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator MII = MBB.begin(),
           E = MBB.end(); MII != E; ++MII) {
      MachineInstr &MI = *MII;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (MO.isRegister() && MO.getReg()) {
          if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
            unsigned VirtReg = MO.getReg();
            unsigned PhysReg = VRM.getPhys(VirtReg);
            if (!VRM.isAssignedReg(VirtReg)) {
              int StackSlot = VRM.getStackSlot(VirtReg);
              const TargetRegisterClass* RC =
                MF.getRegInfo().getRegClass(VirtReg);

              if (MO.isUse() &&
                  std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
                  == LoadedRegs.end()) {
                TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
                MachineInstr *LoadMI = prior(MII);
                VRM.addSpillSlotUse(StackSlot, LoadMI);
                LoadedRegs.push_back(VirtReg);
                ++NumLoads;
                DOUT << '\t' << *LoadMI;
              }

              if (MO.isDef()) {
                TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
                                        StackSlot, RC);
                MachineInstr *StoreMI = next(MII);
                VRM.addSpillSlotUse(StackSlot, StoreMI);
                ++NumStores;
              }
            }
            MF.getRegInfo().setPhysRegUsed(PhysReg);
            MI.getOperand(i).setReg(PhysReg);
          } else {
            MF.getRegInfo().setPhysRegUsed(MO.getReg());
          }
        }
      }

      DOUT << '\t' << MI;
      LoadedRegs.clear();
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
// Local Spiller Implementation
//===----------------------------------------------------------------------===//

namespace {
  class AvailableSpills;

  /// LocalSpiller - This spiller does a simple pass over the machine basic
  /// block to attempt to keep spills in registers as much as possible for
  /// blocks that have low register pressure (the vreg may be spilled due to
  /// register pressure in other blocks).
  class VISIBILITY_HIDDEN LocalSpiller : public Spiller {
    MachineRegisterInfo *RegInfo;
    const TargetRegisterInfo *TRI;
    const TargetInstrInfo *TII;
    DenseMap<MachineInstr*, unsigned> DistanceMap;
  public:
    bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
      RegInfo = &MF.getRegInfo();
      TRI = MF.getTarget().getRegisterInfo();
      TII = MF.getTarget().getInstrInfo();
      DOUT << "\n**** Local spiller rewriting function '"
           << MF.getFunction()->getName() << "':\n";
      DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
              " ****\n";
      DEBUG(MF.dump());

      for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
           MBB != E; ++MBB)
        RewriteMBB(*MBB, VRM);

      // Mark unused spill slots.
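      // Any slot in [LowSpillSlot, HighSpillSlot] that never received an
      // entry in SpillSlotToUsesMap can be deleted from the frame entirely.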
      MachineFrameInfo *MFI = MF.getFrameInfo();
      int SS = VRM.getLowSpillSlot();
      if (SS != VirtRegMap::NO_STACK_SLOT)
        for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
          if (!VRM.isSpillSlotUsed(SS)) {
            MFI->RemoveStackObject(SS);
            ++NumDSS;
          }

      DOUT << "**** Post Machine Instrs ****\n";
      DEBUG(MF.dump());

      return true;
    }
  private:
    void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                          unsigned Reg, BitVector &RegKills,
                          std::vector<MachineOperand*> &KillOps);
    bool PrepForUnfoldOpti(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           AvailableSpills &Spills, BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);
    bool CommuteToFoldReload(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator &MII,
                             unsigned VirtReg, unsigned SrcReg, int SS,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             const TargetRegisterInfo *TRI,
                             VirtRegMap &VRM);
    void SpillRegToStackSlot(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator &MII,
                             int Idx, unsigned PhysReg, int StackSlot,
                             const TargetRegisterClass *RC,
                             bool isAvailable, MachineInstr *&LastStore,
                             AvailableSpills &Spills,
                             SmallSet<MachineInstr*, 4> &ReMatDefs,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM);
    void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM);
  };
}

/// AvailableSpills - As the local spiller is scanning and rewriting an MBB from
/// top down, keep track of which spill slots or remats are available in each
/// register.
///
/// Note that not all physregs are created equal here.  In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register allocated program is using that
/// we cannot CHANGE, but we can read if we like.  We keep track of this on a
/// per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsAvailable entries.  The predicate 'canClobberPhysReg()' checks
/// this bit and addAvailable sets it if CanClobber is true.
namespace {
class VISIBILITY_HIDDEN AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to being
  // loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg.  This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);
public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }

  const TargetRegisterInfo *getRegInfo() const { return TRI; }

  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
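  ///
  /// Encoding example (illustrative): after addAvailable(3, MI, 17, true),
  /// SpillSlotsOrReMatsAvailable[3] == (17 << 1) | 1; the shift below strips
  /// the CanClobber bit to recover physreg 17.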
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }

  /// addAvailable - Mark that the specified stack slot / remat is available in
  /// the specified physreg.  If CanClobber is true, the physreg can be modified
  /// at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, MachineInstr *MI, unsigned Reg,
                    bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat]= (Reg << 1) | (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "Remembering RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1;
    else
      DOUT << "Remembering SS#" << SlotOrReMat;
    DOUT << " in physreg " << TRI->getName(Reg) << "\n";
  }

  /// canClobberPhysReg - Return true if the spiller is allowed to change the
  /// value of the specified stackslot register if it desires.  The specified
  /// stack slot must be available in a physreg for this query to make sense.
  bool canClobberPhysReg(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }

  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register.  The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value.  We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes.  This removes information about which register the previous
  /// value for this slot lives in (as the previous value is dead now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);
};
}

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register.  The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    I++;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n";
  }
}

/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases.  The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}

/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " clobbered, invalidating ";
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
    else
      DOUT << "SS#" << SlotOrReMat << "\n";
  }
}

/// ClobberPhysReg - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}

/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes.  This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}



/// InvalidateKills - MI is going to be deleted.  If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isUse() || !MO.isKill())
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (KillRegs)
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {
      RegKills.reset(Reg);
      KillOps[Reg] = NULL;
    }
  }
}

/// InvalidateKill - An MI that defines the specified register is being deleted,
/// invalidate the register kill information.
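/// (Unlike InvalidateKills above, which scans a dying instruction's operands,
/// this looks the kill up directly by register number.)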
static void InvalidateKill(unsigned Reg, BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg]) {
    KillOps[Reg]->setIsKill(false);
    KillOps[Reg] = NULL;
    RegKills.reset(Reg);
  }
}

/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead.  Also checks if
/// the def MI has other definition operands that are not dead.  Returns it by
/// reference.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef) {
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (MO.isRegister() && MO.isDef()) {
      if (MO.getReg() == Reg)
        DefOp = &MO;
      else if (!MO.isDead())
        HasLiveDef = true;
    }
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = &NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isRegister() || MO.getReg() != Reg)
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true; // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // Def is dead!
    DefOp->setIsDead();
    return true;
  }
  return false;
}

/// UpdateKills - Track and update kill info.  If an MI reads a register that is
/// marked kill, then it must be due to register reuse.  Transfer the kill info
/// over.
static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps) {
  const TargetInstrDesc &TID = MI.getDesc();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->setIsKill(false);
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      if (i < TID.getNumOperands() &&
          TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
        // Unless it's a two-address operand, this is the new kill.
        MO.setIsKill();
    }
    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
  }
}

/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
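/// Any virtual-register operands remaining on the cloned instruction are
/// rewritten below to the physregs the VirtRegMap assigned them.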
673/// 674static void ReMaterialize(MachineBasicBlock &MBB, 675 MachineBasicBlock::iterator &MII, 676 unsigned DestReg, unsigned Reg, 677 const TargetInstrInfo *TII, 678 const TargetRegisterInfo *TRI, 679 VirtRegMap &VRM) { 680 TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg)); 681 MachineInstr *NewMI = prior(MII); 682 for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) { 683 MachineOperand &MO = NewMI->getOperand(i); 684 if (!MO.isRegister() || MO.getReg() == 0) 685 continue; 686 unsigned VirtReg = MO.getReg(); 687 if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) 688 continue; 689 assert(MO.isUse()); 690 unsigned SubIdx = MO.getSubReg(); 691 unsigned Phys = VRM.getPhys(VirtReg); 692 assert(Phys); 693 unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys; 694 MO.setReg(RReg); 695 } 696 ++NumReMats; 697} 698 699 700// ReusedOp - For each reused operand, we keep track of a bit of information, in 701// case we need to rollback upon processing a new operand. See comments below. 702namespace { 703 struct ReusedOp { 704 // The MachineInstr operand that reused an available value. 705 unsigned Operand; 706 707 // StackSlotOrReMat - The spill slot or remat id of the value being reused. 708 unsigned StackSlotOrReMat; 709 710 // PhysRegReused - The physical register the value was available in. 711 unsigned PhysRegReused; 712 713 // AssignedPhysReg - The physreg that was assigned for use by the reload. 714 unsigned AssignedPhysReg; 715 716 // VirtReg - The virtual register itself. 717 unsigned VirtReg; 718 719 ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr, 720 unsigned vreg) 721 : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr), 722 AssignedPhysReg(apr), VirtReg(vreg) {} 723 }; 724 725 /// ReuseInfo - This maintains a collection of ReuseOp's for each operand that 726 /// is reused instead of reloaded. 727 class VISIBILITY_HIDDEN ReuseInfo { 728 MachineInstr &MI; 729 std::vector<ReusedOp> Reuses; 730 BitVector PhysRegsClobbered; 731 public: 732 ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) { 733 PhysRegsClobbered.resize(tri->getNumRegs()); 734 } 735 736 bool hasReuses() const { 737 return !Reuses.empty(); 738 } 739 740 /// addReuse - If we choose to reuse a virtual register that is already 741 /// available instead of reloading it, remember that we did so. 742 void addReuse(unsigned OpNo, unsigned StackSlotOrReMat, 743 unsigned PhysRegReused, unsigned AssignedPhysReg, 744 unsigned VirtReg) { 745 // If the reload is to the assigned register anyway, no undo will be 746 // required. 747 if (PhysRegReused == AssignedPhysReg) return; 748 749 // Otherwise, remember this. 750 Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused, 751 AssignedPhysReg, VirtReg)); 752 } 753 754 void markClobbered(unsigned PhysReg) { 755 PhysRegsClobbered.set(PhysReg); 756 } 757 758 bool isClobbered(unsigned PhysReg) const { 759 return PhysRegsClobbered.test(PhysReg); 760 } 761 762 /// GetRegForReload - We are about to emit a reload into PhysReg. If there 763 /// is some other operand that is using the specified register, either pick 764 /// a new register to use, or evict the previous reload and use this reg. 
    unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                             AvailableSpills &Spills,
                             std::vector<MachineInstr*> &MaybeDeadStores,
                             SmallSet<unsigned, 8> &Rejected,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM) {
      const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()
                                   .getInstrInfo();

      if (Reuses.empty()) return PhysReg;  // This is most often empty.

      for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
        ReusedOp &Op = Reuses[ro];
        // If we find some other reuse that was supposed to use this register
        // exactly for its reload, we can change this reload to use ITS reload
        // register. That is, unless its reload register has already been
        // considered and subsequently rejected because it has also been reused
        // by another operand.
        if (Op.PhysRegReused == PhysReg &&
            Rejected.count(Op.AssignedPhysReg) == 0) {
          // Yup, use the reload register that we didn't use before.
          unsigned NewReg = Op.AssignedPhysReg;
          Rejected.insert(PhysReg);
          return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected,
                                 RegKills, KillOps, VRM);
        } else {
          // Otherwise, we might also have a problem if a previously reused
          // value aliases the new register. If so, codegen the previous reload
          // and use this one.
          unsigned PRRU = Op.PhysRegReused;
          const TargetRegisterInfo *TRI = Spills.getRegInfo();
          if (TRI->areAliases(PRRU, PhysReg)) {
            // Okay, we found out that an alias of a reused register
            // was used.  This isn't good because it means we have
            // to undo a previous reuse.
            MachineBasicBlock *MBB = MI->getParent();
            const TargetRegisterClass *AliasRC =
              MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

            // Copy Op out of the vector and remove it, we're going to insert an
            // explicit load for it.
            ReusedOp NewOp = Op;
            Reuses.erase(Reuses.begin()+ro);

            // Ok, we're going to try to reload the assigned physreg into the
            // slot that we were supposed to in the first place.  However, that
            // register could hold a reuse.  Check to see if it conflicts or
            // would prefer us to use a different register.
            unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg,
                                                  MI, Spills, MaybeDeadStores,
                                              Rejected, RegKills, KillOps, VRM);

            MachineBasicBlock::iterator MII = MI;
            if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
              ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI,VRM);
            } else {
              TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg,
                                        NewOp.StackSlotOrReMat, AliasRC);
              MachineInstr *LoadMI = prior(MII);
              VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
              // Any stores to this stack slot are not dead anymore.
              MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
              ++NumLoads;
            }
            Spills.ClobberPhysReg(NewPhysReg);
            Spills.ClobberPhysReg(NewOp.PhysRegReused);

            MI->getOperand(NewOp.Operand).setReg(NewPhysReg);

            Spills.addAvailable(NewOp.StackSlotOrReMat, MI, NewPhysReg);
            --MII;
            UpdateKills(*MII, RegKills, KillOps);
            DOUT << '\t' << *MII;

            DOUT << "Reuse undone!\n";
            --NumReused;

            // Finally, PhysReg is now available, go ahead and use it.
            return PhysReg;
          }
        }
      }
      return PhysReg;
    }

    /// GetRegForReload - Helper for the above GetRegForReload().  Add a
    /// 'Rejected' set to remember which registers have been considered and
    /// rejected for the reload.
    /// This avoids infinite looping in cases like this:
    ///    t1 := op t2, t3
    ///    t2 <- assigned r0 for use by the reload but ended up reuse r1
    ///    t3 <- assigned r1 for use by the reload but ended up reuse r0
    ///    t1 <- desires r1
    ///       sees r1 is taken by t2, tries t2's reload register r0
    ///       sees r0 is taken by t3, tries t3's reload register r1
    ///       sees r1 is taken by t2, tries t2's reload register r0 ...
    unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                             AvailableSpills &Spills,
                             std::vector<MachineInstr*> &MaybeDeadStores,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM) {
      SmallSet<unsigned, 8> Rejected;
      return GetRegForReload(PhysReg, MI, Spills, MaybeDeadStores, Rejected,
                             RegKills, KillOps, VRM);
    }
  };
}

/// PrepForUnfoldOpti - Turn a store folding instruction into a load folding
/// instruction. e.g.
///     xorl  %edi, %eax
///     movl  %eax, -32(%ebp)
///     movl  -36(%ebp), %eax
///     orl   %eax, -32(%ebp)
/// ==>
///     xorl  %edi, %eax
///     orl   -36(%ebp), %eax
///     mov   %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MII,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                     AvailableSpills &Spills,
                                     BitVector &RegKills,
                                     std::vector<MachineOperand*> &KillOps,
                                     VirtRegMap &VRM) {
  MachineFunction &MF = *MBB.getParent();
  MachineInstr &MI = *MII;
  unsigned UnfoldedOpc = 0;
  unsigned UnfoldPR = 0;
  unsigned UnfoldVR = 0;
  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
    // Only transform an MI that folds a single register.
    if (UnfoldedOpc)
      return false;
    UnfoldVR = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    // MI2VirtMap can be updated, which invalidates the iterator.
    // Increment the iterator first.
    ++I;
    if (VRM.isAssignedReg(UnfoldVR))
      continue;
    // If this reference is not a use, any previous store is now dead.
    // Otherwise, the store to this stack slot is not dead anymore.
    FoldedSS = VRM.getStackSlot(UnfoldVR);
    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
    if (DeadStore && (MR & VirtRegMap::isModRef)) {
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
      if (!PhysReg || !DeadStore->readsRegister(PhysReg))
        continue;
      UnfoldPR = PhysReg;
      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                    false, true);
    }
  }

  if (!UnfoldedOpc)
    return false;

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || MO.getReg() == 0 || !MO.isUse())
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
      continue;
    if (VRM.isAssignedReg(VirtReg)) {
      unsigned PhysReg = VRM.getPhys(VirtReg);
      if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
    } else if (VRM.isReMaterialized(VirtReg))
      continue;
    int SS = VRM.getStackSlot(VirtReg);
    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
    if (PhysReg) {
      if (TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
      continue;
    }
    if (VRM.hasPhys(VirtReg)) {
      PhysReg = VRM.getPhys(VirtReg);
      if (!TRI->regsOverlap(PhysReg, UnfoldPR))
        continue;
    }

    // Ok, we'll need to reload the value into a register which makes
    // it impossible to perform the store unfolding optimization later.
    // Let's see if it is possible to fold the load if the store is
    // unfolded. This allows us to perform the store unfolding
    // optimization.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
      assert(NewMIs.size() == 1);
      MachineInstr *NewMI = NewMIs.back();
      NewMIs.clear();
      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
      assert(Idx != -1);
      SmallVector<unsigned, 2> Ops;
      Ops.push_back(Idx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
      if (FoldedMI) {
        VRM.addSpillSlotUse(SS, FoldedMI);
        if (!VRM.hasPhys(UnfoldVR))
          VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
        VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
        MII = MBB.insert(MII, FoldedMI);
        InvalidateKills(MI, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(&MI);
        MBB.erase(&MI);
        MF.DeleteMachineInstr(NewMI);
        return true;
      }
      MF.DeleteMachineInstr(NewMI);
    }
  }
  return false;
}

/// CommuteToFoldReload -
/// Look for
/// r1 = load fi#1
/// r1 = op r1, r2<kill>
/// store r1, fi#1
///
/// If op is commutable and r2 is killed, then we can xform these to
/// r2 = op r2, fi#1
/// store r2, fi#1
bool LocalSpiller::CommuteToFoldReload(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator &MII,
                                       unsigned VirtReg, unsigned SrcReg, int SS,
                                       BitVector &RegKills,
                                       std::vector<MachineOperand*> &KillOps,
                                       const TargetRegisterInfo *TRI,
                                       VirtRegMap &VRM) {
  if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
    return false;

  MachineFunction &MF = *MBB.getParent();
  MachineInstr &MI = *MII;
  MachineBasicBlock::iterator DefMII = prior(MII);
  MachineInstr *DefMI = DefMII;
  const TargetInstrDesc &TID = DefMI->getDesc();
  unsigned NewDstIdx;
  if (DefMII != MBB.begin() &&
      TID.isCommutable() &&
      TII->CommuteChangesDestination(DefMI, NewDstIdx)) {
    MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
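    // NewDstMO is the use operand that becomes the destination once the op
    // is commuted; it must be a killed use that does not overlap SrcReg.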
    unsigned NewReg = NewDstMO.getReg();
    if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
      return false;
    MachineInstr *ReloadMI = prior(DefMII);
    int FrameIdx;
    unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
    if (DestReg != SrcReg || FrameIdx != SS)
      return false;
    int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
    if (UseIdx == -1)
      return false;
    int DefIdx = TID.getOperandConstraint(UseIdx, TOI::TIED_TO);
    if (DefIdx == -1)
      return false;
    assert(DefMI->getOperand(DefIdx).isRegister() &&
           DefMI->getOperand(DefIdx).getReg() == SrcReg);

    // Now commute def instruction.
    MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
    if (!CommutedMI)
      return false;
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(NewDstIdx);
    MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
    // Not needed since foldMemoryOperand returns new MI.
    MF.DeleteMachineInstr(CommutedMI);
    if (!FoldedMI)
      return false;

    VRM.addSpillSlotUse(SS, FoldedMI);
    VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
    // Insert new def MI and spill MI.
    const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
    TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
    MII = prior(MII);
    MachineInstr *StoreMI = MII;
    VRM.addSpillSlotUse(SS, StoreMI);
    VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
    MII = MBB.insert(MII, FoldedMI);  // Update MII to backtrack.

    // Delete all 3 old instructions.
    InvalidateKills(*ReloadMI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(ReloadMI);
    MBB.erase(ReloadMI);
    InvalidateKills(*DefMI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(DefMI);
    MBB.erase(DefMI);
    InvalidateKills(MI, RegKills, KillOps);
    VRM.RemoveMachineInstrFromMaps(&MI);
    MBB.erase(&MI);

    ++NumCommutes;
    return true;
  }

  return false;
}

/// findSuperReg - Find the SubReg's super-register of the given register class
/// where its SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}

/// SpillRegToStackSlot - Spill a register to a specified stack slot.  Check if
/// the last store to the same slot is now dead.  If so, remove the last store.
void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MII,
                                  int Idx, unsigned PhysReg, int StackSlot,
                                  const TargetRegisterClass *RC,
                                  bool isAvailable, MachineInstr *&LastStore,
                                  AvailableSpills &Spills,
                                  SmallSet<MachineInstr*, 4> &ReMatDefs,
                                  BitVector &RegKills,
                                  std::vector<MachineOperand*> &KillOps,
                                  VirtRegMap &VRM) {
  TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
  MachineInstr *StoreMI = next(MII);
  VRM.addSpillSlotUse(StackSlot, StoreMI);
  DOUT << "Store:\t" << *StoreMI;

  // If there is a dead store to this stack slot, nuke it now.
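  // (LastStore tracks the most recent store to this slot whose value was
  // never reloaded; the store just emitted above makes it dead.)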
  if (LastStore) {
    DOUT << "Removed dead store:\t" << *LastStore;
    ++NumDSE;
    SmallVector<unsigned, 2> KillRegs;
    InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
    MachineBasicBlock::iterator PrevMII = LastStore;
    bool CheckDef = PrevMII != MBB.begin();
    if (CheckDef)
      --PrevMII;
    VRM.RemoveMachineInstrFromMaps(LastStore);
    MBB.erase(LastStore);
    if (CheckDef) {
      // Look at defs of killed registers on the store. Mark the defs
      // as dead since the store has been deleted and they aren't
      // being reused.
      for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
        bool HasOtherDef = false;
        if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
          MachineInstr *DeadDef = PrevMII;
          if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
            // FIXME: This assumes a remat def does not have side
            // effects.
            VRM.RemoveMachineInstrFromMaps(DeadDef);
            MBB.erase(DeadDef);
            ++NumDRM;
          }
        }
      }
    }
  }

  LastStore = next(MII);

  // If the stack slot value was previously available in some other
  // register, change it now.  Otherwise, make the register available,
  // in PhysReg.
  Spills.ModifyStackSlotOrReMat(StackSlot);
  Spills.ClobberPhysReg(PhysReg);
  Spills.addAvailable(StackSlot, LastStore, PhysReg, isAvailable);
  ++NumStores;
}

/// TransferDeadness - An identity copy definition is dead and it's being
/// removed.  Find the last def or use and mark it as dead / kill.
void LocalSpiller::TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                                    unsigned Reg, BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps) {
  int LastUDDist = -1;
  MachineInstr *LastUDMI = NULL;
  for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
         RE = RegInfo->reg_end(); RI != RE; ++RI) {
    MachineInstr *UDMI = &*RI;
    if (UDMI->getParent() != MBB)
      continue;
    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
    if (DI == DistanceMap.end() || DI->second > CurDist)
      continue;
    if ((int)DI->second < LastUDDist)
      continue;
    LastUDDist = DI->second;
    LastUDMI = UDMI;
  }

  if (LastUDMI) {
    const TargetInstrDesc &TID = LastUDMI->getDesc();
    MachineOperand *LastUD = NULL;
    for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = LastUDMI->getOperand(i);
      if (!MO.isRegister() || MO.getReg() != Reg)
        continue;
      if (!LastUD || (LastUD->isUse() && MO.isDef()))
        LastUD = &MO;
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1)
        return;
    }
    if (LastUD->isDef())
      LastUD->setIsDead();
    else {
      LastUD->setIsKill();
      RegKills.set(Reg);
      KillOps[Reg] = LastUD;
    }
  }
}

/// rewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them.  If possible, avoid reloading vregs.
void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
  DOUT << MBB.getBasicBlock()->getName() << ":\n";

  MachineFunction &MF = *MBB.getParent();

  // Spills - Keep track of which spilled values are available in physregs so
  // that we can choose to reuse the physregs instead of emitting reloads.
  AvailableSpills Spills(TRI, TII);

  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store.
  // If the stack slot value is never read (because the value was used from
  // some available register, for example), and subsequently stored to, the
  // original store is dead.  This map keeps track of inserted stores that are
  // not used.  If we see a subsequent store to the same stack slot, the
  // original store is deleted.
  std::vector<MachineInstr*> MaybeDeadStores;
  MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

  // ReMatDefs - These are rematerializable def MIs which are not deleted.
  SmallSet<MachineInstr*, 4> ReMatDefs;

  // Keep track of kill information.
  BitVector RegKills(TRI->getNumRegs());
  std::vector<MachineOperand*> KillOps;
  KillOps.resize(TRI->getNumRegs(), NULL);

  unsigned Dist = 0;
  DistanceMap.clear();
  for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
       MII != E; ) {
    MachineBasicBlock::iterator NextMII = MII; ++NextMII;

    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    bool Erased = false;
    bool BackTracked = false;
    if (PrepForUnfoldOpti(MBB, MII,
                          MaybeDeadStores, Spills, RegKills, KillOps, VRM))
      NextMII = next(MII);

    MachineInstr &MI = *MII;
    const TargetInstrDesc &TID = MI.getDesc();

    if (VRM.hasEmergencySpills(&MI)) {
      // Spill physical register(s) in the rare case the allocator has run out
      // of registers to allocate.
      SmallSet<int, 4> UsedSS;
      std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
      for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
        unsigned PhysReg = EmSpills[i];
        const TargetRegisterClass *RC =
          TRI->getPhysicalRegisterRegClass(PhysReg);
        assert(RC && "Unable to determine register class!");
        int SS = VRM.getEmergencySpillSlot(RC);
        if (UsedSS.count(SS))
          assert(0 && "Need to spill more than one physical register!");
        UsedSS.insert(SS);
        TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
        MachineInstr *StoreMI = prior(MII);
        VRM.addSpillSlotUse(SS, StoreMI);
        TII->loadRegFromStackSlot(MBB, next(MII), PhysReg, SS, RC);
        MachineInstr *LoadMI = next(MII);
        VRM.addSpillSlotUse(SS, LoadMI);
        ++NumPSpills;
      }
      NextMII = next(MII);
    }

    // Insert restores here if asked to.
    if (VRM.isRestorePt(&MI)) {
      std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
      for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
        unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
        if (!VRM.getPreSplitReg(VirtReg))
          continue; // Split interval spilled again.
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);
        if (VRM.isReMaterialized(VirtReg)) {
          ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM);
        } else {
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          int SS = VRM.getStackSlot(VirtReg);
          TII->loadRegFromStackSlot(MBB, &MI, Phys, SS, RC);
          MachineInstr *LoadMI = prior(MII);
          VRM.addSpillSlotUse(SS, LoadMI);
          ++NumLoads;
        }
        // This invalidates Phys.
        Spills.ClobberPhysReg(Phys);
        UpdateKills(*prior(MII), RegKills, KillOps);
        DOUT << '\t' << *prior(MII);
      }
    }

    // Insert spills here if asked to.
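    // (Spill points are emitted for split live intervals - note the
    // getPreSplitReg check - and each entry pairs a virtual register with an
    // isKill flag, unpacked below.)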
    if (VRM.isSpillPt(&MI)) {
      std::vector<std::pair<unsigned,bool> > &SpillRegs =
        VRM.getSpillPtSpills(&MI);
      for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
        unsigned VirtReg = SpillRegs[i].first;
        bool isKill = SpillRegs[i].second;
        if (!VRM.getPreSplitReg(VirtReg))
          continue; // Split interval spilled again.
        const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
        unsigned Phys = VRM.getPhys(VirtReg);
        int StackSlot = VRM.getStackSlot(VirtReg);
        TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
        MachineInstr *StoreMI = next(MII);
        VRM.addSpillSlotUse(StackSlot, StoreMI);
        DOUT << "Store:\t" << *StoreMI;
        VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
      }
      NextMII = next(MII);
    }

    /// ReusedOperands - Keep track of operand reuse in case we need to undo
    /// reuse.
    ReuseInfo ReusedOperands(MI, TRI);
    SmallVector<unsigned, 4> VirtUseOps;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isRegister() || MO.getReg() == 0)
        continue;   // Ignore non-register operands.

      unsigned VirtReg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
        // Ignore physregs for spilling, but remember that it is used by this
        // function.
        RegInfo->setPhysRegUsed(VirtReg);
        continue;
      }

      // We want to process implicit virtual register uses first.
      if (MO.isImplicit())
        // If the virtual register is implicitly defined, emit an implicit_def
        // before so the scavenger knows it's "defined".
        VirtUseOps.insert(VirtUseOps.begin(), i);
      else
        VirtUseOps.push_back(i);
    }

    // Process all of the spilled uses and all non spilled reg references.
    for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
      unsigned i = VirtUseOps[j];
      MachineOperand &MO = MI.getOperand(i);
      unsigned VirtReg = MO.getReg();
      assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
             "Not a virtual register?");

      unsigned SubIdx = MO.getSubReg();
      if (VRM.isAssignedReg(VirtReg)) {
        // This virtual register was assigned a physreg!
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);
        if (MO.isDef())
          ReusedOperands.markClobbered(Phys);
        unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
        MI.getOperand(i).setReg(RReg);
        if (VRM.isImplicitlyDefined(VirtReg))
          BuildMI(MBB, &MI, TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
        continue;
      }

      // This virtual register is now known to be a spilled value.
      if (!MO.isUse())
        continue;  // Handle defs in the loop below (handle use&def here though)

      bool DoReMat = VRM.isReMaterialized(VirtReg);
      int SSorRMId = DoReMat
        ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
      int ReuseSlot = SSorRMId;

      // Check to see if this stack slot is available.
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);

      // If this is a sub-register use, make sure the reuse register is in the
      // right register class. For example, for x86 not all of the 32-bit
      // registers have accessible sub-registers.
      // Similarly so for EXTRACT_SUBREG. Consider this:
      // EDI = op
      // MOV32_mr fi#1, EDI
      // ...
      //       = EXTRACT_SUBREG fi#1
      // fi#1 is available in EDI, but it cannot be reused because it's not in
      // the right register file.
      if (PhysReg &&
          (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        if (!RC->contains(PhysReg))
          PhysReg = 0;
      }

      if (PhysReg) {
        // This spilled operand might be part of a two-address operand.  If this
        // is the case, then changing it will necessarily require changing the
        // def part of the instruction as well.  However, in some cases, we
        // aren't allowed to modify the reused register.  If none of these cases
        // apply, reuse it.
        bool CanReuse = true;
        int ti = TID.getOperandConstraint(i, TOI::TIED_TO);
        if (ti != -1 &&
            MI.getOperand(ti).isRegister() &&
            MI.getOperand(ti).getReg() == VirtReg) {
          // Okay, we have a two address operand.  We can reuse this physreg as
          // long as we are allowed to clobber the value and there isn't an
          // earlier def that has already clobbered the physreg.
          CanReuse = Spills.canClobberPhysReg(ReuseSlot) &&
            !ReusedOperands.isClobbered(PhysReg);
        }

        if (CanReuse) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg "
               << TRI->getName(PhysReg) << " for vreg"
               << VirtReg <<" instead of reloading into physreg "
               << TRI->getName(VRM.getPhys(VirtReg)) << "\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);

          // The only technical detail we have is that we don't know that
          // PhysReg won't be clobbered by a reloaded stack slot that occurs
          // later in the instruction.  In particular, consider 'op V1, V2'.
          // If V1 is available in physreg R0, we would choose to reuse it
          // here, instead of reloading it into the register the allocator
          // indicated (say R1).  However, V2 might have to be reloaded
          // later, and it might indicate that it needs to live in R0.  When
          // this occurs, we need to have information available that
          // indicates it is safe to use R1 for the reload instead of R0.
          //
          // To further complicate matters, we might conflict with an alias,
          // or R0 and R1 might not be compatible with each other.  In this
          // case, we actually insert a reload for V1 in R1, ensuring that
          // we can get at R0 or its alias.
          ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                  VRM.getPhys(VirtReg), VirtReg);
          if (ti != -1)
            // Only mark it clobbered if this is a use&def operand.
            ReusedOperands.markClobbered(PhysReg);
          ++NumReused;

          if (MI.getOperand(i).isKill() &&
              ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
            // This was the last use and the spilled value is still available
            // for reuse. That means the spill was unnecessary!
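            // (ReuseSlot indexes MaybeDeadStores by frame index, so only real
            // stack slots - not remat ids - may perform this lookup, hence
            // the MAX_STACK_SLOT guard above.)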
            MachineInstr* DeadStore = MaybeDeadStores[ReuseSlot];
            if (DeadStore) {
              DOUT << "Removed dead store:\t" << *DeadStore;
              InvalidateKills(*DeadStore, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(DeadStore);
              MBB.erase(DeadStore);
              MaybeDeadStores[ReuseSlot] = NULL;
              ++NumDSE;
            }
          }
          continue;
        }  // CanReuse

        // Otherwise we have a situation where we have a two-address instruction
        // whose mod/ref operand needs to be reloaded.  This reload is already
        // available in some register "PhysReg", but if we used PhysReg as the
        // operand to our 2-addr instruction, the instruction would modify
        // PhysReg.  This isn't cool if something later uses PhysReg and expects
        // to get its initial value.
        //
        // To avoid this problem, and to avoid doing a load right after a store,
        // we emit a copy from PhysReg into the designated register for this
        // operand.
        unsigned DesignatedReg = VRM.getPhys(VirtReg);
        assert(DesignatedReg && "Must map virtreg to physreg!");

        // Note that, if we reused a register for a previous operand, the
        // register we want to reload into might not actually be
        // available.  If this occurs, use the register indicated by the
        // reuser.
        if (ReusedOperands.hasReuses())
          DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);

        // If the mapped designated register is actually the physreg we have
        // incoming, we don't need to insert a dead copy.
        if (DesignatedReg == PhysReg) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg " << TRI->getName(PhysReg)
               << " for vreg" << VirtReg
               << " instead of reloading into same physreg.\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
          ReusedOperands.markClobbered(RReg);
          ++NumReused;
          continue;
        }

        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        RegInfo->setPhysRegUsed(DesignatedReg);
        ReusedOperands.markClobbered(DesignatedReg);
        TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);

        MachineInstr *CopyMI = prior(MII);
        UpdateKills(*CopyMI, RegKills, KillOps);

        // This invalidates DesignatedReg.
        Spills.ClobberPhysReg(DesignatedReg);

        Spills.addAvailable(ReuseSlot, &MI, DesignatedReg);
        unsigned RReg =
          SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
        MI.getOperand(i).setReg(RReg);
        DOUT << '\t' << *prior(MII);
        ++NumReused;
        continue;
      } // if (PhysReg)

      // Otherwise, reload it and remember that we have it.
      PhysReg = VRM.getPhys(VirtReg);
      assert(PhysReg && "Must map virtreg to physreg!");

      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
      // available.  If this occurs, use the register indicated by the
      // reuser.
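      // (For example, if an earlier operand grabbed this physreg as a reuse,
      // GetRegForReload hands back a substitute, possibly after undoing that
      // reuse and emitting its reload explicitly.)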
1520 if (ReusedOperands.hasReuses())
1521 PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
1522 Spills, MaybeDeadStores, RegKills, KillOps, VRM);
1523 
1524 RegInfo->setPhysRegUsed(PhysReg);
1525 ReusedOperands.markClobbered(PhysReg);
1526 if (DoReMat) {
1527 ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM);
1528 } else {
1529 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
1530 TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
1531 MachineInstr *LoadMI = prior(MII);
1532 VRM.addSpillSlotUse(SSorRMId, LoadMI);
1533 ++NumLoads;
1534 }
1535 // This invalidates PhysReg.
1536 Spills.ClobberPhysReg(PhysReg);
1537 
1538 // Any stores to this stack slot are not dead anymore.
1539 if (!DoReMat)
1540 MaybeDeadStores[SSorRMId] = NULL;
1541 Spills.addAvailable(SSorRMId, &MI, PhysReg);
1542 // Assumes this is the last use. IsKill will be unset if reg is reused
1543 // unless it's a two-address operand.
1544 if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
1545 MI.getOperand(i).setIsKill();
1546 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
1547 MI.getOperand(i).setReg(RReg);
1548 UpdateKills(*prior(MII), RegKills, KillOps);
1549 DOUT << '\t' << *prior(MII);
1550 }
1551 
1552 DOUT << '\t' << MI;
1553 
1554 
1555 // If we have folded references to memory operands, make sure we clear all
1556 // physical registers that may contain the value of the spilled virtual
1557 // register.
1558 SmallSet<int, 2> FoldedSS;
1559 for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
1560 unsigned VirtReg = I->second.first;
1561 VirtRegMap::ModRef MR = I->second.second;
1562 DOUT << "Folded vreg: " << VirtReg << " MR: " << MR;
1563 
1564 // MI2VirtMap can be updated, which invalidates the iterator.
1565 // Increment the iterator first.
1566 ++I;
1567 int SS = VRM.getStackSlot(VirtReg);
1568 if (SS == VirtRegMap::NO_STACK_SLOT)
1569 continue;
1570 FoldedSS.insert(SS);
1571 DOUT << " - StackSlot: " << SS << "\n";
1572 
1573 // If this folded instruction is just a use, check to see if it's a
1574 // straight load from the virt reg slot.
1575 if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
1576 int FrameIdx;
1577 unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
1578 if (DestReg && FrameIdx == SS) {
1579 // If this spill slot is available, turn it into a copy (or nothing)
1580 // instead of leaving it as a load!
1581 if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
1582 DOUT << "Promoted Load To Copy: " << MI;
1583 if (DestReg != InReg) {
1584 const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
1585 TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
1586 // Revisit the copy so we make sure to notice the effects of the
1587 // operation on the destreg (either needing to RA it if it's
1588 // virtual or needing to clobber any values if it's physical).
1589 NextMII = &MI;
1590 --NextMII; // backtrack to the copy.
1591 BackTracked = true;
1592 } else {
1593 DOUT << "Removing now-noop copy: " << MI;
1594 // Unset last kill since it's being reused.
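// The erased load was thought to be the last use of InReg, but the value
// in InReg lives on past this point, so the stale kill marker on InReg
// must be cleared before the instruction goes away.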
1595 InvalidateKill(InReg, RegKills, KillOps);
1596 }
1597 
1598 InvalidateKills(MI, RegKills, KillOps);
1599 VRM.RemoveMachineInstrFromMaps(&MI);
1600 MBB.erase(&MI);
1601 Erased = true;
1602 goto ProcessNextInst;
1603 }
1604 } else {
1605 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
1606 SmallVector<MachineInstr*, 4> NewMIs;
1607 if (PhysReg &&
1608 TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
1609 MBB.insert(MII, NewMIs[0]);
1610 InvalidateKills(MI, RegKills, KillOps);
1611 VRM.RemoveMachineInstrFromMaps(&MI);
1612 MBB.erase(&MI);
1613 Erased = true;
1614 --NextMII; // backtrack to the unfolded instruction.
1615 BackTracked = true;
1616 goto ProcessNextInst;
1617 }
1618 }
1619 }
1620 
1621 // If this reference is not a use, any previous store is now dead.
1622 // Otherwise, the store to this stack slot is not dead anymore.
1623 MachineInstr* DeadStore = MaybeDeadStores[SS];
1624 if (DeadStore) {
1625 bool isDead = !(MR & VirtRegMap::isRef);
1626 MachineInstr *NewStore = NULL;
1627 if (MR & VirtRegMap::isModRef) {
1628 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
1629 SmallVector<MachineInstr*, 4> NewMIs;
1630 // If the slot's value is still available in PhysReg, try to unfold
1631 // the store component of this folded instruction into a separate
1632 // store; the earlier store to the slot then becomes dead.
1633 if (PhysReg &&
1634 !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
1635 MachineOperand *KillOpnd =
1636 DeadStore->findRegisterUseOperand(PhysReg, true);
1637 // Note, if the store is storing a sub-register, it's possible the
1638 // super-register is needed below.
1639 if (KillOpnd && !KillOpnd->getSubReg() &&
1640 TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
1641 MBB.insert(MII, NewMIs[0]);
1642 NewStore = NewMIs[1];
1643 MBB.insert(MII, NewStore);
1644 VRM.addSpillSlotUse(SS, NewStore);
1645 InvalidateKills(MI, RegKills, KillOps);
1646 VRM.RemoveMachineInstrFromMaps(&MI);
1647 MBB.erase(&MI);
1648 Erased = true;
1649 --NextMII;
1650 --NextMII; // backtrack to the unfolded instruction.
1651 BackTracked = true;
1652 isDead = true;
1653 }
1654 }
1655 }
1656 
1657 if (isDead) {
1658 // The previous store is dead; nuke it now.
1659 DOUT << "Removed dead store:\t" << *DeadStore;
1660 InvalidateKills(*DeadStore, RegKills, KillOps);
1661 VRM.RemoveMachineInstrFromMaps(DeadStore);
1662 MBB.erase(DeadStore);
1663 if (!NewStore)
1664 ++NumDSE;
1665 }
1666 
1667 MaybeDeadStores[SS] = NULL;
1668 if (NewStore) {
1669 // Treat this store as a spill merged into a copy. That makes the
1670 // stack slot value available.
1671 VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
1672 goto ProcessNextInst;
1673 }
1674 }
1675 
1676 // If the spill slot value is available, and this is a new definition of
1677 // the value, the value is not available anymore.
1678 if (MR & VirtRegMap::isMod) {
1679 // Notice that the value in this stack slot has been modified.
1680 Spills.ModifyStackSlotOrReMat(SS);
1681 
1682 // If this is *just* a mod of the value, check to see if this is just a
1683 // store to the spill slot (i.e. the spill got merged into the copy). If
1684 // so, realize that the vreg is available now, and add the store to the
1685 // MaybeDeadStore info.
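// For example (hypothetical operands): if MI is a plain 'store R0 -> SS#3'
// with no read of the slot, R0 now holds a copy of SS#3's value; the slot
// is marked available in R0 and the store is recorded in MaybeDeadStores,
// so a later store to SS#3 with no intervening read can delete this one.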
1686 int StackSlot; 1687 if (!(MR & VirtRegMap::isRef)) { 1688 if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) { 1689 assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) && 1690 "Src hasn't been allocated yet?"); 1691 1692 if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot, 1693 RegKills, KillOps, TRI, VRM)) { 1694 NextMII = next(MII); 1695 BackTracked = true; 1696 goto ProcessNextInst; 1697 } 1698 1699 // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark 1700 // this as a potentially dead store in case there is a subsequent 1701 // store into the stack slot without a read from it. 1702 MaybeDeadStores[StackSlot] = &MI; 1703 1704 // If the stack slot value was previously available in some other 1705 // register, change it now. Otherwise, make the register 1706 // available in PhysReg. 1707 Spills.addAvailable(StackSlot, &MI, SrcReg, false/*!clobber*/); 1708 } 1709 } 1710 } 1711 } 1712 1713 // Process all of the spilled defs. 1714 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 1715 MachineOperand &MO = MI.getOperand(i); 1716 if (!(MO.isRegister() && MO.getReg() && MO.isDef())) 1717 continue; 1718 1719 unsigned VirtReg = MO.getReg(); 1720 if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) { 1721 // Check to see if this is a noop copy. If so, eliminate the 1722 // instruction before considering the dest reg to be changed. 1723 unsigned Src, Dst; 1724 if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) { 1725 ++NumDCE; 1726 DOUT << "Removing now-noop copy: " << MI; 1727 SmallVector<unsigned, 2> KillRegs; 1728 InvalidateKills(MI, RegKills, KillOps, &KillRegs); 1729 if (MO.isDead() && !KillRegs.empty()) { 1730 assert(KillRegs[0] == Dst); 1731 // Last def is now dead. 1732 TransferDeadness(&MBB, Dist, Src, RegKills, KillOps); 1733 } 1734 VRM.RemoveMachineInstrFromMaps(&MI); 1735 MBB.erase(&MI); 1736 Erased = true; 1737 Spills.disallowClobberPhysReg(VirtReg); 1738 goto ProcessNextInst; 1739 } 1740 1741 // If it's not a no-op copy, it clobbers the value in the destreg. 1742 Spills.ClobberPhysReg(VirtReg); 1743 ReusedOperands.markClobbered(VirtReg); 1744 1745 // Check to see if this instruction is a load from a stack slot into 1746 // a register. If so, this provides the stack slot value in the reg. 1747 int FrameIdx; 1748 if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) { 1749 assert(DestReg == VirtReg && "Unknown load situation!"); 1750 1751 // If it is a folded reference, then it's not safe to clobber. 1752 bool Folded = FoldedSS.count(FrameIdx); 1753 // Otherwise, if it wasn't available, remember that it is now! 1754 Spills.addAvailable(FrameIdx, &MI, DestReg, !Folded); 1755 goto ProcessNextInst; 1756 } 1757 1758 continue; 1759 } 1760 1761 unsigned SubIdx = MO.getSubReg(); 1762 bool DoReMat = VRM.isReMaterialized(VirtReg); 1763 if (DoReMat) 1764 ReMatDefs.insert(&MI); 1765 1766 // The only vregs left are stack slot definitions. 1767 int StackSlot = VRM.getStackSlot(VirtReg); 1768 const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg); 1769 1770 // If this def is part of a two-address operand, make sure to execute 1771 // the store from the correct physical register. 
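// For example (hypothetical x86 operands): in '%reg1024<def> = ADD32rr
// %reg1024, %reg1025' the def is tied to the first use, so the spill store
// must use whatever physreg that use was rewritten to. And if the def
// writes through a 16-bit sub-register index that landed in AX, findSuperReg
// recovers EAX so the full register is stored.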
1772 unsigned PhysReg; 1773 int TiedOp = MI.getDesc().findTiedToSrcOperand(i); 1774 if (TiedOp != -1) { 1775 PhysReg = MI.getOperand(TiedOp).getReg(); 1776 if (SubIdx) { 1777 unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI); 1778 assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg && 1779 "Can't find corresponding super-register!"); 1780 PhysReg = SuperReg; 1781 } 1782 } else { 1783 PhysReg = VRM.getPhys(VirtReg); 1784 if (ReusedOperands.isClobbered(PhysReg)) { 1785 // Another def has taken the assigned physreg. It must have been a 1786 // use&def which got it due to reuse. Undo the reuse! 1787 PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI, 1788 Spills, MaybeDeadStores, RegKills, KillOps, VRM); 1789 } 1790 } 1791 1792 assert(PhysReg && "VR not assigned a physical register?"); 1793 RegInfo->setPhysRegUsed(PhysReg); 1794 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg; 1795 ReusedOperands.markClobbered(RReg); 1796 MI.getOperand(i).setReg(RReg); 1797 1798 if (!MO.isDead()) { 1799 MachineInstr *&LastStore = MaybeDeadStores[StackSlot]; 1800 SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true, 1801 LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM); 1802 NextMII = next(MII); 1803 1804 // Check to see if this is a noop copy. If so, eliminate the 1805 // instruction before considering the dest reg to be changed. 1806 { 1807 unsigned Src, Dst; 1808 if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) { 1809 ++NumDCE; 1810 DOUT << "Removing now-noop copy: " << MI; 1811 InvalidateKills(MI, RegKills, KillOps); 1812 VRM.RemoveMachineInstrFromMaps(&MI); 1813 MBB.erase(&MI); 1814 Erased = true; 1815 UpdateKills(*LastStore, RegKills, KillOps); 1816 goto ProcessNextInst; 1817 } 1818 } 1819 } 1820 } 1821 ProcessNextInst: 1822 DistanceMap.insert(std::make_pair(&MI, Dist++)); 1823 if (!Erased && !BackTracked) { 1824 for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II) 1825 UpdateKills(*II, RegKills, KillOps); 1826 } 1827 MII = NextMII; 1828 } 1829} 1830 1831llvm::Spiller* llvm::createSpiller() { 1832 switch (SpillerOpt) { 1833 default: assert(0 && "Unreachable!"); 1834 case local: 1835 return new LocalSpiller(); 1836 case simple: 1837 return new SimpleSpiller(); 1838 } 1839} 1840
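//===----------------------------------------------------------------------===//
// Usage sketch (illustrative only, assuming the Spiller interface declared
// in VirtRegMap.h): a register allocator that has populated a VirtRegMap
// typically rewrites the function like this:
//
//   std::auto_ptr<Spiller> S(createSpiller()); // honors -spiller=<name>
//   S->runOnMachineFunction(MF, VRM);          // replace vregs, add spills
//===----------------------------------------------------------------------===//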