VirtRegMap.cpp revision f97496a054876b3bb9c0acf424379eb2f48377ce
//===-- llvm/CodeGen/VirtRegMap.cpp - Virtual Register Map ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the VirtRegMap class.
//
// It also contains implementations of the Spiller interface, which, given a
// virtual register map and a machine function, eliminates all virtual
// references by replacing them with physical register references - adding spill
// code as necessary.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "VirtRegMap.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumSpills  , "Number of register spills");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDCE     , "Number of copies elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");

namespace {
  enum SpillerName { simple, local };
}

static cl::opt<SpillerName>
SpillerOpt("spiller",
           cl::desc("Spiller to use: (default: local)"),
           cl::Prefix,
           cl::values(clEnumVal(simple, "simple spiller"),
                      clEnumVal(local,  "local spiller"),
                      clEnumValEnd),
           cl::init(local));

//===----------------------------------------------------------------------===//
//  VirtRegMap implementation
//===----------------------------------------------------------------------===//

VirtRegMap::VirtRegMap(MachineFunction &mf)
  : TII(*mf.getTarget().getInstrInfo()), MF(mf),
    Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT),
    Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
    Virt2SplitKillMap(0), ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1),
    LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) {
  SpillSlotToUsesMap.resize(8);
  ImplicitDefed.resize(MF.getRegInfo().getLastVirtReg()+1-
                       TargetRegisterInfo::FirstVirtualRegister);
  grow();
}

void VirtRegMap::grow() {
  unsigned LastVirtReg = MF.getRegInfo().getLastVirtReg();
  Virt2PhysMap.grow(LastVirtReg);
  Virt2StackSlotMap.grow(LastVirtReg);
  Virt2ReMatIdMap.grow(LastVirtReg);
  Virt2SplitMap.grow(LastVirtReg);
  Virt2SplitKillMap.grow(LastVirtReg);
  ReMatMap.grow(LastVirtReg);
  ImplicitDefed.resize(LastVirtReg-TargetRegisterInfo::FirstVirtualRegister+1);
}
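
/// assignVirt2StackSlot - Create a new stack slot, sized and aligned for
/// virtReg's register class, and record the mapping.  LowSpillSlot and
/// HighSpillSlot track the range of slots created for spills, so that
/// SpillSlotToUsesMap can be indexed by (SS - LowSpillSlot).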
int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(virtReg);
  int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                RC->getAlignment());
  if (LowSpillSlot == NO_STACK_SLOT)
    LowSpillSlot = SS;
  if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
    HighSpillSlot = SS;
  unsigned Idx = SS-LowSpillSlot;
  while (Idx >= SpillSlotToUsesMap.size())
    SpillSlotToUsesMap.resize(SpillSlotToUsesMap.size()*2);
  Virt2StackSlotMap[virtReg] = SS;
  ++NumSpills;
  return SS;
}

void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int SS) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  assert((SS >= 0 ||
          (SS >= MF.getFrameInfo()->getObjectIndexBegin())) &&
         "illegal fixed frame index");
  Virt2StackSlotMap[virtReg] = SS;
}

int VirtRegMap::assignVirtReMatId(unsigned virtReg) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign re-mat id to already spilled register");
  Virt2ReMatIdMap[virtReg] = ReMatId;
  return ReMatId++;
}

void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign re-mat id to already spilled register");
  Virt2ReMatIdMap[virtReg] = id;
}

int VirtRegMap::getEmergencySpillSlot(const TargetRegisterClass *RC) {
  std::map<const TargetRegisterClass*, int>::iterator I =
    EmergencySpillSlots.find(RC);
  if (I != EmergencySpillSlots.end())
    return I->second;
  int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                RC->getAlignment());
  if (LowSpillSlot == NO_STACK_SLOT)
    LowSpillSlot = SS;
  if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
    HighSpillSlot = SS;
  EmergencySpillSlots[RC] = SS;
  return SS;
}

void VirtRegMap::addSpillSlotUse(int FI, MachineInstr *MI) {
  if (!MF.getFrameInfo()->isFixedObjectIndex(FI)) {
    // If FI < LowSpillSlot, this stack reference was produced by
    // instruction selection and is not a spill
    if (FI >= LowSpillSlot) {
      assert(FI >= 0 && "Spill slot index should not be negative!");
      assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
             && "Invalid spill slot");
      SpillSlotToUsesMap[FI-LowSpillSlot].insert(MI);
    }
  }
}

void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
                            MachineInstr *NewMI, ModRef MRInfo) {
  // Move previous memory references folded to new instruction.
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
  for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
         E = MI2VirtMap.end(); I != E && I->first == OldMI; ) {
    MI2VirtMap.insert(IP, std::make_pair(NewMI, I->second));
    MI2VirtMap.erase(I++);
  }

  // add new memory reference
  MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
}

void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo) {
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(MI);
  MI2VirtMap.insert(IP, std::make_pair(MI, std::make_pair(VirtReg, MRInfo)));
}

void VirtRegMap::RemoveMachineInstrFromMaps(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFI())
      continue;
    int FI = MO.getIndex();
    if (MF.getFrameInfo()->isFixedObjectIndex(FI))
      continue;
    // This stack reference was produced by instruction selection and
    // is not a spill
    if (FI < LowSpillSlot)
      continue;
    assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
           && "Invalid spill slot");
    SpillSlotToUsesMap[FI-LowSpillSlot].erase(MI);
  }
  MI2VirtMap.erase(MI);
  SpillPt2VirtMap.erase(MI);
  RestorePt2VirtMap.erase(MI);
  EmergencySpillMap.erase(MI);
}

void VirtRegMap::print(std::ostream &OS) const {
  const TargetRegisterInfo* TRI = MF.getTarget().getRegisterInfo();

  OS << "********** REGISTER MAP **********\n";
  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
         e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i) {
    if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG)
      OS << "[reg" << i << " -> " << TRI->getName(Virt2PhysMap[i])
         << "]\n";
  }

  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
         e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i)
    if (Virt2StackSlotMap[i] != VirtRegMap::NO_STACK_SLOT)
      OS << "[reg" << i << " -> fi#" << Virt2StackSlotMap[i] << "]\n";
  OS << '\n';
}

void VirtRegMap::dump() const {
  print(cerr);
}


//===----------------------------------------------------------------------===//
// Simple Spiller Implementation
//===----------------------------------------------------------------------===//

Spiller::~Spiller() {}
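
// The simple spiller rewrites every virtual register operand in place: a vreg
// that was assigned a physreg is simply renamed, while a spilled vreg is
// reloaded from its stack slot before each use and stored back after each
// def.  No values are kept in registers across instructions.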
namespace {
  struct VISIBILITY_HIDDEN SimpleSpiller : public Spiller {
    bool runOnMachineFunction(MachineFunction& mf, VirtRegMap &VRM);
  };
}

bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
  DOUT << "********** REWRITE MACHINE CODE **********\n";
  DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();
  const TargetRegisterInfo &TRI = *TM.getRegisterInfo();


  // LoadedRegs - Keep track of which vregs are loaded, so that we only load
  // each vreg once (in the case where a spilled vreg is used by multiple
  // operands).  This is always smaller than the number of operands to the
  // current machine instr, so it should be small.
  std::vector<unsigned> LoadedRegs;

  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    DOUT << MBBI->getBasicBlock()->getName() << ":\n";
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator MII = MBB.begin(),
           E = MBB.end(); MII != E; ++MII) {
      MachineInstr &MI = *MII;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (MO.isReg() && MO.getReg()) {
          if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
            unsigned VirtReg = MO.getReg();
            unsigned SubIdx = MO.getSubReg();
            unsigned PhysReg = VRM.getPhys(VirtReg);
            unsigned RReg = SubIdx ? TRI.getSubReg(PhysReg, SubIdx) : PhysReg;
            if (!VRM.isAssignedReg(VirtReg)) {
              int StackSlot = VRM.getStackSlot(VirtReg);
              const TargetRegisterClass* RC =
                MF.getRegInfo().getRegClass(VirtReg);

              if (MO.isUse() &&
                  std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
                  == LoadedRegs.end()) {
                TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
                MachineInstr *LoadMI = prior(MII);
                VRM.addSpillSlotUse(StackSlot, LoadMI);
                LoadedRegs.push_back(VirtReg);
                ++NumLoads;
                DOUT << '\t' << *LoadMI;
              }

              if (MO.isDef()) {
                TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
                                        StackSlot, RC);
                MachineInstr *StoreMI = next(MII);
                VRM.addSpillSlotUse(StackSlot, StoreMI);
                ++NumStores;
              }
            }
            MF.getRegInfo().setPhysRegUsed(RReg);
            MI.getOperand(i).setReg(RReg);
          } else {
            MF.getRegInfo().setPhysRegUsed(MO.getReg());
          }
        }
      }

      DOUT << '\t' << MI;
      LoadedRegs.clear();
    }
  }
  return true;
}

//===----------------------------------------------------------------------===//
// Local Spiller Implementation
//===----------------------------------------------------------------------===//

namespace {
  class AvailableSpills;

  /// LocalSpiller - This spiller does a simple pass over the machine basic
  /// block to attempt to keep spills in registers as much as possible for
  /// blocks that have low register pressure (the vreg may be spilled due to
  /// register pressure in other blocks).
  class VISIBILITY_HIDDEN LocalSpiller : public Spiller {
    MachineRegisterInfo *RegInfo;
    const TargetRegisterInfo *TRI;
    const TargetInstrInfo *TII;
    DenseMap<MachineInstr*, unsigned> DistanceMap;
  public:
    bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
      RegInfo = &MF.getRegInfo();
      TRI = MF.getTarget().getRegisterInfo();
      TII = MF.getTarget().getInstrInfo();
      DOUT << "\n**** Local spiller rewriting function '"
           << MF.getFunction()->getName() << "':\n";
      DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
              " ****\n";
      DEBUG(MF.dump());

      for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
           MBB != E; ++MBB)
        RewriteMBB(*MBB, VRM);

      // Mark unused spill slots.
      MachineFrameInfo *MFI = MF.getFrameInfo();
      int SS = VRM.getLowSpillSlot();
      if (SS != VirtRegMap::NO_STACK_SLOT)
        for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
          if (!VRM.isSpillSlotUsed(SS)) {
            MFI->RemoveStackObject(SS);
            ++NumDSS;
          }

      DOUT << "**** Post Machine Instrs ****\n";
      DEBUG(MF.dump());

      return true;
    }
  private:
    void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                          unsigned Reg, BitVector &RegKills,
                          std::vector<MachineOperand*> &KillOps);
    bool PrepForUnfoldOpti(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           AvailableSpills &Spills, BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);
    bool CommuteToFoldReload(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator &MII,
                             unsigned VirtReg, unsigned SrcReg, int SS,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             const TargetRegisterInfo *TRI,
                             VirtRegMap &VRM);
    void SpillRegToStackSlot(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator &MII,
                             int Idx, unsigned PhysReg, int StackSlot,
                             const TargetRegisterClass *RC,
                             bool isAvailable, MachineInstr *&LastStore,
                             AvailableSpills &Spills,
                             SmallSet<MachineInstr*, 4> &ReMatDefs,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM);
    void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM);
  };
}

/// AvailableSpills - As the local spiller is scanning and rewriting an MBB
/// from top down, keep track of which spill slots or remat values are
/// available in each register.
///
/// Note that not all physregs are created equal here.  In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register-allocated program is using
/// that we cannot CHANGE, but we can read if we like.  We keep track of this
/// on a per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsAvailable entries.  The predicate 'canClobberPhysReg()' checks
/// this bit, and addAvailable sets it when the reloaded value may be
/// clobbered.
namespace {
class VISIBILITY_HIDDEN AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to being
  // loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg.  This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);
public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }

  const TargetRegisterInfo *getRegInfo() const { return TRI; }

  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
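  /// Entries in SpillSlotsOrReMatsAvailable are encoded as
  /// (PhysReg << 1) | CanClobber, so the CanClobber bit is stripped off
  /// before the register is returned.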
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }

  /// addAvailable - Mark that the specified stack slot / remat is available in
  /// the specified physreg.  If CanClobber is true, the physreg can be modified
  /// at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, MachineInstr *MI, unsigned Reg,
                    bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat]= (Reg << 1) | (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "Remembering RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1;
    else
      DOUT << "Remembering SS#" << SlotOrReMat;
    DOUT << " in physreg " << TRI->getName(Reg) << "\n";
  }

  /// canClobberPhysReg - Return true if the spiller is allowed to change the
  /// value of the specified stackslot register if it desires.  The specified
  /// stack slot must be available in a physreg for this query to make sense.
  bool canClobberPhysReg(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }

  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register.  The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value.  We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes.  This removes information about which register the previous
  /// value for this slot lives in (as the previous value is dead now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);
};
}

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register.  The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    I++;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n";
  }
}

/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases.  The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}

/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " clobbered, invalidating ";
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
    else
      DOUT << "SS#" << SlotOrReMat << "\n";
  }
}

/// ClobberPhysReg - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}

/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes.  This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}



/// InvalidateKills - MI is going to be deleted.  If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isUse() || !MO.isKill())
      continue;
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isVirtualRegister(Reg))
      continue;
    if (KillRegs)
      KillRegs->push_back(Reg);
    assert(Reg < KillOps.size());
    if (KillOps[Reg] == &MO) {
      RegKills.reset(Reg);
      KillOps[Reg] = NULL;
    }
  }
}

/// InvalidateKill - An MI that defines the specified register is being deleted;
/// invalidate the register kill information.
580static void InvalidateKill(unsigned Reg, BitVector &RegKills, 581 std::vector<MachineOperand*> &KillOps) { 582 if (RegKills[Reg]) { 583 KillOps[Reg]->setIsKill(false); 584 KillOps[Reg] = NULL; 585 RegKills.reset(Reg); 586 } 587} 588 589/// InvalidateRegDef - If the def operand of the specified def MI is now dead 590/// (since it's spill instruction is removed), mark it isDead. Also checks if 591/// the def MI has other definition operands that are not dead. Returns it by 592/// reference. 593static bool InvalidateRegDef(MachineBasicBlock::iterator I, 594 MachineInstr &NewDef, unsigned Reg, 595 bool &HasLiveDef) { 596 // Due to remat, it's possible this reg isn't being reused. That is, 597 // the def of this reg (by prev MI) is now dead. 598 MachineInstr *DefMI = I; 599 MachineOperand *DefOp = NULL; 600 for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) { 601 MachineOperand &MO = DefMI->getOperand(i); 602 if (MO.isReg() && MO.isDef()) { 603 if (MO.getReg() == Reg) 604 DefOp = &MO; 605 else if (!MO.isDead()) 606 HasLiveDef = true; 607 } 608 } 609 if (!DefOp) 610 return false; 611 612 bool FoundUse = false, Done = false; 613 MachineBasicBlock::iterator E = &NewDef; 614 ++I; ++E; 615 for (; !Done && I != E; ++I) { 616 MachineInstr *NMI = I; 617 for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) { 618 MachineOperand &MO = NMI->getOperand(j); 619 if (!MO.isReg() || MO.getReg() != Reg) 620 continue; 621 if (MO.isUse()) 622 FoundUse = true; 623 Done = true; // Stop after scanning all the operands of this MI. 624 } 625 } 626 if (!FoundUse) { 627 // Def is dead! 628 DefOp->setIsDead(); 629 return true; 630 } 631 return false; 632} 633 634/// UpdateKills - Track and update kill info. If a MI reads a register that is 635/// marked kill, then it must be due to register reuse. Transfer the kill info 636/// over. 637static void UpdateKills(MachineInstr &MI, BitVector &RegKills, 638 std::vector<MachineOperand*> &KillOps, 639 const TargetRegisterInfo* TRI) { 640 const TargetInstrDesc &TID = MI.getDesc(); 641 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 642 MachineOperand &MO = MI.getOperand(i); 643 if (!MO.isReg() || !MO.isUse()) 644 continue; 645 unsigned Reg = MO.getReg(); 646 if (Reg == 0) 647 continue; 648 649 if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) { 650 // That can't be right. Register is killed but not re-defined and it's 651 // being reused. Let's fix that. 652 KillOps[Reg]->setIsKill(false); 653 KillOps[Reg] = NULL; 654 RegKills.reset(Reg); 655 if (i < TID.getNumOperands() && 656 TID.getOperandConstraint(i, TOI::TIED_TO) == -1) 657 // Unless it's a two-address operand, this is the new kill. 658 MO.setIsKill(); 659 } 660 if (MO.isKill()) { 661 RegKills.set(Reg); 662 KillOps[Reg] = &MO; 663 } 664 } 665 666 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 667 const MachineOperand &MO = MI.getOperand(i); 668 if (!MO.isReg() || !MO.isDef()) 669 continue; 670 unsigned Reg = MO.getReg(); 671 RegKills.reset(Reg); 672 KillOps[Reg] = NULL; 673 // It also defines (or partially define) aliases. 674 for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) { 675 RegKills.reset(*AS); 676 KillOps[*AS] = NULL; 677 } 678 } 679} 680 681/// ReMaterialize - Re-materialize definition for Reg targetting DestReg. 
682/// 683static void ReMaterialize(MachineBasicBlock &MBB, 684 MachineBasicBlock::iterator &MII, 685 unsigned DestReg, unsigned Reg, 686 const TargetInstrInfo *TII, 687 const TargetRegisterInfo *TRI, 688 VirtRegMap &VRM) { 689 TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg)); 690 MachineInstr *NewMI = prior(MII); 691 for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) { 692 MachineOperand &MO = NewMI->getOperand(i); 693 if (!MO.isReg() || MO.getReg() == 0) 694 continue; 695 unsigned VirtReg = MO.getReg(); 696 if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) 697 continue; 698 assert(MO.isUse()); 699 unsigned SubIdx = MO.getSubReg(); 700 unsigned Phys = VRM.getPhys(VirtReg); 701 assert(Phys); 702 unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys; 703 MO.setReg(RReg); 704 } 705 ++NumReMats; 706} 707 708 709// ReusedOp - For each reused operand, we keep track of a bit of information, in 710// case we need to rollback upon processing a new operand. See comments below. 711namespace { 712 struct ReusedOp { 713 // The MachineInstr operand that reused an available value. 714 unsigned Operand; 715 716 // StackSlotOrReMat - The spill slot or remat id of the value being reused. 717 unsigned StackSlotOrReMat; 718 719 // PhysRegReused - The physical register the value was available in. 720 unsigned PhysRegReused; 721 722 // AssignedPhysReg - The physreg that was assigned for use by the reload. 723 unsigned AssignedPhysReg; 724 725 // VirtReg - The virtual register itself. 726 unsigned VirtReg; 727 728 ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr, 729 unsigned vreg) 730 : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr), 731 AssignedPhysReg(apr), VirtReg(vreg) {} 732 }; 733 734 /// ReuseInfo - This maintains a collection of ReuseOp's for each operand that 735 /// is reused instead of reloaded. 736 class VISIBILITY_HIDDEN ReuseInfo { 737 MachineInstr &MI; 738 std::vector<ReusedOp> Reuses; 739 BitVector PhysRegsClobbered; 740 public: 741 ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) { 742 PhysRegsClobbered.resize(tri->getNumRegs()); 743 } 744 745 bool hasReuses() const { 746 return !Reuses.empty(); 747 } 748 749 /// addReuse - If we choose to reuse a virtual register that is already 750 /// available instead of reloading it, remember that we did so. 751 void addReuse(unsigned OpNo, unsigned StackSlotOrReMat, 752 unsigned PhysRegReused, unsigned AssignedPhysReg, 753 unsigned VirtReg) { 754 // If the reload is to the assigned register anyway, no undo will be 755 // required. 756 if (PhysRegReused == AssignedPhysReg) return; 757 758 // Otherwise, remember this. 759 Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused, 760 AssignedPhysReg, VirtReg)); 761 } 762 763 void markClobbered(unsigned PhysReg) { 764 PhysRegsClobbered.set(PhysReg); 765 } 766 767 bool isClobbered(unsigned PhysReg) const { 768 return PhysRegsClobbered.test(PhysReg); 769 } 770 771 /// GetRegForReload - We are about to emit a reload into PhysReg. If there 772 /// is some other operand that is using the specified register, either pick 773 /// a new register to use, or evict the previous reload and use this reg. 
774 unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI, 775 AvailableSpills &Spills, 776 std::vector<MachineInstr*> &MaybeDeadStores, 777 SmallSet<unsigned, 8> &Rejected, 778 BitVector &RegKills, 779 std::vector<MachineOperand*> &KillOps, 780 VirtRegMap &VRM) { 781 const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget() 782 .getInstrInfo(); 783 784 if (Reuses.empty()) return PhysReg; // This is most often empty. 785 786 for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) { 787 ReusedOp &Op = Reuses[ro]; 788 // If we find some other reuse that was supposed to use this register 789 // exactly for its reload, we can change this reload to use ITS reload 790 // register. That is, unless its reload register has already been 791 // considered and subsequently rejected because it has also been reused 792 // by another operand. 793 if (Op.PhysRegReused == PhysReg && 794 Rejected.count(Op.AssignedPhysReg) == 0) { 795 // Yup, use the reload register that we didn't use before. 796 unsigned NewReg = Op.AssignedPhysReg; 797 Rejected.insert(PhysReg); 798 return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected, 799 RegKills, KillOps, VRM); 800 } else { 801 // Otherwise, we might also have a problem if a previously reused 802 // value aliases the new register. If so, codegen the previous reload 803 // and use this one. 804 unsigned PRRU = Op.PhysRegReused; 805 const TargetRegisterInfo *TRI = Spills.getRegInfo(); 806 if (TRI->areAliases(PRRU, PhysReg)) { 807 // Okay, we found out that an alias of a reused register 808 // was used. This isn't good because it means we have 809 // to undo a previous reuse. 810 MachineBasicBlock *MBB = MI->getParent(); 811 const TargetRegisterClass *AliasRC = 812 MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg); 813 814 // Copy Op out of the vector and remove it, we're going to insert an 815 // explicit load for it. 816 ReusedOp NewOp = Op; 817 Reuses.erase(Reuses.begin()+ro); 818 819 // Ok, we're going to try to reload the assigned physreg into the 820 // slot that we were supposed to in the first place. However, that 821 // register could hold a reuse. Check to see if it conflicts or 822 // would prefer us to use a different register. 823 unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg, 824 MI, Spills, MaybeDeadStores, 825 Rejected, RegKills, KillOps, VRM); 826 827 MachineBasicBlock::iterator MII = MI; 828 if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) { 829 ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI,VRM); 830 } else { 831 TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg, 832 NewOp.StackSlotOrReMat, AliasRC); 833 MachineInstr *LoadMI = prior(MII); 834 VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI); 835 // Any stores to this stack slot are not dead anymore. 836 MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL; 837 ++NumLoads; 838 } 839 Spills.ClobberPhysReg(NewPhysReg); 840 Spills.ClobberPhysReg(NewOp.PhysRegReused); 841 842 unsigned SubIdx = MI->getOperand(NewOp.Operand).getSubReg(); 843 unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg; 844 MI->getOperand(NewOp.Operand).setReg(RReg); 845 846 Spills.addAvailable(NewOp.StackSlotOrReMat, MI, NewPhysReg); 847 --MII; 848 UpdateKills(*MII, RegKills, KillOps, TRI); 849 DOUT << '\t' << *MII; 850 851 DOUT << "Reuse undone!\n"; 852 --NumReused; 853 854 // Finally, PhysReg is now available, go ahead and use it. 
855 return PhysReg; 856 } 857 } 858 } 859 return PhysReg; 860 } 861 862 /// GetRegForReload - Helper for the above GetRegForReload(). Add a 863 /// 'Rejected' set to remember which registers have been considered and 864 /// rejected for the reload. This avoids infinite looping in case like 865 /// this: 866 /// t1 := op t2, t3 867 /// t2 <- assigned r0 for use by the reload but ended up reuse r1 868 /// t3 <- assigned r1 for use by the reload but ended up reuse r0 869 /// t1 <- desires r1 870 /// sees r1 is taken by t2, tries t2's reload register r0 871 /// sees r0 is taken by t3, tries t3's reload register r1 872 /// sees r1 is taken by t2, tries t2's reload register r0 ... 873 unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI, 874 AvailableSpills &Spills, 875 std::vector<MachineInstr*> &MaybeDeadStores, 876 BitVector &RegKills, 877 std::vector<MachineOperand*> &KillOps, 878 VirtRegMap &VRM) { 879 SmallSet<unsigned, 8> Rejected; 880 return GetRegForReload(PhysReg, MI, Spills, MaybeDeadStores, Rejected, 881 RegKills, KillOps, VRM); 882 } 883 }; 884} 885 886/// PrepForUnfoldOpti - Turn a store folding instruction into a load folding 887/// instruction. e.g. 888/// xorl %edi, %eax 889/// movl %eax, -32(%ebp) 890/// movl -36(%ebp), %eax 891/// orl %eax, -32(%ebp) 892/// ==> 893/// xorl %edi, %eax 894/// orl -36(%ebp), %eax 895/// mov %eax, -32(%ebp) 896/// This enables unfolding optimization for a subsequent instruction which will 897/// also eliminate the newly introduced store instruction. 898bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB, 899 MachineBasicBlock::iterator &MII, 900 std::vector<MachineInstr*> &MaybeDeadStores, 901 AvailableSpills &Spills, 902 BitVector &RegKills, 903 std::vector<MachineOperand*> &KillOps, 904 VirtRegMap &VRM) { 905 MachineFunction &MF = *MBB.getParent(); 906 MachineInstr &MI = *MII; 907 unsigned UnfoldedOpc = 0; 908 unsigned UnfoldPR = 0; 909 unsigned UnfoldVR = 0; 910 int FoldedSS = VirtRegMap::NO_STACK_SLOT; 911 VirtRegMap::MI2VirtMapTy::const_iterator I, End; 912 for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) { 913 // Only transform a MI that folds a single register. 914 if (UnfoldedOpc) 915 return false; 916 UnfoldVR = I->second.first; 917 VirtRegMap::ModRef MR = I->second.second; 918 // MI2VirtMap be can updated which invalidate the iterator. 919 // Increment the iterator first. 920 ++I; 921 if (VRM.isAssignedReg(UnfoldVR)) 922 continue; 923 // If this reference is not a use, any previous store is now dead. 924 // Otherwise, the store to this stack slot is not dead anymore. 
925 FoldedSS = VRM.getStackSlot(UnfoldVR); 926 MachineInstr* DeadStore = MaybeDeadStores[FoldedSS]; 927 if (DeadStore && (MR & VirtRegMap::isModRef)) { 928 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS); 929 if (!PhysReg || !DeadStore->readsRegister(PhysReg)) 930 continue; 931 UnfoldPR = PhysReg; 932 UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), 933 false, true); 934 } 935 } 936 937 if (!UnfoldedOpc) 938 return false; 939 940 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 941 MachineOperand &MO = MI.getOperand(i); 942 if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse()) 943 continue; 944 unsigned VirtReg = MO.getReg(); 945 if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg()) 946 continue; 947 if (VRM.isAssignedReg(VirtReg)) { 948 unsigned PhysReg = VRM.getPhys(VirtReg); 949 if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR)) 950 return false; 951 } else if (VRM.isReMaterialized(VirtReg)) 952 continue; 953 int SS = VRM.getStackSlot(VirtReg); 954 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS); 955 if (PhysReg) { 956 if (TRI->regsOverlap(PhysReg, UnfoldPR)) 957 return false; 958 continue; 959 } 960 if (VRM.hasPhys(VirtReg)) { 961 PhysReg = VRM.getPhys(VirtReg); 962 if (!TRI->regsOverlap(PhysReg, UnfoldPR)) 963 continue; 964 } 965 966 // Ok, we'll need to reload the value into a register which makes 967 // it impossible to perform the store unfolding optimization later. 968 // Let's see if it is possible to fold the load if the store is 969 // unfolded. This allows us to perform the store unfolding 970 // optimization. 971 SmallVector<MachineInstr*, 4> NewMIs; 972 if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) { 973 assert(NewMIs.size() == 1); 974 MachineInstr *NewMI = NewMIs.back(); 975 NewMIs.clear(); 976 int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false); 977 assert(Idx != -1); 978 SmallVector<unsigned, 2> Ops; 979 Ops.push_back(Idx); 980 MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS); 981 if (FoldedMI) { 982 VRM.addSpillSlotUse(SS, FoldedMI); 983 if (!VRM.hasPhys(UnfoldVR)) 984 VRM.assignVirt2Phys(UnfoldVR, UnfoldPR); 985 VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef); 986 MII = MBB.insert(MII, FoldedMI); 987 InvalidateKills(MI, RegKills, KillOps); 988 VRM.RemoveMachineInstrFromMaps(&MI); 989 MBB.erase(&MI); 990 MF.DeleteMachineInstr(NewMI); 991 return true; 992 } 993 MF.DeleteMachineInstr(NewMI); 994 } 995 } 996 return false; 997} 998 999/// CommuteToFoldReload - 1000/// Look for 1001/// r1 = load fi#1 1002/// r1 = op r1, r2<kill> 1003/// store r1, fi#1 1004/// 1005/// If op is commutable and r2 is killed, then we can xform these to 1006/// r2 = op r2, fi#1 1007/// store r2, fi#1 1008bool LocalSpiller::CommuteToFoldReload(MachineBasicBlock &MBB, 1009 MachineBasicBlock::iterator &MII, 1010 unsigned VirtReg, unsigned SrcReg, int SS, 1011 BitVector &RegKills, 1012 std::vector<MachineOperand*> &KillOps, 1013 const TargetRegisterInfo *TRI, 1014 VirtRegMap &VRM) { 1015 if (MII == MBB.begin() || !MII->killsRegister(SrcReg)) 1016 return false; 1017 1018 MachineFunction &MF = *MBB.getParent(); 1019 MachineInstr &MI = *MII; 1020 MachineBasicBlock::iterator DefMII = prior(MII); 1021 MachineInstr *DefMI = DefMII; 1022 const TargetInstrDesc &TID = DefMI->getDesc(); 1023 unsigned NewDstIdx; 1024 if (DefMII != MBB.begin() && 1025 TID.isCommutable() && 1026 TII->CommuteChangesDestination(DefMI, NewDstIdx)) { 1027 MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx); 1028 
unsigned NewReg = NewDstMO.getReg(); 1029 if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg)) 1030 return false; 1031 MachineInstr *ReloadMI = prior(DefMII); 1032 int FrameIdx; 1033 unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx); 1034 if (DestReg != SrcReg || FrameIdx != SS) 1035 return false; 1036 int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false); 1037 if (UseIdx == -1) 1038 return false; 1039 int DefIdx = TID.getOperandConstraint(UseIdx, TOI::TIED_TO); 1040 if (DefIdx == -1) 1041 return false; 1042 assert(DefMI->getOperand(DefIdx).isReg() && 1043 DefMI->getOperand(DefIdx).getReg() == SrcReg); 1044 1045 // Now commute def instruction. 1046 MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true); 1047 if (!CommutedMI) 1048 return false; 1049 SmallVector<unsigned, 2> Ops; 1050 Ops.push_back(NewDstIdx); 1051 MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS); 1052 // Not needed since foldMemoryOperand returns new MI. 1053 MF.DeleteMachineInstr(CommutedMI); 1054 if (!FoldedMI) 1055 return false; 1056 1057 VRM.addSpillSlotUse(SS, FoldedMI); 1058 VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef); 1059 // Insert new def MI and spill MI. 1060 const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg); 1061 TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC); 1062 MII = prior(MII); 1063 MachineInstr *StoreMI = MII; 1064 VRM.addSpillSlotUse(SS, StoreMI); 1065 VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod); 1066 MII = MBB.insert(MII, FoldedMI); // Update MII to backtrack. 1067 1068 // Delete all 3 old instructions. 1069 InvalidateKills(*ReloadMI, RegKills, KillOps); 1070 VRM.RemoveMachineInstrFromMaps(ReloadMI); 1071 MBB.erase(ReloadMI); 1072 InvalidateKills(*DefMI, RegKills, KillOps); 1073 VRM.RemoveMachineInstrFromMaps(DefMI); 1074 MBB.erase(DefMI); 1075 InvalidateKills(MI, RegKills, KillOps); 1076 VRM.RemoveMachineInstrFromMaps(&MI); 1077 MBB.erase(&MI); 1078 1079 ++NumCommutes; 1080 return true; 1081 } 1082 1083 return false; 1084} 1085 1086/// findSuperReg - Find the SubReg's super-register of given register class 1087/// where its SubIdx sub-register is SubReg. 1088static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg, 1089 unsigned SubIdx, const TargetRegisterInfo *TRI) { 1090 for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); 1091 I != E; ++I) { 1092 unsigned Reg = *I; 1093 if (TRI->getSubReg(Reg, SubIdx) == SubReg) 1094 return Reg; 1095 } 1096 return 0; 1097} 1098 1099/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if 1100/// the last store to the same slot is now dead. If so, remove the last store. 1101void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB, 1102 MachineBasicBlock::iterator &MII, 1103 int Idx, unsigned PhysReg, int StackSlot, 1104 const TargetRegisterClass *RC, 1105 bool isAvailable, MachineInstr *&LastStore, 1106 AvailableSpills &Spills, 1107 SmallSet<MachineInstr*, 4> &ReMatDefs, 1108 BitVector &RegKills, 1109 std::vector<MachineOperand*> &KillOps, 1110 VirtRegMap &VRM) { 1111 TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC); 1112 MachineInstr *StoreMI = next(MII); 1113 VRM.addSpillSlotUse(StackSlot, StoreMI); 1114 DOUT << "Store:\t" << *StoreMI; 1115 1116 // If there is a dead store to this stack slot, nuke it now. 
1117 if (LastStore) { 1118 DOUT << "Removed dead store:\t" << *LastStore; 1119 ++NumDSE; 1120 SmallVector<unsigned, 2> KillRegs; 1121 InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs); 1122 MachineBasicBlock::iterator PrevMII = LastStore; 1123 bool CheckDef = PrevMII != MBB.begin(); 1124 if (CheckDef) 1125 --PrevMII; 1126 VRM.RemoveMachineInstrFromMaps(LastStore); 1127 MBB.erase(LastStore); 1128 if (CheckDef) { 1129 // Look at defs of killed registers on the store. Mark the defs 1130 // as dead since the store has been deleted and they aren't 1131 // being reused. 1132 for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) { 1133 bool HasOtherDef = false; 1134 if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) { 1135 MachineInstr *DeadDef = PrevMII; 1136 if (ReMatDefs.count(DeadDef) && !HasOtherDef) { 1137 // FIXME: This assumes a remat def does not have side 1138 // effects. 1139 VRM.RemoveMachineInstrFromMaps(DeadDef); 1140 MBB.erase(DeadDef); 1141 ++NumDRM; 1142 } 1143 } 1144 } 1145 } 1146 } 1147 1148 LastStore = next(MII); 1149 1150 // If the stack slot value was previously available in some other 1151 // register, change it now. Otherwise, make the register available, 1152 // in PhysReg. 1153 Spills.ModifyStackSlotOrReMat(StackSlot); 1154 Spills.ClobberPhysReg(PhysReg); 1155 Spills.addAvailable(StackSlot, LastStore, PhysReg, isAvailable); 1156 ++NumStores; 1157} 1158 1159/// TransferDeadness - A identity copy definition is dead and it's being 1160/// removed. Find the last def or use and mark it as dead / kill. 1161void LocalSpiller::TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist, 1162 unsigned Reg, BitVector &RegKills, 1163 std::vector<MachineOperand*> &KillOps) { 1164 int LastUDDist = -1; 1165 MachineInstr *LastUDMI = NULL; 1166 for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg), 1167 RE = RegInfo->reg_end(); RI != RE; ++RI) { 1168 MachineInstr *UDMI = &*RI; 1169 if (UDMI->getParent() != MBB) 1170 continue; 1171 DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI); 1172 if (DI == DistanceMap.end() || DI->second > CurDist) 1173 continue; 1174 if ((int)DI->second < LastUDDist) 1175 continue; 1176 LastUDDist = DI->second; 1177 LastUDMI = UDMI; 1178 } 1179 1180 if (LastUDMI) { 1181 const TargetInstrDesc &TID = LastUDMI->getDesc(); 1182 MachineOperand *LastUD = NULL; 1183 for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) { 1184 MachineOperand &MO = LastUDMI->getOperand(i); 1185 if (!MO.isReg() || MO.getReg() != Reg) 1186 continue; 1187 if (!LastUD || (LastUD->isUse() && MO.isDef())) 1188 LastUD = &MO; 1189 if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) 1190 return; 1191 } 1192 if (LastUD->isDef()) 1193 LastUD->setIsDead(); 1194 else { 1195 LastUD->setIsKill(); 1196 RegKills.set(Reg); 1197 KillOps[Reg] = LastUD; 1198 } 1199 } 1200} 1201 1202/// rewriteMBB - Keep track of which spills are available even after the 1203/// register allocator is done with them. If possible, avid reloading vregs. 1204void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { 1205 DOUT << MBB.getBasicBlock()->getName() << ":\n"; 1206 1207 MachineFunction &MF = *MBB.getParent(); 1208 1209 // Spills - Keep track of which spilled values are available in physregs so 1210 // that we can choose to reuse the physregs instead of emitting reloads. 1211 AvailableSpills Spills(TRI, TII); 1212 1213 // MaybeDeadStores - When we need to write a value back into a stack slot, 1214 // keep track of the inserted store. 
If the stack slot value is never read 1215 // (because the value was used from some available register, for example), and 1216 // subsequently stored to, the original store is dead. This map keeps track 1217 // of inserted stores that are not used. If we see a subsequent store to the 1218 // same stack slot, the original store is deleted. 1219 std::vector<MachineInstr*> MaybeDeadStores; 1220 MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL); 1221 1222 // ReMatDefs - These are rematerializable def MIs which are not deleted. 1223 SmallSet<MachineInstr*, 4> ReMatDefs; 1224 1225 // Keep track of kill information. 1226 BitVector RegKills(TRI->getNumRegs()); 1227 std::vector<MachineOperand*> KillOps; 1228 KillOps.resize(TRI->getNumRegs(), NULL); 1229 1230 unsigned Dist = 0; 1231 DistanceMap.clear(); 1232 for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end(); 1233 MII != E; ) { 1234 MachineBasicBlock::iterator NextMII = MII; ++NextMII; 1235 1236 VirtRegMap::MI2VirtMapTy::const_iterator I, End; 1237 bool Erased = false; 1238 bool BackTracked = false; 1239 if (PrepForUnfoldOpti(MBB, MII, 1240 MaybeDeadStores, Spills, RegKills, KillOps, VRM)) 1241 NextMII = next(MII); 1242 1243 MachineInstr &MI = *MII; 1244 const TargetInstrDesc &TID = MI.getDesc(); 1245 1246 if (VRM.hasEmergencySpills(&MI)) { 1247 // Spill physical register(s) in the rare case the allocator has run out 1248 // of registers to allocate. 1249 SmallSet<int, 4> UsedSS; 1250 std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI); 1251 for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) { 1252 unsigned PhysReg = EmSpills[i]; 1253 const TargetRegisterClass *RC = 1254 TRI->getPhysicalRegisterRegClass(PhysReg); 1255 assert(RC && "Unable to determine register class!"); 1256 int SS = VRM.getEmergencySpillSlot(RC); 1257 if (UsedSS.count(SS)) 1258 assert(0 && "Need to spill more than one physical registers!"); 1259 UsedSS.insert(SS); 1260 TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC); 1261 MachineInstr *StoreMI = prior(MII); 1262 VRM.addSpillSlotUse(SS, StoreMI); 1263 TII->loadRegFromStackSlot(MBB, next(MII), PhysReg, SS, RC); 1264 MachineInstr *LoadMI = next(MII); 1265 VRM.addSpillSlotUse(SS, LoadMI); 1266 ++NumPSpills; 1267 } 1268 NextMII = next(MII); 1269 } 1270 1271 // Insert restores here if asked to. 1272 if (VRM.isRestorePt(&MI)) { 1273 std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI); 1274 for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) { 1275 unsigned VirtReg = RestoreRegs[e-i-1]; // Reverse order. 1276 if (!VRM.getPreSplitReg(VirtReg)) 1277 continue; // Split interval spilled again. 1278 unsigned Phys = VRM.getPhys(VirtReg); 1279 RegInfo->setPhysRegUsed(Phys); 1280 if (VRM.isReMaterialized(VirtReg)) { 1281 ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM); 1282 } else { 1283 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); 1284 int SS = VRM.getStackSlot(VirtReg); 1285 TII->loadRegFromStackSlot(MBB, &MI, Phys, SS, RC); 1286 MachineInstr *LoadMI = prior(MII); 1287 VRM.addSpillSlotUse(SS, LoadMI); 1288 ++NumLoads; 1289 } 1290 // This invalidates Phys. 1291 Spills.ClobberPhysReg(Phys); 1292 UpdateKills(*prior(MII), RegKills, KillOps, TRI); 1293 DOUT << '\t' << *prior(MII); 1294 } 1295 } 1296 1297 // Insert spills here if asked to. 
1298 if (VRM.isSpillPt(&MI)) { 1299 std::vector<std::pair<unsigned,bool> > &SpillRegs = 1300 VRM.getSpillPtSpills(&MI); 1301 for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) { 1302 unsigned VirtReg = SpillRegs[i].first; 1303 bool isKill = SpillRegs[i].second; 1304 if (!VRM.getPreSplitReg(VirtReg)) 1305 continue; // Split interval spilled again. 1306 const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg); 1307 unsigned Phys = VRM.getPhys(VirtReg); 1308 int StackSlot = VRM.getStackSlot(VirtReg); 1309 TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC); 1310 MachineInstr *StoreMI = next(MII); 1311 VRM.addSpillSlotUse(StackSlot, StoreMI); 1312 DOUT << "Store:\t" << *StoreMI; 1313 VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod); 1314 } 1315 NextMII = next(MII); 1316 } 1317 1318 /// ReusedOperands - Keep track of operand reuse in case we need to undo 1319 /// reuse. 1320 ReuseInfo ReusedOperands(MI, TRI); 1321 SmallVector<unsigned, 4> VirtUseOps; 1322 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 1323 MachineOperand &MO = MI.getOperand(i); 1324 if (!MO.isReg() || MO.getReg() == 0) 1325 continue; // Ignore non-register operands. 1326 1327 unsigned VirtReg = MO.getReg(); 1328 if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) { 1329 // Ignore physregs for spilling, but remember that it is used by this 1330 // function. 1331 RegInfo->setPhysRegUsed(VirtReg); 1332 continue; 1333 } 1334 1335 // We want to process implicit virtual register uses first. 1336 if (MO.isImplicit()) 1337 // If the virtual register is implicitly defined, emit a implicit_def 1338 // before so scavenger knows it's "defined". 1339 VirtUseOps.insert(VirtUseOps.begin(), i); 1340 else 1341 VirtUseOps.push_back(i); 1342 } 1343 1344 // Process all of the spilled uses and all non spilled reg references. 1345 SmallVector<int, 2> PotentialDeadStoreSlots; 1346 for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) { 1347 unsigned i = VirtUseOps[j]; 1348 MachineOperand &MO = MI.getOperand(i); 1349 unsigned VirtReg = MO.getReg(); 1350 assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && 1351 "Not a virtual register?"); 1352 1353 unsigned SubIdx = MO.getSubReg(); 1354 if (VRM.isAssignedReg(VirtReg)) { 1355 // This virtual register was assigned a physreg! 1356 unsigned Phys = VRM.getPhys(VirtReg); 1357 RegInfo->setPhysRegUsed(Phys); 1358 if (MO.isDef()) 1359 ReusedOperands.markClobbered(Phys); 1360 unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys; 1361 MI.getOperand(i).setReg(RReg); 1362 if (VRM.isImplicitlyDefined(VirtReg)) 1363 BuildMI(MBB, &MI, TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg); 1364 continue; 1365 } 1366 1367 // This virtual register is now known to be a spilled value. 1368 if (!MO.isUse()) 1369 continue; // Handle defs in the loop below (handle use&def here though) 1370 1371 bool DoReMat = VRM.isReMaterialized(VirtReg); 1372 int SSorRMId = DoReMat 1373 ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg); 1374 int ReuseSlot = SSorRMId; 1375 1376 // Check to see if this stack slot is available. 1377 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId); 1378 1379 // If this is a sub-register use, make sure the reuse register is in the 1380 // right register class. For example, for x86 not all of the 32-bit 1381 // registers have accessible sub-registers. 1382 // Similarly so for EXTRACT_SUBREG. Consider this: 1383 // EDI = op 1384 // MOV32_mr fi#1, EDI 1385 // ... 
1386 // = EXTRACT_SUBREG fi#1 1387 // fi#1 is available in EDI, but it cannot be reused because it's not in 1388 // the right register file. 1389 if (PhysReg && 1390 (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) { 1391 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); 1392 if (!RC->contains(PhysReg)) 1393 PhysReg = 0; 1394 } 1395 1396 if (PhysReg) { 1397 // This spilled operand might be part of a two-address operand. If this 1398 // is the case, then changing it will necessarily require changing the 1399 // def part of the instruction as well. However, in some cases, we 1400 // aren't allowed to modify the reused register. If none of these cases 1401 // apply, reuse it. 1402 bool CanReuse = true; 1403 int ti = TID.getOperandConstraint(i, TOI::TIED_TO); 1404 if (ti != -1 && 1405 MI.getOperand(ti).isReg() && 1406 MI.getOperand(ti).getReg() == VirtReg) { 1407 // Okay, we have a two address operand. We can reuse this physreg as 1408 // long as we are allowed to clobber the value and there isn't an 1409 // earlier def that has already clobbered the physreg. 1410 CanReuse = Spills.canClobberPhysReg(ReuseSlot) && 1411 !ReusedOperands.isClobbered(PhysReg); 1412 } 1413 1414 if (CanReuse) { 1415 // If this stack slot value is already available, reuse it! 1416 if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT) 1417 DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1; 1418 else 1419 DOUT << "Reusing SS#" << ReuseSlot; 1420 DOUT << " from physreg " 1421 << TRI->getName(PhysReg) << " for vreg" 1422 << VirtReg <<" instead of reloading into physreg " 1423 << TRI->getName(VRM.getPhys(VirtReg)) << "\n"; 1424 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg; 1425 MI.getOperand(i).setReg(RReg); 1426 1427 // The only technical detail we have is that we don't know that 1428 // PhysReg won't be clobbered by a reloaded stack slot that occurs 1429 // later in the instruction. In particular, consider 'op V1, V2'. 1430 // If V1 is available in physreg R0, we would choose to reuse it 1431 // here, instead of reloading it into the register the allocator 1432 // indicated (say R1). However, V2 might have to be reloaded 1433 // later, and it might indicate that it needs to live in R0. When 1434 // this occurs, we need to have information available that 1435 // indicates it is safe to use R1 for the reload instead of R0. 1436 // 1437 // To further complicate matters, we might conflict with an alias, 1438 // or R0 and R1 might not be compatible with each other. In this 1439 // case, we actually insert a reload for V1 in R1, ensuring that 1440 // we can get at R0 or its alias. 1441 ReusedOperands.addReuse(i, ReuseSlot, PhysReg, 1442 VRM.getPhys(VirtReg), VirtReg); 1443 if (ti != -1) 1444 // Only mark it clobbered if this is a use&def operand. 1445 ReusedOperands.markClobbered(PhysReg); 1446 ++NumReused; 1447 1448 if (MI.getOperand(i).isKill() && 1449 ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) { 1450 1451 // The store of this spilled value is potentially dead, but we 1452 // won't know for certain until we've confirmed that the re-use 1453 // above is valid, which means waiting until the other operands 1454 // are processed. For now we just track the spill slot, we'll 1455 // remove it after the other operands are processed if valid. 1456 1457 PotentialDeadStoreSlots.push_back(ReuseSlot); 1458 } 1459 continue; 1460 } // CanReuse 1461 1462 // Otherwise we have a situation where we have a two-address instruction 1463 // whose mod/ref operand needs to be reloaded. 
This reload is already 1464 // available in some register "PhysReg", but if we used PhysReg as the 1465 // operand to our 2-addr instruction, the instruction would modify 1466 // PhysReg. This isn't cool if something later uses PhysReg and expects 1467 // to get its initial value. 1468 // 1469 // To avoid this problem, and to avoid doing a load right after a store, 1470 // we emit a copy from PhysReg into the designated register for this 1471 // operand. 1472 unsigned DesignatedReg = VRM.getPhys(VirtReg); 1473 assert(DesignatedReg && "Must map virtreg to physreg!"); 1474 1475 // Note that, if we reused a register for a previous operand, the 1476 // register we want to reload into might not actually be 1477 // available. If this occurs, use the register indicated by the 1478 // reuser. 1479 if (ReusedOperands.hasReuses()) 1480 DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI, 1481 Spills, MaybeDeadStores, RegKills, KillOps, VRM); 1482 1483 // If the mapped designated register is actually the physreg we have 1484 // incoming, we don't need to inserted a dead copy. 1485 if (DesignatedReg == PhysReg) { 1486 // If this stack slot value is already available, reuse it! 1487 if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT) 1488 DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1; 1489 else 1490 DOUT << "Reusing SS#" << ReuseSlot; 1491 DOUT << " from physreg " << TRI->getName(PhysReg) 1492 << " for vreg" << VirtReg 1493 << " instead of reloading into same physreg.\n"; 1494 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg; 1495 MI.getOperand(i).setReg(RReg); 1496 ReusedOperands.markClobbered(RReg); 1497 ++NumReused; 1498 continue; 1499 } 1500 1501 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); 1502 RegInfo->setPhysRegUsed(DesignatedReg); 1503 ReusedOperands.markClobbered(DesignatedReg); 1504 TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC); 1505 1506 MachineInstr *CopyMI = prior(MII); 1507 UpdateKills(*CopyMI, RegKills, KillOps, TRI); 1508 1509 // This invalidates DesignatedReg. 1510 Spills.ClobberPhysReg(DesignatedReg); 1511 1512 Spills.addAvailable(ReuseSlot, &MI, DesignatedReg); 1513 unsigned RReg = 1514 SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg; 1515 MI.getOperand(i).setReg(RReg); 1516 DOUT << '\t' << *prior(MII); 1517 ++NumReused; 1518 continue; 1519 } // if (PhysReg) 1520 1521 // Otherwise, reload it and remember that we have it. 1522 PhysReg = VRM.getPhys(VirtReg); 1523 assert(PhysReg && "Must map virtreg to physreg!"); 1524 1525 // Note that, if we reused a register for a previous operand, the 1526 // register we want to reload into might not actually be 1527 // available. If this occurs, use the register indicated by the 1528 // reuser. 1529 if (ReusedOperands.hasReuses()) 1530 PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI, 1531 Spills, MaybeDeadStores, RegKills, KillOps, VRM); 1532 1533 RegInfo->setPhysRegUsed(PhysReg); 1534 ReusedOperands.markClobbered(PhysReg); 1535 if (DoReMat) { 1536 ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM); 1537 } else { 1538 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); 1539 TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC); 1540 MachineInstr *LoadMI = prior(MII); 1541 VRM.addSpillSlotUse(SSorRMId, LoadMI); 1542 ++NumLoads; 1543 } 1544 // This invalidates PhysReg. 1545 Spills.ClobberPhysReg(PhysReg); 1546 1547 // Any stores to this stack slot are not dead anymore. 
      if (!DoReMat)
        MaybeDeadStores[SSorRMId] = NULL;
      Spills.addAvailable(SSorRMId, &MI, PhysReg);
      // Assumes this is the last use.  IsKill will be unset if reg is reused
      // unless it's a two-address operand.
      if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
        MI.getOperand(i).setIsKill();
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      MI.getOperand(i).setReg(RReg);
      UpdateKills(*prior(MII), RegKills, KillOps, TRI);
      DOUT << '\t' << *prior(MII);
    }

    // Ok - now we can remove stores that have been confirmed dead.
    for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
      // This was the last use and the spilled value is still available
      // for reuse.  That means the spill was unnecessary!
      int PDSSlot = PotentialDeadStoreSlots[j];
      MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
      if (DeadStore) {
        DOUT << "Removed dead store:\t" << *DeadStore;
        InvalidateKills(*DeadStore, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(DeadStore);
        MBB.erase(DeadStore);
        MaybeDeadStores[PDSSlot] = NULL;
        ++NumDSE;
      }
    }


    DOUT << '\t' << MI;


    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.
    SmallSet<int, 2> FoldedSS;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      DOUT << "Folded vreg: " << VirtReg << " MR: " << MR;

      // MI2VirtMap can be updated, which would invalidate the iterator.
      // Increment the iterator first.
      ++I;
      int SS = VRM.getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
      DOUT << " - StackSlot: " << SS << "\n";

      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DOUT << "Promoted Load To Copy: " << MI;
            if (DestReg != InReg) {
              const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
              TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
              MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
              unsigned SubIdx = DefMO->getSubReg();
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              NextMII = &MI;
              --NextMII;  // backtrack to the copy.
              // Propagate the sub-register index over.
              if (SubIdx) {
                DefMO = NextMII->findRegisterDefOperand(DestReg);
                DefMO->setSubReg(SubIdx);
              }
              BackTracked = true;
            } else {
              DOUT << "Removing now-noop copy: " << MI;
              // Unset last kill since it's being reused.
              InvalidateKill(InReg, RegKills, KillOps);
            }

            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            goto ProcessNextInst;
          }
        } else {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          if (PhysReg &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
            MBB.insert(MII, NewMIs[0]);
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            goto ProcessNextInst;
          }
        }
      }

      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      if (DeadStore) {
        bool isDead = !(MR & VirtRegMap::isRef);
        MachineInstr *NewStore = NULL;
        if (MR & VirtRegMap::isModRef) {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          // We can reuse this physreg as long as we are allowed to clobber
          // the value and there isn't an earlier def that has already clobbered
          // the physreg.
          if (PhysReg &&
              !TII->isStoreToStackSlot(&MI, SS)) {  // Not profitable!
            MachineOperand *KillOpnd =
              DeadStore->findRegisterUseOperand(PhysReg, true);
            // Note, if the store is storing a sub-register, it's possible the
            // super-register is needed below.
            if (KillOpnd && !KillOpnd->getSubReg() &&
                TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
              MBB.insert(MII, NewMIs[0]);
              NewStore = NewMIs[1];
              MBB.insert(MII, NewStore);
              VRM.addSpillSlotUse(SS, NewStore);
              InvalidateKills(MI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              --NextMII;
              --NextMII;  // backtrack to the unfolded instruction.
              BackTracked = true;
              isDead = true;
            }
          }
        }

        if (isDead) {  // Previous store is dead.
          // If we get here, the store is dead, nuke it now.
          DOUT << "Removed dead store:\t" << *DeadStore;
          InvalidateKills(*DeadStore, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(DeadStore);
          MBB.erase(DeadStore);
          if (!NewStore)
            ++NumDSE;
        }

        MaybeDeadStores[SS] = NULL;
        if (NewStore) {
          // Treat this store as a spill merged into a copy.  That makes the
          // stack slot value available.
          VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
          goto ProcessNextInst;
        }
      }

      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy). If
        // so, realize that the vreg is available now, and add the store to the
        // MaybeDeadStore info.
        int StackSlot;
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");

            if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
                                    RegKills, KillOps, TRI, VRM)) {
              NextMII = next(MII);
              BackTracked = true;
              goto ProcessNextInst;
            }

            // Okay, this is certainly a store of SrcReg to [StackSlot].  Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now.  Otherwise, make the register
            // available in PhysReg.
            Spills.addAvailable(StackSlot, &MI, SrcReg, false/*!clobber*/);
          }
        }
      }
    }

    // Process all of the spilled defs.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isReg() && MO.getReg() && MO.isDef()))
        continue;

      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy.  If so, eliminate the
        // instruction before considering the dest reg to be changed.
        unsigned Src, Dst, SrcSR, DstSR;
        if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
          ++NumDCE;
          DOUT << "Removing now-noop copy: " << MI;
          SmallVector<unsigned, 2> KillRegs;
          InvalidateKills(MI, RegKills, KillOps, &KillRegs);
          if (MO.isDead() && !KillRegs.empty()) {
            // Source register or an implicit super/sub-register use is killed.
            assert(KillRegs[0] == Dst ||
                   TRI->isSubRegister(KillRegs[0], Dst) ||
                   TRI->isSuperRegister(KillRegs[0], Dst));
            // Last def is now dead.
            TransferDeadness(&MBB, Dist, Src, RegKills, KillOps);
          }
          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          Erased = true;
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }

        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register.  If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, &MI, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }

      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM.isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM.getStackSlot(VirtReg);
      const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
      unsigned PhysReg;
      int TiedOp = MI.getDesc().findTiedToSrcOperand(i);
      if (TiedOp != -1) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM.getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                      Spills, MaybeDeadStores, RegKills, KillOps, VRM);
        }
      }

      assert(PhysReg && "VR not assigned a physical register?");
      RegInfo->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);

      if (!MO.isDead()) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
                            LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
        NextMII = next(MII);

        // Check to see if this is a noop copy.  If so, eliminate the
        // instruction before considering the dest reg to be changed.
        {
          unsigned Src, Dst, SrcSR, DstSR;
          if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
            ++NumDCE;
            DOUT << "Removing now-noop copy: " << MI;
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            UpdateKills(*LastStore, RegKills, KillOps, TRI);
            goto ProcessNextInst;
          }
        }
      }
    }
  ProcessNextInst:
    DistanceMap.insert(std::make_pair(&MI, Dist++));
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
        UpdateKills(*II, RegKills, KillOps, TRI);
    }
    MII = NextMII;
  }
}

llvm::Spiller* llvm::createSpiller() {
  switch (SpillerOpt) {
  default: assert(0 && "Unreachable!");
  case local:
    return new LocalSpiller();
  case simple:
    return new SimpleSpiller();
  }
}