VirtRegMap.cpp revision 19fb06e74b84acd238aec9e48c6c9a8f476d1ee8
//===-- llvm/CodeGen/VirtRegMap.cpp - Virtual Register Map ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the VirtRegMap class.
//
// It also contains implementations of the Spiller interface, which, given a
// virtual register map and a machine function, eliminates all virtual
// references by replacing them with physical register references - adding
// spill code as necessary.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "VirtRegMap.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumSpills  , "Number of register spills");
STATISTIC(NumPSpills , "Number of physical register spills");
STATISTIC(NumReMats  , "Number of re-materializations");
STATISTIC(NumDRM     , "Number of re-materializable defs elided");
STATISTIC(NumStores  , "Number of stores added");
STATISTIC(NumLoads   , "Number of loads added");
STATISTIC(NumReused  , "Number of values reused");
STATISTIC(NumDSE     , "Number of dead stores elided");
STATISTIC(NumDCE     , "Number of copies elided");
STATISTIC(NumDSS     , "Number of dead spill slots removed");
STATISTIC(NumCommutes, "Number of instructions commuted");
STATISTIC(NumOmitted , "Number of reloads omitted");
STATISTIC(NumCopified, "Number of available reloads turned into copies");

namespace {
  enum SpillerName { simple, local };
}

static cl::opt<SpillerName>
SpillerOpt("spiller",
           cl::desc("Spiller to use: (default: local)"),
           cl::Prefix,
           cl::values(clEnumVal(simple, "simple spiller"),
                      clEnumVal(local,  "local spiller"),
                      clEnumValEnd),
           cl::init(local));

//===----------------------------------------------------------------------===//
//  VirtRegMap implementation
//===----------------------------------------------------------------------===//

VirtRegMap::VirtRegMap(MachineFunction &mf)
  : TII(*mf.getTarget().getInstrInfo()), MF(mf),
    Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT),
    Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
    Virt2SplitKillMap(0), ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1),
    LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) {
  SpillSlotToUsesMap.resize(8);
  ImplicitDefed.resize(MF.getRegInfo().getLastVirtReg()+1-
                       TargetRegisterInfo::FirstVirtualRegister);
  grow();
}

void VirtRegMap::grow() {
  unsigned LastVirtReg = MF.getRegInfo().getLastVirtReg();
  Virt2PhysMap.grow(LastVirtReg);
  Virt2StackSlotMap.grow(LastVirtReg);
  Virt2ReMatIdMap.grow(LastVirtReg);
  Virt2SplitMap.grow(LastVirtReg);
  Virt2SplitKillMap.grow(LastVirtReg);
  ReMatMap.grow(LastVirtReg);
ImplicitDefed.resize(LastVirtReg-TargetRegisterInfo::FirstVirtualRegister+1); 92} 93 94int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) { 95 assert(TargetRegisterInfo::isVirtualRegister(virtReg)); 96 assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT && 97 "attempt to assign stack slot to already spilled register"); 98 const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(virtReg); 99 int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(), 100 RC->getAlignment()); 101 if (LowSpillSlot == NO_STACK_SLOT) 102 LowSpillSlot = SS; 103 if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot) 104 HighSpillSlot = SS; 105 unsigned Idx = SS-LowSpillSlot; 106 while (Idx >= SpillSlotToUsesMap.size()) 107 SpillSlotToUsesMap.resize(SpillSlotToUsesMap.size()*2); 108 Virt2StackSlotMap[virtReg] = SS; 109 ++NumSpills; 110 return SS; 111} 112 113void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int SS) { 114 assert(TargetRegisterInfo::isVirtualRegister(virtReg)); 115 assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT && 116 "attempt to assign stack slot to already spilled register"); 117 assert((SS >= 0 || 118 (SS >= MF.getFrameInfo()->getObjectIndexBegin())) && 119 "illegal fixed frame index"); 120 Virt2StackSlotMap[virtReg] = SS; 121} 122 123int VirtRegMap::assignVirtReMatId(unsigned virtReg) { 124 assert(TargetRegisterInfo::isVirtualRegister(virtReg)); 125 assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT && 126 "attempt to assign re-mat id to already spilled register"); 127 Virt2ReMatIdMap[virtReg] = ReMatId; 128 return ReMatId++; 129} 130 131void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) { 132 assert(TargetRegisterInfo::isVirtualRegister(virtReg)); 133 assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT && 134 "attempt to assign re-mat id to already spilled register"); 135 Virt2ReMatIdMap[virtReg] = id; 136} 137 138int VirtRegMap::getEmergencySpillSlot(const TargetRegisterClass *RC) { 139 std::map<const TargetRegisterClass*, int>::iterator I = 140 EmergencySpillSlots.find(RC); 141 if (I != EmergencySpillSlots.end()) 142 return I->second; 143 int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(), 144 RC->getAlignment()); 145 if (LowSpillSlot == NO_STACK_SLOT) 146 LowSpillSlot = SS; 147 if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot) 148 HighSpillSlot = SS; 149 EmergencySpillSlots[RC] = SS; 150 return SS; 151} 152 153void VirtRegMap::addSpillSlotUse(int FI, MachineInstr *MI) { 154 if (!MF.getFrameInfo()->isFixedObjectIndex(FI)) { 155 // If FI < LowSpillSlot, this stack reference was produced by 156 // instruction selection and is not a spill 157 if (FI >= LowSpillSlot) { 158 assert(FI >= 0 && "Spill slot index should not be negative!"); 159 assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size() 160 && "Invalid spill slot"); 161 SpillSlotToUsesMap[FI-LowSpillSlot].insert(MI); 162 } 163 } 164} 165 166void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI, 167 MachineInstr *NewMI, ModRef MRInfo) { 168 // Move previous memory references folded to new instruction. 
169 MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI); 170 for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI), 171 E = MI2VirtMap.end(); I != E && I->first == OldMI; ) { 172 MI2VirtMap.insert(IP, std::make_pair(NewMI, I->second)); 173 MI2VirtMap.erase(I++); 174 } 175 176 // add new memory reference 177 MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo))); 178} 179 180void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo) { 181 MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(MI); 182 MI2VirtMap.insert(IP, std::make_pair(MI, std::make_pair(VirtReg, MRInfo))); 183} 184 185void VirtRegMap::RemoveMachineInstrFromMaps(MachineInstr *MI) { 186 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 187 MachineOperand &MO = MI->getOperand(i); 188 if (!MO.isFI()) 189 continue; 190 int FI = MO.getIndex(); 191 if (MF.getFrameInfo()->isFixedObjectIndex(FI)) 192 continue; 193 // This stack reference was produced by instruction selection and 194 // is not a spill 195 if (FI < LowSpillSlot) 196 continue; 197 assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size() 198 && "Invalid spill slot"); 199 SpillSlotToUsesMap[FI-LowSpillSlot].erase(MI); 200 } 201 MI2VirtMap.erase(MI); 202 SpillPt2VirtMap.erase(MI); 203 RestorePt2VirtMap.erase(MI); 204 EmergencySpillMap.erase(MI); 205} 206 207void VirtRegMap::print(std::ostream &OS) const { 208 const TargetRegisterInfo* TRI = MF.getTarget().getRegisterInfo(); 209 210 OS << "********** REGISTER MAP **********\n"; 211 for (unsigned i = TargetRegisterInfo::FirstVirtualRegister, 212 e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i) { 213 if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG) 214 OS << "[reg" << i << " -> " << TRI->getName(Virt2PhysMap[i]) 215 << "]\n"; 216 } 217 218 for (unsigned i = TargetRegisterInfo::FirstVirtualRegister, 219 e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i) 220 if (Virt2StackSlotMap[i] != VirtRegMap::NO_STACK_SLOT) 221 OS << "[reg" << i << " -> fi#" << Virt2StackSlotMap[i] << "]\n"; 222 OS << '\n'; 223} 224 225void VirtRegMap::dump() const { 226 print(cerr); 227} 228 229 230//===----------------------------------------------------------------------===// 231// Simple Spiller Implementation 232//===----------------------------------------------------------------------===// 233 234Spiller::~Spiller() {} 235 236namespace { 237 struct VISIBILITY_HIDDEN SimpleSpiller : public Spiller { 238 bool runOnMachineFunction(MachineFunction& mf, VirtRegMap &VRM); 239 }; 240} 241 242bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) { 243 DOUT << "********** REWRITE MACHINE CODE **********\n"; 244 DOUT << "********** Function: " << MF.getFunction()->getName() << '\n'; 245 const TargetMachine &TM = MF.getTarget(); 246 const TargetInstrInfo &TII = *TM.getInstrInfo(); 247 const TargetRegisterInfo &TRI = *TM.getRegisterInfo(); 248 249 250 // LoadedRegs - Keep track of which vregs are loaded, so that we only load 251 // each vreg once (in the case where a spilled vreg is used by multiple 252 // operands). This is always smaller than the number of operands to the 253 // current machine instr, so it should be small. 
254 std::vector<unsigned> LoadedRegs; 255 256 for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end(); 257 MBBI != E; ++MBBI) { 258 DOUT << MBBI->getBasicBlock()->getName() << ":\n"; 259 MachineBasicBlock &MBB = *MBBI; 260 for (MachineBasicBlock::iterator MII = MBB.begin(), 261 E = MBB.end(); MII != E; ++MII) { 262 MachineInstr &MI = *MII; 263 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 264 MachineOperand &MO = MI.getOperand(i); 265 if (MO.isReg() && MO.getReg()) { 266 if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) { 267 unsigned VirtReg = MO.getReg(); 268 unsigned SubIdx = MO.getSubReg(); 269 unsigned PhysReg = VRM.getPhys(VirtReg); 270 unsigned RReg = SubIdx ? TRI.getSubReg(PhysReg, SubIdx) : PhysReg; 271 if (!VRM.isAssignedReg(VirtReg)) { 272 int StackSlot = VRM.getStackSlot(VirtReg); 273 const TargetRegisterClass* RC = 274 MF.getRegInfo().getRegClass(VirtReg); 275 276 if (MO.isUse() && 277 std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg) 278 == LoadedRegs.end()) { 279 TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC); 280 MachineInstr *LoadMI = prior(MII); 281 VRM.addSpillSlotUse(StackSlot, LoadMI); 282 LoadedRegs.push_back(VirtReg); 283 ++NumLoads; 284 DOUT << '\t' << *LoadMI; 285 } 286 287 if (MO.isDef()) { 288 TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true, 289 StackSlot, RC); 290 MachineInstr *StoreMI = next(MII); 291 VRM.addSpillSlotUse(StackSlot, StoreMI); 292 ++NumStores; 293 } 294 } 295 MF.getRegInfo().setPhysRegUsed(RReg); 296 MI.getOperand(i).setReg(RReg); 297 } else { 298 MF.getRegInfo().setPhysRegUsed(MO.getReg()); 299 } 300 } 301 } 302 303 DOUT << '\t' << MI; 304 LoadedRegs.clear(); 305 } 306 } 307 return true; 308} 309 310//===----------------------------------------------------------------------===// 311// Local Spiller Implementation 312//===----------------------------------------------------------------------===// 313 314/// AvailableSpills - As the local spiller is scanning and rewriting an MBB from 315/// top down, keep track of which spills slots or remat are available in each 316/// register. 317/// 318/// Note that not all physregs are created equal here. In particular, some 319/// physregs are reloads that we are allowed to clobber or ignore at any time. 320/// Other physregs are values that the register allocated program is using that 321/// we cannot CHANGE, but we can read if we like. We keep track of this on a 322/// per-stack-slot / remat id basis as the low bit in the value of the 323/// SpillSlotsAvailable entries. The predicate 'canClobberPhysReg()' checks 324/// this bit and addAvailable sets it if. 325namespace { 326class VISIBILITY_HIDDEN AvailableSpills { 327 const TargetRegisterInfo *TRI; 328 const TargetInstrInfo *TII; 329 330 // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled 331 // or remat'ed virtual register values that are still available, due to being 332 // loaded or stored to, but not invalidated yet. 333 std::map<int, unsigned> SpillSlotsOrReMatsAvailable; 334 335 // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable, 336 // indicating which stack slot values are currently held by a physreg. This 337 // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a 338 // physreg is modified. 
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);
public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }

  /// clear - Reset the state.
  void clear() {
    SpillSlotsOrReMatsAvailable.clear();
    PhysRegsAvailable.clear();
  }

  const TargetRegisterInfo *getRegInfo() const { return TRI; }

  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }

  /// addAvailable - Mark that the specified stack slot / remat is available
  /// in the specified physreg. If CanClobber is true, the physreg can be
  /// modified at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat] = (Reg << 1) | (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "Remembering RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1;
    else
      DOUT << "Remembering SS#" << SlotOrReMat;
    DOUT << " in physreg " << TRI->getName(Reg) << "\n";
  }

  /// canClobberPhysReg - Return true if the spiller is allowed to change the
  /// value of the specified stackslot register if it desires. The specified
  /// stack slot must be available in a physreg for this query to make sense.
  bool canClobberPhysReg(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }

  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register. The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value. We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes. This removes information about which register the previous
  /// value for this slot lives in (as the previous value is dead now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);

  void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB);
};
}

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register. The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    I++;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n";
  }
}

/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases. The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}

/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " clobbered, invalidating ";
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
    else
      DOUT << "SS#" << SlotOrReMat << "\n";
  }
}

/// ClobberPhysReg - This is called when the specified physreg changes
/// value. We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}

/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes. This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove
  // this stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}

/// AddAvailableRegsToLiveIn - Availability information is being kept coming
/// into the specified MBB. Add available physical registers as live-ins
/// so the register scavenger and post-allocation scheduler are happy.
494void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB) { 495 for (std::multimap<unsigned, int>::iterator 496 I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end(); 497 I != E; ++I) { 498 unsigned Reg = (*I).first; 499 const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg); 500 // FIXME: A temporary workaround. We can't reuse available value if it's 501 // not safe to move the def of the virtual register's class. e.g. 502 // X86::RFP* register classes. Do not add it as a live-in. 503 if (!TII->isSafeToMoveRegClassDefs(RC)) 504 continue; 505 if (!MBB.isLiveIn(Reg)) 506 MBB.addLiveIn(Reg); 507 } 508} 509 510/// findSinglePredSuccessor - Return via reference a vector of machine basic 511/// blocks each of which is a successor of the specified BB and has no other 512/// predecessor. 513static void findSinglePredSuccessor(MachineBasicBlock *MBB, 514 SmallVectorImpl<MachineBasicBlock *> &Succs) { 515 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(), 516 SE = MBB->succ_end(); SI != SE; ++SI) { 517 MachineBasicBlock *SuccMBB = *SI; 518 if (SuccMBB->pred_size() == 1) 519 Succs.push_back(SuccMBB); 520 } 521} 522 523namespace { 524 /// LocalSpiller - This spiller does a simple pass over the machine basic 525 /// block to attempt to keep spills in registers as much as possible for 526 /// blocks that have low register pressure (the vreg may be spilled due to 527 /// register pressure in other blocks). 528 class VISIBILITY_HIDDEN LocalSpiller : public Spiller { 529 MachineRegisterInfo *RegInfo; 530 const TargetRegisterInfo *TRI; 531 const TargetInstrInfo *TII; 532 DenseMap<MachineInstr*, unsigned> DistanceMap; 533 public: 534 bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) { 535 RegInfo = &MF.getRegInfo(); 536 TRI = MF.getTarget().getRegisterInfo(); 537 TII = MF.getTarget().getInstrInfo(); 538 DOUT << "\n**** Local spiller rewriting function '" 539 << MF.getFunction()->getName() << "':\n"; 540 DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)" 541 " ****\n"; 542 DEBUG(MF.dump()); 543 544 // Spills - Keep track of which spilled values are available in physregs 545 // so that we can choose to reuse the physregs instead of emitting 546 // reloads. This is usually refreshed per basic block. 547 AvailableSpills Spills(TRI, TII); 548 549 // SingleEntrySuccs - Successor blocks which have a single predecessor. 550 SmallVector<MachineBasicBlock*, 4> SinglePredSuccs; 551 SmallPtrSet<MachineBasicBlock*,16> EarlyVisited; 552 553 // Traverse the basic blocks depth first. 554 MachineBasicBlock *Entry = MF.begin(); 555 SmallPtrSet<MachineBasicBlock*,16> Visited; 556 for (df_ext_iterator<MachineBasicBlock*, 557 SmallPtrSet<MachineBasicBlock*,16> > 558 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited); 559 DFI != E; ++DFI) { 560 MachineBasicBlock *MBB = *DFI; 561 if (!EarlyVisited.count(MBB)) 562 RewriteMBB(*MBB, VRM, Spills); 563 564 // If this MBB is the only predecessor of a successor. Keep the 565 // availability information and visit it next. 566 do { 567 // Keep visiting single predecessor successor as long as possible. 568 SinglePredSuccs.clear(); 569 findSinglePredSuccessor(MBB, SinglePredSuccs); 570 if (SinglePredSuccs.empty()) 571 MBB = 0; 572 else { 573 // FIXME: More than one successors, each of which has MBB has 574 // the only predecessor. 
575 MBB = SinglePredSuccs[0]; 576 if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) { 577 Spills.AddAvailableRegsToLiveIn(*MBB); 578 RewriteMBB(*MBB, VRM, Spills); 579 } 580 } 581 } while (MBB); 582 583 // Clear the availability info. 584 Spills.clear(); 585 } 586 587 DOUT << "**** Post Machine Instrs ****\n"; 588 DEBUG(MF.dump()); 589 590 // Mark unused spill slots. 591 MachineFrameInfo *MFI = MF.getFrameInfo(); 592 int SS = VRM.getLowSpillSlot(); 593 if (SS != VirtRegMap::NO_STACK_SLOT) 594 for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS) 595 if (!VRM.isSpillSlotUsed(SS)) { 596 MFI->RemoveStackObject(SS); 597 ++NumDSS; 598 } 599 600 return true; 601 } 602 private: 603 void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist, 604 unsigned Reg, BitVector &RegKills, 605 std::vector<MachineOperand*> &KillOps); 606 bool PrepForUnfoldOpti(MachineBasicBlock &MBB, 607 MachineBasicBlock::iterator &MII, 608 std::vector<MachineInstr*> &MaybeDeadStores, 609 AvailableSpills &Spills, BitVector &RegKills, 610 std::vector<MachineOperand*> &KillOps, 611 VirtRegMap &VRM); 612 bool CommuteToFoldReload(MachineBasicBlock &MBB, 613 MachineBasicBlock::iterator &MII, 614 unsigned VirtReg, unsigned SrcReg, int SS, 615 BitVector &RegKills, 616 std::vector<MachineOperand*> &KillOps, 617 const TargetRegisterInfo *TRI, 618 VirtRegMap &VRM); 619 void SpillRegToStackSlot(MachineBasicBlock &MBB, 620 MachineBasicBlock::iterator &MII, 621 int Idx, unsigned PhysReg, int StackSlot, 622 const TargetRegisterClass *RC, 623 bool isAvailable, MachineInstr *&LastStore, 624 AvailableSpills &Spills, 625 SmallSet<MachineInstr*, 4> &ReMatDefs, 626 BitVector &RegKills, 627 std::vector<MachineOperand*> &KillOps, 628 VirtRegMap &VRM); 629 void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM, 630 AvailableSpills &Spills); 631 }; 632} 633 634/// InvalidateKills - MI is going to be deleted. If any of its operands are 635/// marked kill, then invalidate the information. 636static void InvalidateKills(MachineInstr &MI, BitVector &RegKills, 637 std::vector<MachineOperand*> &KillOps, 638 SmallVector<unsigned, 2> *KillRegs = NULL) { 639 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 640 MachineOperand &MO = MI.getOperand(i); 641 if (!MO.isReg() || !MO.isUse() || !MO.isKill()) 642 continue; 643 unsigned Reg = MO.getReg(); 644 if (TargetRegisterInfo::isVirtualRegister(Reg)) 645 continue; 646 if (KillRegs) 647 KillRegs->push_back(Reg); 648 assert(Reg < KillOps.size()); 649 if (KillOps[Reg] == &MO) { 650 RegKills.reset(Reg); 651 KillOps[Reg] = NULL; 652 } 653 } 654} 655 656/// InvalidateKill - A MI that defines the specified register is being deleted, 657/// invalidate the register kill information. 658static void InvalidateKill(unsigned Reg, BitVector &RegKills, 659 std::vector<MachineOperand*> &KillOps) { 660 if (RegKills[Reg]) { 661 KillOps[Reg]->setIsKill(false); 662 KillOps[Reg] = NULL; 663 RegKills.reset(Reg); 664 } 665} 666 667/// InvalidateRegDef - If the def operand of the specified def MI is now dead 668/// (since it's spill instruction is removed), mark it isDead. Also checks if 669/// the def MI has other definition operands that are not dead. Returns it by 670/// reference. 671static bool InvalidateRegDef(MachineBasicBlock::iterator I, 672 MachineInstr &NewDef, unsigned Reg, 673 bool &HasLiveDef) { 674 // Due to remat, it's possible this reg isn't being reused. That is, 675 // the def of this reg (by prev MI) is now dead. 
676 MachineInstr *DefMI = I; 677 MachineOperand *DefOp = NULL; 678 for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) { 679 MachineOperand &MO = DefMI->getOperand(i); 680 if (MO.isReg() && MO.isDef()) { 681 if (MO.getReg() == Reg) 682 DefOp = &MO; 683 else if (!MO.isDead()) 684 HasLiveDef = true; 685 } 686 } 687 if (!DefOp) 688 return false; 689 690 bool FoundUse = false, Done = false; 691 MachineBasicBlock::iterator E = &NewDef; 692 ++I; ++E; 693 for (; !Done && I != E; ++I) { 694 MachineInstr *NMI = I; 695 for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) { 696 MachineOperand &MO = NMI->getOperand(j); 697 if (!MO.isReg() || MO.getReg() != Reg) 698 continue; 699 if (MO.isUse()) 700 FoundUse = true; 701 Done = true; // Stop after scanning all the operands of this MI. 702 } 703 } 704 if (!FoundUse) { 705 // Def is dead! 706 DefOp->setIsDead(); 707 return true; 708 } 709 return false; 710} 711 712/// UpdateKills - Track and update kill info. If a MI reads a register that is 713/// marked kill, then it must be due to register reuse. Transfer the kill info 714/// over. 715static void UpdateKills(MachineInstr &MI, BitVector &RegKills, 716 std::vector<MachineOperand*> &KillOps, 717 const TargetRegisterInfo* TRI) { 718 const TargetInstrDesc &TID = MI.getDesc(); 719 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 720 MachineOperand &MO = MI.getOperand(i); 721 if (!MO.isReg() || !MO.isUse()) 722 continue; 723 unsigned Reg = MO.getReg(); 724 if (Reg == 0) 725 continue; 726 727 if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) { 728 // That can't be right. Register is killed but not re-defined and it's 729 // being reused. Let's fix that. 730 KillOps[Reg]->setIsKill(false); 731 KillOps[Reg] = NULL; 732 RegKills.reset(Reg); 733 if (i < TID.getNumOperands() && 734 TID.getOperandConstraint(i, TOI::TIED_TO) == -1) 735 // Unless it's a two-address operand, this is the new kill. 736 MO.setIsKill(); 737 } 738 if (MO.isKill()) { 739 RegKills.set(Reg); 740 KillOps[Reg] = &MO; 741 } 742 } 743 744 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 745 const MachineOperand &MO = MI.getOperand(i); 746 if (!MO.isReg() || !MO.isDef()) 747 continue; 748 unsigned Reg = MO.getReg(); 749 RegKills.reset(Reg); 750 KillOps[Reg] = NULL; 751 // It also defines (or partially define) aliases. 752 for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) { 753 RegKills.reset(*AS); 754 KillOps[*AS] = NULL; 755 } 756 } 757} 758 759/// ReMaterialize - Re-materialize definition for Reg targetting DestReg. 760/// 761static void ReMaterialize(MachineBasicBlock &MBB, 762 MachineBasicBlock::iterator &MII, 763 unsigned DestReg, unsigned Reg, 764 const TargetInstrInfo *TII, 765 const TargetRegisterInfo *TRI, 766 VirtRegMap &VRM) { 767 TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg)); 768 MachineInstr *NewMI = prior(MII); 769 for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) { 770 MachineOperand &MO = NewMI->getOperand(i); 771 if (!MO.isReg() || MO.getReg() == 0) 772 continue; 773 unsigned VirtReg = MO.getReg(); 774 if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) 775 continue; 776 assert(MO.isUse()); 777 unsigned SubIdx = MO.getSubReg(); 778 unsigned Phys = VRM.getPhys(VirtReg); 779 assert(Phys); 780 unsigned RReg = SubIdx ? 
TRI->getSubReg(Phys, SubIdx) : Phys; 781 MO.setReg(RReg); 782 } 783 ++NumReMats; 784} 785 786 787// ReusedOp - For each reused operand, we keep track of a bit of information, in 788// case we need to rollback upon processing a new operand. See comments below. 789namespace { 790 struct ReusedOp { 791 // The MachineInstr operand that reused an available value. 792 unsigned Operand; 793 794 // StackSlotOrReMat - The spill slot or remat id of the value being reused. 795 unsigned StackSlotOrReMat; 796 797 // PhysRegReused - The physical register the value was available in. 798 unsigned PhysRegReused; 799 800 // AssignedPhysReg - The physreg that was assigned for use by the reload. 801 unsigned AssignedPhysReg; 802 803 // VirtReg - The virtual register itself. 804 unsigned VirtReg; 805 806 ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr, 807 unsigned vreg) 808 : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr), 809 AssignedPhysReg(apr), VirtReg(vreg) {} 810 }; 811 812 /// ReuseInfo - This maintains a collection of ReuseOp's for each operand that 813 /// is reused instead of reloaded. 814 class VISIBILITY_HIDDEN ReuseInfo { 815 MachineInstr &MI; 816 std::vector<ReusedOp> Reuses; 817 BitVector PhysRegsClobbered; 818 public: 819 ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) { 820 PhysRegsClobbered.resize(tri->getNumRegs()); 821 } 822 823 bool hasReuses() const { 824 return !Reuses.empty(); 825 } 826 827 /// addReuse - If we choose to reuse a virtual register that is already 828 /// available instead of reloading it, remember that we did so. 829 void addReuse(unsigned OpNo, unsigned StackSlotOrReMat, 830 unsigned PhysRegReused, unsigned AssignedPhysReg, 831 unsigned VirtReg) { 832 // If the reload is to the assigned register anyway, no undo will be 833 // required. 834 if (PhysRegReused == AssignedPhysReg) return; 835 836 // Otherwise, remember this. 837 Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused, 838 AssignedPhysReg, VirtReg)); 839 } 840 841 void markClobbered(unsigned PhysReg) { 842 PhysRegsClobbered.set(PhysReg); 843 } 844 845 bool isClobbered(unsigned PhysReg) const { 846 return PhysRegsClobbered.test(PhysReg); 847 } 848 849 /// GetRegForReload - We are about to emit a reload into PhysReg. If there 850 /// is some other operand that is using the specified register, either pick 851 /// a new register to use, or evict the previous reload and use this reg. 852 unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI, 853 AvailableSpills &Spills, 854 std::vector<MachineInstr*> &MaybeDeadStores, 855 SmallSet<unsigned, 8> &Rejected, 856 BitVector &RegKills, 857 std::vector<MachineOperand*> &KillOps, 858 VirtRegMap &VRM) { 859 const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget() 860 .getInstrInfo(); 861 862 if (Reuses.empty()) return PhysReg; // This is most often empty. 863 864 for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) { 865 ReusedOp &Op = Reuses[ro]; 866 // If we find some other reuse that was supposed to use this register 867 // exactly for its reload, we can change this reload to use ITS reload 868 // register. That is, unless its reload register has already been 869 // considered and subsequently rejected because it has also been reused 870 // by another operand. 871 if (Op.PhysRegReused == PhysReg && 872 Rejected.count(Op.AssignedPhysReg) == 0) { 873 // Yup, use the reload register that we didn't use before. 
874 unsigned NewReg = Op.AssignedPhysReg; 875 Rejected.insert(PhysReg); 876 return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected, 877 RegKills, KillOps, VRM); 878 } else { 879 // Otherwise, we might also have a problem if a previously reused 880 // value aliases the new register. If so, codegen the previous reload 881 // and use this one. 882 unsigned PRRU = Op.PhysRegReused; 883 const TargetRegisterInfo *TRI = Spills.getRegInfo(); 884 if (TRI->areAliases(PRRU, PhysReg)) { 885 // Okay, we found out that an alias of a reused register 886 // was used. This isn't good because it means we have 887 // to undo a previous reuse. 888 MachineBasicBlock *MBB = MI->getParent(); 889 const TargetRegisterClass *AliasRC = 890 MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg); 891 892 // Copy Op out of the vector and remove it, we're going to insert an 893 // explicit load for it. 894 ReusedOp NewOp = Op; 895 Reuses.erase(Reuses.begin()+ro); 896 897 // Ok, we're going to try to reload the assigned physreg into the 898 // slot that we were supposed to in the first place. However, that 899 // register could hold a reuse. Check to see if it conflicts or 900 // would prefer us to use a different register. 901 unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg, 902 MI, Spills, MaybeDeadStores, 903 Rejected, RegKills, KillOps, VRM); 904 905 MachineBasicBlock::iterator MII = MI; 906 if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) { 907 ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI,VRM); 908 } else { 909 TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg, 910 NewOp.StackSlotOrReMat, AliasRC); 911 MachineInstr *LoadMI = prior(MII); 912 VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI); 913 // Any stores to this stack slot are not dead anymore. 914 MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL; 915 ++NumLoads; 916 } 917 Spills.ClobberPhysReg(NewPhysReg); 918 Spills.ClobberPhysReg(NewOp.PhysRegReused); 919 920 unsigned SubIdx = MI->getOperand(NewOp.Operand).getSubReg(); 921 unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg; 922 MI->getOperand(NewOp.Operand).setReg(RReg); 923 924 Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg); 925 --MII; 926 UpdateKills(*MII, RegKills, KillOps, TRI); 927 DOUT << '\t' << *MII; 928 929 DOUT << "Reuse undone!\n"; 930 --NumReused; 931 932 // Finally, PhysReg is now available, go ahead and use it. 933 return PhysReg; 934 } 935 } 936 } 937 return PhysReg; 938 } 939 940 /// GetRegForReload - Helper for the above GetRegForReload(). Add a 941 /// 'Rejected' set to remember which registers have been considered and 942 /// rejected for the reload. This avoids infinite looping in case like 943 /// this: 944 /// t1 := op t2, t3 945 /// t2 <- assigned r0 for use by the reload but ended up reuse r1 946 /// t3 <- assigned r1 for use by the reload but ended up reuse r0 947 /// t1 <- desires r1 948 /// sees r1 is taken by t2, tries t2's reload register r0 949 /// sees r0 is taken by t3, tries t3's reload register r1 950 /// sees r1 is taken by t2, tries t2's reload register r0 ... 
951 unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI, 952 AvailableSpills &Spills, 953 std::vector<MachineInstr*> &MaybeDeadStores, 954 BitVector &RegKills, 955 std::vector<MachineOperand*> &KillOps, 956 VirtRegMap &VRM) { 957 SmallSet<unsigned, 8> Rejected; 958 return GetRegForReload(PhysReg, MI, Spills, MaybeDeadStores, Rejected, 959 RegKills, KillOps, VRM); 960 } 961 }; 962} 963 964/// PrepForUnfoldOpti - Turn a store folding instruction into a load folding 965/// instruction. e.g. 966/// xorl %edi, %eax 967/// movl %eax, -32(%ebp) 968/// movl -36(%ebp), %eax 969/// orl %eax, -32(%ebp) 970/// ==> 971/// xorl %edi, %eax 972/// orl -36(%ebp), %eax 973/// mov %eax, -32(%ebp) 974/// This enables unfolding optimization for a subsequent instruction which will 975/// also eliminate the newly introduced store instruction. 976bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB, 977 MachineBasicBlock::iterator &MII, 978 std::vector<MachineInstr*> &MaybeDeadStores, 979 AvailableSpills &Spills, 980 BitVector &RegKills, 981 std::vector<MachineOperand*> &KillOps, 982 VirtRegMap &VRM) { 983 MachineFunction &MF = *MBB.getParent(); 984 MachineInstr &MI = *MII; 985 unsigned UnfoldedOpc = 0; 986 unsigned UnfoldPR = 0; 987 unsigned UnfoldVR = 0; 988 int FoldedSS = VirtRegMap::NO_STACK_SLOT; 989 VirtRegMap::MI2VirtMapTy::const_iterator I, End; 990 for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) { 991 // Only transform a MI that folds a single register. 992 if (UnfoldedOpc) 993 return false; 994 UnfoldVR = I->second.first; 995 VirtRegMap::ModRef MR = I->second.second; 996 // MI2VirtMap be can updated which invalidate the iterator. 997 // Increment the iterator first. 998 ++I; 999 if (VRM.isAssignedReg(UnfoldVR)) 1000 continue; 1001 // If this reference is not a use, any previous store is now dead. 1002 // Otherwise, the store to this stack slot is not dead anymore. 1003 FoldedSS = VRM.getStackSlot(UnfoldVR); 1004 MachineInstr* DeadStore = MaybeDeadStores[FoldedSS]; 1005 if (DeadStore && (MR & VirtRegMap::isModRef)) { 1006 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS); 1007 if (!PhysReg || !DeadStore->readsRegister(PhysReg)) 1008 continue; 1009 UnfoldPR = PhysReg; 1010 UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), 1011 false, true); 1012 } 1013 } 1014 1015 if (!UnfoldedOpc) 1016 return false; 1017 1018 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 1019 MachineOperand &MO = MI.getOperand(i); 1020 if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse()) 1021 continue; 1022 unsigned VirtReg = MO.getReg(); 1023 if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg()) 1024 continue; 1025 if (VRM.isAssignedReg(VirtReg)) { 1026 unsigned PhysReg = VRM.getPhys(VirtReg); 1027 if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR)) 1028 return false; 1029 } else if (VRM.isReMaterialized(VirtReg)) 1030 continue; 1031 int SS = VRM.getStackSlot(VirtReg); 1032 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS); 1033 if (PhysReg) { 1034 if (TRI->regsOverlap(PhysReg, UnfoldPR)) 1035 return false; 1036 continue; 1037 } 1038 if (VRM.hasPhys(VirtReg)) { 1039 PhysReg = VRM.getPhys(VirtReg); 1040 if (!TRI->regsOverlap(PhysReg, UnfoldPR)) 1041 continue; 1042 } 1043 1044 // Ok, we'll need to reload the value into a register which makes 1045 // it impossible to perform the store unfolding optimization later. 1046 // Let's see if it is possible to fold the load if the store is 1047 // unfolded. 
This allows us to perform the store unfolding 1048 // optimization. 1049 SmallVector<MachineInstr*, 4> NewMIs; 1050 if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) { 1051 assert(NewMIs.size() == 1); 1052 MachineInstr *NewMI = NewMIs.back(); 1053 NewMIs.clear(); 1054 int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false); 1055 assert(Idx != -1); 1056 SmallVector<unsigned, 1> Ops; 1057 Ops.push_back(Idx); 1058 MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS); 1059 if (FoldedMI) { 1060 VRM.addSpillSlotUse(SS, FoldedMI); 1061 if (!VRM.hasPhys(UnfoldVR)) 1062 VRM.assignVirt2Phys(UnfoldVR, UnfoldPR); 1063 VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef); 1064 MII = MBB.insert(MII, FoldedMI); 1065 InvalidateKills(MI, RegKills, KillOps); 1066 VRM.RemoveMachineInstrFromMaps(&MI); 1067 MBB.erase(&MI); 1068 MF.DeleteMachineInstr(NewMI); 1069 return true; 1070 } 1071 MF.DeleteMachineInstr(NewMI); 1072 } 1073 } 1074 return false; 1075} 1076 1077/// CommuteToFoldReload - 1078/// Look for 1079/// r1 = load fi#1 1080/// r1 = op r1, r2<kill> 1081/// store r1, fi#1 1082/// 1083/// If op is commutable and r2 is killed, then we can xform these to 1084/// r2 = op r2, fi#1 1085/// store r2, fi#1 1086bool LocalSpiller::CommuteToFoldReload(MachineBasicBlock &MBB, 1087 MachineBasicBlock::iterator &MII, 1088 unsigned VirtReg, unsigned SrcReg, int SS, 1089 BitVector &RegKills, 1090 std::vector<MachineOperand*> &KillOps, 1091 const TargetRegisterInfo *TRI, 1092 VirtRegMap &VRM) { 1093 if (MII == MBB.begin() || !MII->killsRegister(SrcReg)) 1094 return false; 1095 1096 MachineFunction &MF = *MBB.getParent(); 1097 MachineInstr &MI = *MII; 1098 MachineBasicBlock::iterator DefMII = prior(MII); 1099 MachineInstr *DefMI = DefMII; 1100 const TargetInstrDesc &TID = DefMI->getDesc(); 1101 unsigned NewDstIdx; 1102 if (DefMII != MBB.begin() && 1103 TID.isCommutable() && 1104 TII->CommuteChangesDestination(DefMI, NewDstIdx)) { 1105 MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx); 1106 unsigned NewReg = NewDstMO.getReg(); 1107 if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg)) 1108 return false; 1109 MachineInstr *ReloadMI = prior(DefMII); 1110 int FrameIdx; 1111 unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx); 1112 if (DestReg != SrcReg || FrameIdx != SS) 1113 return false; 1114 int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false); 1115 if (UseIdx == -1) 1116 return false; 1117 int DefIdx = TID.getOperandConstraint(UseIdx, TOI::TIED_TO); 1118 if (DefIdx == -1) 1119 return false; 1120 assert(DefMI->getOperand(DefIdx).isReg() && 1121 DefMI->getOperand(DefIdx).getReg() == SrcReg); 1122 1123 // Now commute def instruction. 1124 MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true); 1125 if (!CommutedMI) 1126 return false; 1127 SmallVector<unsigned, 1> Ops; 1128 Ops.push_back(NewDstIdx); 1129 MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS); 1130 // Not needed since foldMemoryOperand returns new MI. 1131 MF.DeleteMachineInstr(CommutedMI); 1132 if (!FoldedMI) 1133 return false; 1134 1135 VRM.addSpillSlotUse(SS, FoldedMI); 1136 VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef); 1137 // Insert new def MI and spill MI. 
1138 const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg); 1139 TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC); 1140 MII = prior(MII); 1141 MachineInstr *StoreMI = MII; 1142 VRM.addSpillSlotUse(SS, StoreMI); 1143 VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod); 1144 MII = MBB.insert(MII, FoldedMI); // Update MII to backtrack. 1145 1146 // Delete all 3 old instructions. 1147 InvalidateKills(*ReloadMI, RegKills, KillOps); 1148 VRM.RemoveMachineInstrFromMaps(ReloadMI); 1149 MBB.erase(ReloadMI); 1150 InvalidateKills(*DefMI, RegKills, KillOps); 1151 VRM.RemoveMachineInstrFromMaps(DefMI); 1152 MBB.erase(DefMI); 1153 InvalidateKills(MI, RegKills, KillOps); 1154 VRM.RemoveMachineInstrFromMaps(&MI); 1155 MBB.erase(&MI); 1156 1157 ++NumCommutes; 1158 return true; 1159 } 1160 1161 return false; 1162} 1163 1164/// findSuperReg - Find the SubReg's super-register of given register class 1165/// where its SubIdx sub-register is SubReg. 1166static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg, 1167 unsigned SubIdx, const TargetRegisterInfo *TRI) { 1168 for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); 1169 I != E; ++I) { 1170 unsigned Reg = *I; 1171 if (TRI->getSubReg(Reg, SubIdx) == SubReg) 1172 return Reg; 1173 } 1174 return 0; 1175} 1176 1177/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if 1178/// the last store to the same slot is now dead. If so, remove the last store. 1179void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB, 1180 MachineBasicBlock::iterator &MII, 1181 int Idx, unsigned PhysReg, int StackSlot, 1182 const TargetRegisterClass *RC, 1183 bool isAvailable, MachineInstr *&LastStore, 1184 AvailableSpills &Spills, 1185 SmallSet<MachineInstr*, 4> &ReMatDefs, 1186 BitVector &RegKills, 1187 std::vector<MachineOperand*> &KillOps, 1188 VirtRegMap &VRM) { 1189 TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC); 1190 MachineInstr *StoreMI = next(MII); 1191 VRM.addSpillSlotUse(StackSlot, StoreMI); 1192 DOUT << "Store:\t" << *StoreMI; 1193 1194 // If there is a dead store to this stack slot, nuke it now. 1195 if (LastStore) { 1196 DOUT << "Removed dead store:\t" << *LastStore; 1197 ++NumDSE; 1198 SmallVector<unsigned, 2> KillRegs; 1199 InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs); 1200 MachineBasicBlock::iterator PrevMII = LastStore; 1201 bool CheckDef = PrevMII != MBB.begin(); 1202 if (CheckDef) 1203 --PrevMII; 1204 VRM.RemoveMachineInstrFromMaps(LastStore); 1205 MBB.erase(LastStore); 1206 if (CheckDef) { 1207 // Look at defs of killed registers on the store. Mark the defs 1208 // as dead since the store has been deleted and they aren't 1209 // being reused. 1210 for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) { 1211 bool HasOtherDef = false; 1212 if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) { 1213 MachineInstr *DeadDef = PrevMII; 1214 if (ReMatDefs.count(DeadDef) && !HasOtherDef) { 1215 // FIXME: This assumes a remat def does not have side 1216 // effects. 1217 VRM.RemoveMachineInstrFromMaps(DeadDef); 1218 MBB.erase(DeadDef); 1219 ++NumDRM; 1220 } 1221 } 1222 } 1223 } 1224 } 1225 1226 LastStore = next(MII); 1227 1228 // If the stack slot value was previously available in some other 1229 // register, change it now. Otherwise, make the register available, 1230 // in PhysReg. 
  Spills.ModifyStackSlotOrReMat(StackSlot);
  Spills.ClobberPhysReg(PhysReg);
  Spills.addAvailable(StackSlot, PhysReg, isAvailable);
  ++NumStores;
}

/// TransferDeadness - An identity copy definition is dead and it's being
/// removed. Find the last def or use and mark it as dead / kill.
void LocalSpiller::TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                                    unsigned Reg, BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps) {
  int LastUDDist = -1;
  MachineInstr *LastUDMI = NULL;
  for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
         RE = RegInfo->reg_end(); RI != RE; ++RI) {
    MachineInstr *UDMI = &*RI;
    if (UDMI->getParent() != MBB)
      continue;
    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
    if (DI == DistanceMap.end() || DI->second > CurDist)
      continue;
    if ((int)DI->second < LastUDDist)
      continue;
    LastUDDist = DI->second;
    LastUDMI = UDMI;
  }

  if (LastUDMI) {
    const TargetInstrDesc &TID = LastUDMI->getDesc();
    MachineOperand *LastUD = NULL;
    for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = LastUDMI->getOperand(i);
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;
      if (!LastUD || (LastUD->isUse() && MO.isDef()))
        LastUD = &MO;
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1)
        return;
    }
    if (LastUD->isDef())
      LastUD->setIsDead();
    else {
      LastUD->setIsKill();
      RegKills.set(Reg);
      KillOps[Reg] = LastUD;
    }
  }
}

/// RewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them. If possible, avoid reloading vregs.
void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
                              AvailableSpills &Spills) {
  DOUT << "\n**** Local spiller rewriting MBB '"
       << MBB.getBasicBlock()->getName() << ":\n";

  MachineFunction &MF = *MBB.getParent();

  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store. If the stack slot value is never read
  // (because the value was used from some available register, for example),
  // and subsequently stored to, the original store is dead. This map keeps
  // track of inserted stores that are not used. If we see a subsequent store
  // to the same stack slot, the original store is deleted.
  std::vector<MachineInstr*> MaybeDeadStores;
  MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

  // ReMatDefs - These are rematerializable def MIs which are not deleted.
  SmallSet<MachineInstr*, 4> ReMatDefs;

  // Keep track of kill information.
1302 BitVector RegKills(TRI->getNumRegs()); 1303 std::vector<MachineOperand*> KillOps; 1304 KillOps.resize(TRI->getNumRegs(), NULL); 1305 1306 unsigned Dist = 0; 1307 DistanceMap.clear(); 1308 for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end(); 1309 MII != E; ) { 1310 MachineBasicBlock::iterator NextMII = MII; ++NextMII; 1311 1312 VirtRegMap::MI2VirtMapTy::const_iterator I, End; 1313 bool Erased = false; 1314 bool BackTracked = false; 1315 if (PrepForUnfoldOpti(MBB, MII, 1316 MaybeDeadStores, Spills, RegKills, KillOps, VRM)) 1317 NextMII = next(MII); 1318 1319 MachineInstr &MI = *MII; 1320 const TargetInstrDesc &TID = MI.getDesc(); 1321 1322 if (VRM.hasEmergencySpills(&MI)) { 1323 // Spill physical register(s) in the rare case the allocator has run out 1324 // of registers to allocate. 1325 SmallSet<int, 4> UsedSS; 1326 std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI); 1327 for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) { 1328 unsigned PhysReg = EmSpills[i]; 1329 const TargetRegisterClass *RC = 1330 TRI->getPhysicalRegisterRegClass(PhysReg); 1331 assert(RC && "Unable to determine register class!"); 1332 int SS = VRM.getEmergencySpillSlot(RC); 1333 if (UsedSS.count(SS)) 1334 assert(0 && "Need to spill more than one physical registers!"); 1335 UsedSS.insert(SS); 1336 TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC); 1337 MachineInstr *StoreMI = prior(MII); 1338 VRM.addSpillSlotUse(SS, StoreMI); 1339 TII->loadRegFromStackSlot(MBB, next(MII), PhysReg, SS, RC); 1340 MachineInstr *LoadMI = next(MII); 1341 VRM.addSpillSlotUse(SS, LoadMI); 1342 ++NumPSpills; 1343 } 1344 NextMII = next(MII); 1345 } 1346 1347 // Insert restores here if asked to. 1348 if (VRM.isRestorePt(&MI)) { 1349 std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI); 1350 for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) { 1351 unsigned VirtReg = RestoreRegs[e-i-1]; // Reverse order. 1352 if (!VRM.getPreSplitReg(VirtReg)) 1353 continue; // Split interval spilled again. 1354 unsigned Phys = VRM.getPhys(VirtReg); 1355 RegInfo->setPhysRegUsed(Phys); 1356 1357 // Check if the value being restored if available. If so, it must be 1358 // from a predecessor BB that fallthrough into this BB. We do not 1359 // expect: 1360 // BB1: 1361 // r1 = load fi#1 1362 // ... 1363 // = r1<kill> 1364 // ... # r1 not clobbered 1365 // ... 1366 // = load fi#1 1367 bool DoReMat = VRM.isReMaterialized(VirtReg); 1368 int SSorRMId = DoReMat 1369 ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg); 1370 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); 1371 // FIXME: A temporary workaround. Don't reuse available value if it's 1372 // not safe to move the def of the virtual register's class. e.g. 1373 // X86::RFP* register classes. 1374 unsigned InReg = TII->isSafeToMoveRegClassDefs(RC) ? 1375 Spills.getSpillSlotOrReMatPhysReg(SSorRMId) : 0; 1376 if (InReg == Phys) { 1377 // If the value is already available in the expected register, save 1378 // a reload / remat. 
1379 if (SSorRMId) 1380 DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1; 1381 else 1382 DOUT << "Reusing SS#" << SSorRMId; 1383 DOUT << " from physreg " 1384 << TRI->getName(InReg) << " for vreg" 1385 << VirtReg <<" instead of reloading into physreg " 1386 << TRI->getName(Phys) << "\n"; 1387 ++NumOmitted; 1388 continue; 1389 } else if (InReg && InReg != Phys) { 1390 if (SSorRMId) 1391 DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1; 1392 else 1393 DOUT << "Reusing SS#" << SSorRMId; 1394 DOUT << " from physreg " 1395 << TRI->getName(InReg) << " for vreg" 1396 << VirtReg <<" by copying it into physreg " 1397 << TRI->getName(Phys) << "\n"; 1398 1399 // If the reloaded / remat value is available in another register, 1400 // copy it to the desired register. 1401 TII->copyRegToReg(MBB, &MI, Phys, InReg, RC, RC); 1402 1403 // This invalidates Phys. 1404 Spills.ClobberPhysReg(Phys); 1405 // Remember it's available. 1406 Spills.addAvailable(SSorRMId, Phys); 1407 1408 // Mark is killed. 1409 MachineInstr *CopyMI = prior(MII); 1410 MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg); 1411 KillOpnd->setIsKill(); 1412 UpdateKills(*CopyMI, RegKills, KillOps, TRI); 1413 1414 DOUT << '\t' << *CopyMI; 1415 ++NumCopified; 1416 continue; 1417 } 1418 1419 if (VRM.isReMaterialized(VirtReg)) { 1420 ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM); 1421 } else { 1422 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); 1423 TII->loadRegFromStackSlot(MBB, &MI, Phys, SSorRMId, RC); 1424 MachineInstr *LoadMI = prior(MII); 1425 VRM.addSpillSlotUse(SSorRMId, LoadMI); 1426 ++NumLoads; 1427 } 1428 1429 // This invalidates Phys. 1430 Spills.ClobberPhysReg(Phys); 1431 // Remember it's available. 1432 Spills.addAvailable(SSorRMId, Phys); 1433 1434 UpdateKills(*prior(MII), RegKills, KillOps, TRI); 1435 DOUT << '\t' << *prior(MII); 1436 } 1437 } 1438 1439 // Insert spills here if asked to. 1440 if (VRM.isSpillPt(&MI)) { 1441 std::vector<std::pair<unsigned,bool> > &SpillRegs = 1442 VRM.getSpillPtSpills(&MI); 1443 for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) { 1444 unsigned VirtReg = SpillRegs[i].first; 1445 bool isKill = SpillRegs[i].second; 1446 if (!VRM.getPreSplitReg(VirtReg)) 1447 continue; // Split interval spilled again. 1448 const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg); 1449 unsigned Phys = VRM.getPhys(VirtReg); 1450 int StackSlot = VRM.getStackSlot(VirtReg); 1451 TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC); 1452 MachineInstr *StoreMI = next(MII); 1453 VRM.addSpillSlotUse(StackSlot, StoreMI); 1454 DOUT << "Store:\t" << *StoreMI; 1455 VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod); 1456 } 1457 NextMII = next(MII); 1458 } 1459 1460 /// ReusedOperands - Keep track of operand reuse in case we need to undo 1461 /// reuse. 1462 ReuseInfo ReusedOperands(MI, TRI); 1463 SmallVector<unsigned, 4> VirtUseOps; 1464 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 1465 MachineOperand &MO = MI.getOperand(i); 1466 if (!MO.isReg() || MO.getReg() == 0) 1467 continue; // Ignore non-register operands. 1468 1469 unsigned VirtReg = MO.getReg(); 1470 if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) { 1471 // Ignore physregs for spilling, but remember that it is used by this 1472 // function. 1473 RegInfo->setPhysRegUsed(VirtReg); 1474 continue; 1475 } 1476 1477 // We want to process implicit virtual register uses first. 
1478 if (MO.isImplicit()) 1479 // If the virtual register is implicitly defined, emit a implicit_def 1480 // before so scavenger knows it's "defined". 1481 VirtUseOps.insert(VirtUseOps.begin(), i); 1482 else 1483 VirtUseOps.push_back(i); 1484 } 1485 1486 // Process all of the spilled uses and all non spilled reg references. 1487 SmallVector<int, 2> PotentialDeadStoreSlots; 1488 for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) { 1489 unsigned i = VirtUseOps[j]; 1490 MachineOperand &MO = MI.getOperand(i); 1491 unsigned VirtReg = MO.getReg(); 1492 assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && 1493 "Not a virtual register?"); 1494 1495 unsigned SubIdx = MO.getSubReg(); 1496 if (VRM.isAssignedReg(VirtReg)) { 1497 // This virtual register was assigned a physreg! 1498 unsigned Phys = VRM.getPhys(VirtReg); 1499 RegInfo->setPhysRegUsed(Phys); 1500 if (MO.isDef()) 1501 ReusedOperands.markClobbered(Phys); 1502 unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys; 1503 MI.getOperand(i).setReg(RReg); 1504 if (VRM.isImplicitlyDefined(VirtReg)) 1505 BuildMI(MBB, &MI, MI.getDebugLoc(), 1506 TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg); 1507 continue; 1508 } 1509 1510 // This virtual register is now known to be a spilled value. 1511 if (!MO.isUse()) 1512 continue; // Handle defs in the loop below (handle use&def here though) 1513 1514 bool DoReMat = VRM.isReMaterialized(VirtReg); 1515 int SSorRMId = DoReMat 1516 ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg); 1517 int ReuseSlot = SSorRMId; 1518 1519 // Check to see if this stack slot is available. 1520 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId); 1521 1522 // If this is a sub-register use, make sure the reuse register is in the 1523 // right register class. For example, for x86 not all of the 32-bit 1524 // registers have accessible sub-registers. 1525 // Similarly so for EXTRACT_SUBREG. Consider this: 1526 // EDI = op 1527 // MOV32_mr fi#1, EDI 1528 // ... 1529 // = EXTRACT_SUBREG fi#1 1530 // fi#1 is available in EDI, but it cannot be reused because it's not in 1531 // the right register file. 1532 if (PhysReg && 1533 (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) { 1534 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); 1535 if (!RC->contains(PhysReg)) 1536 PhysReg = 0; 1537 } 1538 1539 if (PhysReg) { 1540 // This spilled operand might be part of a two-address operand. If this 1541 // is the case, then changing it will necessarily require changing the 1542 // def part of the instruction as well. However, in some cases, we 1543 // aren't allowed to modify the reused register. If none of these cases 1544 // apply, reuse it. 1545 bool CanReuse = true; 1546 int ti = TID.getOperandConstraint(i, TOI::TIED_TO); 1547 if (ti != -1 && 1548 MI.getOperand(ti).isReg() && 1549 MI.getOperand(ti).getReg() == VirtReg) { 1550 // Okay, we have a two address operand. We can reuse this physreg as 1551 // long as we are allowed to clobber the value and there isn't an 1552 // earlier def that has already clobbered the physreg. 1553 CanReuse = Spills.canClobberPhysReg(ReuseSlot) && 1554 !ReusedOperands.isClobbered(PhysReg); 1555 } 1556 1557 if (CanReuse) { 1558 // If this stack slot value is already available, reuse it! 
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg "
               << TRI->getName(PhysReg) << " for vreg"
               << VirtReg << " instead of reloading into physreg "
               << TRI->getName(VRM.getPhys(VirtReg)) << "\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);

          // The only technical detail we have is that we don't know that
          // PhysReg won't be clobbered by a reloaded stack slot that occurs
          // later in the instruction. In particular, consider 'op V1, V2'.
          // If V1 is available in physreg R0, we would choose to reuse it
          // here, instead of reloading it into the register the allocator
          // indicated (say R1). However, V2 might have to be reloaded
          // later, and it might indicate that it needs to live in R0. When
          // this occurs, we need to have information available that
          // indicates it is safe to use R1 for the reload instead of R0.
          //
          // To further complicate matters, we might conflict with an alias,
          // or R0 and R1 might not be compatible with each other. In this
          // case, we actually insert a reload for V1 in R1, ensuring that
          // we can get at R0 or its alias.
          ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                  VRM.getPhys(VirtReg), VirtReg);
          if (ti != -1)
            // Only mark it clobbered if this is a use&def operand.
            ReusedOperands.markClobbered(PhysReg);
          ++NumReused;

          if (MI.getOperand(i).isKill() &&
              ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {

            // The store of this spilled value is potentially dead, but we
            // won't know for certain until we've confirmed that the re-use
            // above is valid, which means waiting until the other operands
            // are processed. For now we just track the spill slot, we'll
            // remove it after the other operands are processed if valid.

            PotentialDeadStoreSlots.push_back(ReuseSlot);
          }

          // Assumes this is the last use. IsKill will be unset if reg is reused
          // unless it's a two-address operand.
          if (ti == -1)
            MI.getOperand(i).setIsKill();

          continue;
        }  // CanReuse

        // Otherwise we have a two-address instruction whose mod/ref operand
        // needs to be reloaded. This reload is already available in some
        // register "PhysReg", but if we used PhysReg as the operand to our
        // 2-addr instruction, the instruction would modify PhysReg. This
        // isn't cool if something later uses PhysReg and expects to get its
        // initial value.
        //
        // To avoid this problem, and to avoid doing a load right after a store,
        // we emit a copy from PhysReg into the designated register for this
        // operand.
        unsigned DesignatedReg = VRM.getPhys(VirtReg);
        assert(DesignatedReg && "Must map virtreg to physreg!");

        // Note that, if we reused a register for a previous operand, the
        // register we want to reload into might not actually be
        // available. If this occurs, use the register indicated by the
        // reuser.
        if (ReusedOperands.hasReuses())
          DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
                              Spills, MaybeDeadStores, RegKills, KillOps, VRM);

        // If the mapped designated register is actually the physreg we have
        // incoming, we don't need to insert a dead copy.
        if (DesignatedReg == PhysReg) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg " << TRI->getName(PhysReg)
               << " for vreg" << VirtReg
               << " instead of reloading into same physreg.\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
          ReusedOperands.markClobbered(RReg);
          ++NumReused;
          continue;
        }

        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        RegInfo->setPhysRegUsed(DesignatedReg);
        ReusedOperands.markClobbered(DesignatedReg);
        TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);

        MachineInstr *CopyMI = prior(MII);
        UpdateKills(*CopyMI, RegKills, KillOps, TRI);

        // This invalidates DesignatedReg.
        Spills.ClobberPhysReg(DesignatedReg);

        Spills.addAvailable(ReuseSlot, DesignatedReg);
        unsigned RReg =
          SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
        MI.getOperand(i).setReg(RReg);
        DOUT << '\t' << *prior(MII);
        ++NumReused;
        continue;
      } // if (PhysReg)

      // Otherwise, reload it and remember that we have it.
      PhysReg = VRM.getPhys(VirtReg);
      assert(PhysReg && "Must map virtreg to physreg!");

      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
      // available. If this occurs, use the register indicated by the
      // reuser.
      if (ReusedOperands.hasReuses())
        PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                      Spills, MaybeDeadStores, RegKills, KillOps, VRM);

      RegInfo->setPhysRegUsed(PhysReg);
      ReusedOperands.markClobbered(PhysReg);
      if (DoReMat) {
        ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM);
      } else {
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
        MachineInstr *LoadMI = prior(MII);
        VRM.addSpillSlotUse(SSorRMId, LoadMI);
        ++NumLoads;
      }
      // This invalidates PhysReg.
      Spills.ClobberPhysReg(PhysReg);

      // Any stores to this stack slot are not dead anymore.
      if (!DoReMat)
        MaybeDeadStores[SSorRMId] = NULL;
      Spills.addAvailable(SSorRMId, PhysReg);
      // Assumes this is the last use. IsKill will be unset if reg is reused
      // unless it's a two-address operand.
      if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
        MI.getOperand(i).setIsKill();
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      MI.getOperand(i).setReg(RReg);
      UpdateKills(*prior(MII), RegKills, KillOps, TRI);
      DOUT << '\t' << *prior(MII);
    }

    // Ok - now we can remove stores that have been confirmed dead.
    for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
      // This was the last use and the spilled value is still available
      // for reuse. That means the spill was unnecessary!
      int PDSSlot = PotentialDeadStoreSlots[j];
      MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
      if (DeadStore) {
        DOUT << "Removed dead store:\t" << *DeadStore;
        InvalidateKills(*DeadStore, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(DeadStore);
        MBB.erase(DeadStore);
        MaybeDeadStores[PDSSlot] = NULL;
        ++NumDSE;
      }
    }

    DOUT << '\t' << MI;

    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.
    SmallSet<int, 2> FoldedSS;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      DOUT << "Folded vreg: " << VirtReg << " MR: " << MR;

      // MI2VirtMap can be updated, which would invalidate the iterator.
      // Increment the iterator first.
      ++I;
      int SS = VRM.getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
      DOUT << " - StackSlot: " << SS << "\n";

      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DOUT << "Promoted Load To Copy: " << MI;
            if (DestReg != InReg) {
              const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
              TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
              MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
              unsigned SubIdx = DefMO->getSubReg();
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              NextMII = &MI;
              --NextMII;  // backtrack to the copy.
              // Propagate the sub-register index over.
              if (SubIdx) {
                DefMO = NextMII->findRegisterDefOperand(DestReg);
                DefMO->setSubReg(SubIdx);
              }

              // Mark it killed.
              MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg);
              KillOpnd->setIsKill();

              BackTracked = true;
            } else {
              DOUT << "Removing now-noop copy: " << MI;
              // Unset last kill since it's being reused.
              InvalidateKill(InReg, RegKills, KillOps);
            }

            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            goto ProcessNextInst;
          }
        } else {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          if (PhysReg &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
            MBB.insert(MII, NewMIs[0]);
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            goto ProcessNextInst;
          }
        }
      }

      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
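      // MaybeDeadStores[SS] is the last store to this slot that has not yet
      // been proven necessary by a reload. If this folded reference never
      // reads the slot (or the instruction can be unfolded so the reload is
      // no longer needed), that store is dead and is removed below.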
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      if (DeadStore) {
        bool isDead = !(MR & VirtRegMap::isRef);
        MachineInstr *NewStore = NULL;
        if (MR & VirtRegMap::isModRef) {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          // We can reuse this physreg as long as we are allowed to clobber
          // the value and there isn't an earlier def that has already
          // clobbered the physreg.
          if (PhysReg &&
              !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
            MachineOperand *KillOpnd =
              DeadStore->findRegisterUseOperand(PhysReg, true);
            // Note, if the store is storing a sub-register, it's possible the
            // super-register is needed below.
            if (KillOpnd && !KillOpnd->getSubReg() &&
                TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
              MBB.insert(MII, NewMIs[0]);
              NewStore = NewMIs[1];
              MBB.insert(MII, NewStore);
              VRM.addSpillSlotUse(SS, NewStore);
              InvalidateKills(MI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              --NextMII;
              --NextMII;  // backtrack to the unfolded instruction.
              BackTracked = true;
              isDead = true;
            }
          }
        }

        if (isDead) {  // Previous store is dead.
          // If we get here, the store is dead, nuke it now.
          DOUT << "Removed dead store:\t" << *DeadStore;
          InvalidateKills(*DeadStore, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(DeadStore);
          MBB.erase(DeadStore);
          if (!NewStore)
            ++NumDSE;
        }

        MaybeDeadStores[SS] = NULL;
        if (NewStore) {
          // Treat this store as a spill merged into a copy. That makes the
          // stack slot value available.
          VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
          goto ProcessNextInst;
        }
      }

      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy). If
        // so, realize that the vreg is available now, and add the store to the
        // MaybeDeadStore info.
        int StackSlot;
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");

            if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
                                    RegKills, KillOps, TRI, VRM)) {
              NextMII = next(MII);
              BackTracked = true;
              goto ProcessNextInst;
            }

            // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now. Otherwise, make the register
            // available in PhysReg.
            Spills.addAvailable(StackSlot, SrcReg, false/*!clobber*/);
          }
        }
      }
    }

    // Process all of the spilled defs.
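    // Physreg defs: noop copies are deleted, and direct reloads from a stack
    // slot record that the slot's value is now available in the def register.
    // Virtreg defs: the operand is rewritten to its assigned physreg and, if
    // the value is not dead, a store back to the vreg's stack slot is emitted.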
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isReg() && MO.getReg() && MO.isDef()))
        continue;

      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        unsigned Src, Dst, SrcSR, DstSR;
        if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
          ++NumDCE;
          DOUT << "Removing now-noop copy: " << MI;
          SmallVector<unsigned, 2> KillRegs;
          InvalidateKills(MI, RegKills, KillOps, &KillRegs);
          if (MO.isDead() && !KillRegs.empty()) {
            // Source register or an implicit super/sub-register use is killed.
            assert(KillRegs[0] == Dst ||
                   TRI->isSubRegister(KillRegs[0], Dst) ||
                   TRI->isSuperRegister(KillRegs[0], Dst));
            // Last def is now dead.
            TransferDeadness(&MBB, Dist, Src, RegKills, KillOps);
          }
          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          Erased = true;
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }

        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register. If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }

      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM.isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM.getStackSlot(VirtReg);
      const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
      unsigned PhysReg;
      int TiedOp = MI.getDesc().findTiedToSrcOperand(i);
      if (TiedOp != -1) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM.getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                        Spills, MaybeDeadStores, RegKills, KillOps, VRM);
        }
      }

      assert(PhysReg && "VR not assigned a physical register?");
      RegInfo->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ?
        TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);

      if (!MO.isDead()) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
                            LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
        NextMII = next(MII);

        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        {
          unsigned Src, Dst, SrcSR, DstSR;
          if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
            ++NumDCE;
            DOUT << "Removing now-noop copy: " << MI;
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            UpdateKills(*LastStore, RegKills, KillOps, TRI);
            goto ProcessNextInst;
          }
        }
      }
    }
  ProcessNextInst:
    DistanceMap.insert(std::make_pair(&MI, Dist++));
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
        UpdateKills(*II, RegKills, KillOps, TRI);
    }
    MII = NextMII;
  }

}

llvm::Spiller* llvm::createSpiller() {
  switch (SpillerOpt) {
  default: assert(0 && "Unreachable!");
  case local:
    return new LocalSpiller();
  case simple:
    return new SimpleSpiller();
  }
}