VirtRegMap.cpp revision 5f56c26ff3a6ee1d71e91ca86afbc8efe1123a9b
//===-- llvm/CodeGen/VirtRegMap.cpp - Virtual Register Map ---------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the VirtRegMap class.
//
// It also contains implementations of the Spiller interface, which, given a
// virtual register map and a machine function, eliminates all virtual
// references by replacing them with physical register references - adding
// spill code as necessary.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"
#include "VirtRegMap.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumSpills, "Number of register spills");
STATISTIC(NumPSpills,"Number of physical register spills");
STATISTIC(NumReMats, "Number of re-materialization");
STATISTIC(NumDRM   , "Number of re-materializable defs elided");
STATISTIC(NumStores, "Number of stores added");
STATISTIC(NumLoads , "Number of loads added");
STATISTIC(NumReused, "Number of values reused");
STATISTIC(NumDSE   , "Number of dead stores elided");
STATISTIC(NumDCE   , "Number of copies elided");
STATISTIC(NumDSS   , "Number of dead spill slots removed");

namespace {
  enum SpillerName { simple, local };
}

static cl::opt<SpillerName>
SpillerOpt("spiller",
           cl::desc("Spiller to use: (default: local)"),
           cl::Prefix,
           cl::values(clEnumVal(simple, "  simple spiller"),
                      clEnumVal(local,  "  local spiller"),
                      clEnumValEnd),
           cl::init(local));

//===----------------------------------------------------------------------===//
//  VirtRegMap implementation
//===----------------------------------------------------------------------===//

VirtRegMap::VirtRegMap(MachineFunction &mf)
  : TII(*mf.getTarget().getInstrInfo()), MF(mf),
    Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT),
    Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
    Virt2SplitKillMap(0), ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1),
    LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) {
  SpillSlotToUsesMap.resize(8);
  ImplicitDefed.resize(MF.getRegInfo().getLastVirtReg()+1-
                       TargetRegisterInfo::FirstVirtualRegister);
  grow();
}

void VirtRegMap::grow() {
  unsigned LastVirtReg = MF.getRegInfo().getLastVirtReg();
  Virt2PhysMap.grow(LastVirtReg);
  Virt2StackSlotMap.grow(LastVirtReg);
  Virt2ReMatIdMap.grow(LastVirtReg);
  Virt2SplitMap.grow(LastVirtReg);
  Virt2SplitKillMap.grow(LastVirtReg);
  ReMatMap.grow(LastVirtReg);
  ImplicitDefed.resize(LastVirtReg-TargetRegisterInfo::FirstVirtualRegister+1);
}
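
// Illustrative use of the mappings above (a sketch for exposition; SomeVReg
// and SomePhysReg are hypothetical):
//
//   VirtRegMap VRM(MF);
//   // ... the register allocator creates additional virtual registers ...
//   VRM.grow();                    // Virt2* maps now cover getLastVirtReg()
//   VRM.assignVirt2Phys(SomeVReg, SomePhysReg);   // entry was NO_PHYS_REG
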
slot to already spilled register"); 93 const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(virtReg); 94 int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(), 95 RC->getAlignment()); 96 if (LowSpillSlot == NO_STACK_SLOT) 97 LowSpillSlot = SS; 98 if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot) 99 HighSpillSlot = SS; 100 unsigned Idx = SS-LowSpillSlot; 101 while (Idx >= SpillSlotToUsesMap.size()) 102 SpillSlotToUsesMap.resize(SpillSlotToUsesMap.size()*2); 103 Virt2StackSlotMap[virtReg] = SS; 104 ++NumSpills; 105 return SS; 106} 107 108void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int SS) { 109 assert(TargetRegisterInfo::isVirtualRegister(virtReg)); 110 assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT && 111 "attempt to assign stack slot to already spilled register"); 112 assert((SS >= 0 || 113 (SS >= MF.getFrameInfo()->getObjectIndexBegin())) && 114 "illegal fixed frame index"); 115 Virt2StackSlotMap[virtReg] = SS; 116} 117 118int VirtRegMap::assignVirtReMatId(unsigned virtReg) { 119 assert(TargetRegisterInfo::isVirtualRegister(virtReg)); 120 assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT && 121 "attempt to assign re-mat id to already spilled register"); 122 Virt2ReMatIdMap[virtReg] = ReMatId; 123 return ReMatId++; 124} 125 126void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) { 127 assert(TargetRegisterInfo::isVirtualRegister(virtReg)); 128 assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT && 129 "attempt to assign re-mat id to already spilled register"); 130 Virt2ReMatIdMap[virtReg] = id; 131} 132 133int VirtRegMap::getEmergencySpillSlot(const TargetRegisterClass *RC) { 134 std::map<const TargetRegisterClass*, int>::iterator I = 135 EmergencySpillSlots.find(RC); 136 if (I != EmergencySpillSlots.end()) 137 return I->second; 138 int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(), 139 RC->getAlignment()); 140 if (LowSpillSlot == NO_STACK_SLOT) 141 LowSpillSlot = SS; 142 if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot) 143 HighSpillSlot = SS; 144 I->second = SS; 145 return SS; 146} 147 148void VirtRegMap::addSpillSlotUse(int FI, MachineInstr *MI) { 149 if (!MF.getFrameInfo()->isFixedObjectIndex(FI)) { 150 assert(FI >= 0 && "Spill slot index should not be negative!"); 151 SpillSlotToUsesMap[FI-LowSpillSlot].insert(MI); 152 } 153} 154 155void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI, 156 MachineInstr *NewMI, ModRef MRInfo) { 157 // Move previous memory references folded to new instruction. 
void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
                            MachineInstr *NewMI, ModRef MRInfo) {
  // Move previous memory references folded to new instruction.
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
  for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
         E = MI2VirtMap.end(); I != E && I->first == OldMI; ) {
    MI2VirtMap.insert(IP, std::make_pair(NewMI, I->second));
    MI2VirtMap.erase(I++);
  }

  // Add new memory reference.
  MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
}

void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo) {
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(MI);
  MI2VirtMap.insert(IP, std::make_pair(MI, std::make_pair(VirtReg, MRInfo)));
}

void VirtRegMap::RemoveMachineInstrFromMaps(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isFrameIndex())
      continue;
    int FI = MO.getIndex();
    if (MF.getFrameInfo()->isFixedObjectIndex(FI))
      continue;
    SpillSlotToUsesMap[FI-LowSpillSlot].erase(MI);
  }
  MI2VirtMap.erase(MI);
  SpillPt2VirtMap.erase(MI);
  RestorePt2VirtMap.erase(MI);
  EmergencySpillMap.erase(MI);
}

void VirtRegMap::print(std::ostream &OS) const {
  const TargetRegisterInfo* TRI = MF.getTarget().getRegisterInfo();

  OS << "********** REGISTER MAP **********\n";
  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
         e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i) {
    if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG)
      OS << "[reg" << i << " -> " << TRI->getName(Virt2PhysMap[i])
         << "]\n";
  }

  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
         e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i)
    if (Virt2StackSlotMap[i] != VirtRegMap::NO_STACK_SLOT)
      OS << "[reg" << i << " -> fi#" << Virt2StackSlotMap[i] << "]\n";
  OS << '\n';
}

void VirtRegMap::dump() const {
  print(cerr);
}


//===----------------------------------------------------------------------===//
// Simple Spiller Implementation
//===----------------------------------------------------------------------===//

Spiller::~Spiller() {}

namespace {
  struct VISIBILITY_HIDDEN SimpleSpiller : public Spiller {
    bool runOnMachineFunction(MachineFunction& mf, VirtRegMap &VRM);
  };
}

bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
  DOUT << "********** REWRITE MACHINE CODE **********\n";
  DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();

  // LoadedRegs - Keep track of which vregs are loaded, so that we only load
  // each vreg once (in the case where a spilled vreg is used by multiple
  // operands).  This is always smaller than the number of operands to the
  // current machine instr, so it should be small.
  std::vector<unsigned> LoadedRegs;

  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    DOUT << MBBI->getBasicBlock()->getName() << ":\n";
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator MII = MBB.begin(),
           E = MBB.end(); MII != E; ++MII) {
      MachineInstr &MI = *MII;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (MO.isRegister() && MO.getReg()) {
          if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
            unsigned VirtReg = MO.getReg();
            unsigned PhysReg = VRM.getPhys(VirtReg);
            if (!VRM.isAssignedReg(VirtReg)) {
              int StackSlot = VRM.getStackSlot(VirtReg);
              const TargetRegisterClass* RC =
                MF.getRegInfo().getRegClass(VirtReg);

              if (MO.isUse() &&
                  std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
                  == LoadedRegs.end()) {
                TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
                MachineInstr *LoadMI = prior(MII);
                VRM.addSpillSlotUse(StackSlot, LoadMI);
                LoadedRegs.push_back(VirtReg);
                ++NumLoads;
                DOUT << '\t' << *LoadMI;
              }

              if (MO.isDef()) {
                TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
                                        StackSlot, RC);
                MachineInstr *StoreMI = next(MII);
                VRM.addSpillSlotUse(StackSlot, StoreMI);
                ++NumStores;
              }
            }
            MF.getRegInfo().setPhysRegUsed(PhysReg);
            MI.getOperand(i).setReg(PhysReg);
          } else {
            MF.getRegInfo().setPhysRegUsed(MO.getReg());
          }
        }
      }

      DOUT << '\t' << MI;
      LoadedRegs.clear();
    }
  }
  return true;
}
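
// Illustrative effect of SimpleSpiller on one instruction, assuming %v7 was
// assigned stack slot fi#2 and physreg %EAX (x86 flavored, names for
// exposition only):
//
//   %v7 = ADD32ri %v7, 1
// is rewritten as
//   %EAX = MOV32rm <fi#2>         ; reload inserted before the use
//   %EAX = ADD32ri %EAX, 1
//   MOV32mr <fi#2>, %EAX          ; spill store inserted after the def
//
// Note that LoadedRegs is cleared after every instruction, so it only
// suppresses redundant reloads among the operands of a single instruction.
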
//===----------------------------------------------------------------------===//
//  Local Spiller Implementation
//===----------------------------------------------------------------------===//

namespace {
  class AvailableSpills;

  /// LocalSpiller - This spiller does a simple pass over the machine basic
  /// block to attempt to keep spills in registers as much as possible for
  /// blocks that have low register pressure (the vreg may be spilled due to
  /// register pressure in other blocks).
  class VISIBILITY_HIDDEN LocalSpiller : public Spiller {
    MachineRegisterInfo *RegInfo;
    const TargetRegisterInfo *TRI;
    const TargetInstrInfo *TII;
    DenseMap<MachineInstr*, unsigned> DistanceMap;
  public:
    bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
      RegInfo = &MF.getRegInfo();
      TRI = MF.getTarget().getRegisterInfo();
      TII = MF.getTarget().getInstrInfo();
      DOUT << "\n**** Local spiller rewriting function '"
           << MF.getFunction()->getName() << "':\n";
      DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
              " ****\n";
      DEBUG(MF.dump());

      for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
           MBB != E; ++MBB)
        RewriteMBB(*MBB, VRM);

      // Mark unused spill slots.
      MachineFrameInfo *MFI = MF.getFrameInfo();
      int SS = VRM.getLowSpillSlot();
      if (SS != VirtRegMap::NO_STACK_SLOT)
        for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
          if (!VRM.isSpillSlotUsed(SS)) {
            MFI->RemoveStackObject(SS);
            ++NumDSS;
          }

      DOUT << "**** Post Machine Instrs ****\n";
      DEBUG(MF.dump());

      return true;
    }
  private:
    void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                          unsigned Reg, BitVector &RegKills,
                          std::vector<MachineOperand*> &KillOps);
    bool PrepForUnfoldOpti(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           AvailableSpills &Spills, BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);
    void SpillRegToStackSlot(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator &MII,
                             int Idx, unsigned PhysReg, int StackSlot,
                             const TargetRegisterClass *RC,
                             bool isAvailable, MachineInstr *&LastStore,
                             AvailableSpills &Spills,
                             SmallSet<MachineInstr*, 4> &ReMatDefs,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM);
    void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM);
  };
}

/// AvailableSpills - As the local spiller is scanning and rewriting an MBB
/// from top down, keep track of which spill slots or remat'ed values are
/// available in each register.
///
/// Note that not all physregs are created equal here.  In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register allocated program is using
/// that we cannot CHANGE, but we can read if we like.  We keep track of this
/// on a per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsAvailable entries.  The predicate 'canClobberPhysReg()' checks
/// this bit and addAvailable sets it if appropriate.
namespace {
class VISIBILITY_HIDDEN AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to
  // being loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg.  This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);
public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }

  const TargetRegisterInfo *getRegInfo() const { return TRI; }

  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }
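
  // Worked example of the encoding above: if SS#4 is available in physreg 17
  // and may be clobbered, then
  //   SpillSlotsOrReMatsAvailable[4] == (17 << 1) | 1 == 35
  // so getSpillSlotOrReMatPhysReg(4) recovers 35 >> 1 == 17, and
  // canClobberPhysReg(4) tests the low bit, 35 & 1.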
  /// addAvailable - Mark that the specified stack slot / remat is available
  /// in the specified physreg.  If CanClobber is true, the physreg can be
  /// modified at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, MachineInstr *MI, unsigned Reg,
                    bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat]= (Reg << 1) | (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "Remembering RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1;
    else
      DOUT << "Remembering SS#" << SlotOrReMat;
    DOUT << " in physreg " << TRI->getName(Reg) << "\n";
  }

  /// canClobberPhysReg - Return true if the spiller is allowed to change the
  /// value of the specified stackslot register if it desires.  The specified
  /// stack slot must be available in a physreg for this query to make sense.
  bool canClobberPhysReg(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }

  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register.  The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value.  We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes.  This removes information about which register the
  /// previous value for this slot lives in (as the previous value is dead
  /// now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);
};
}

/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register.  The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    I++;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n";
  }
}

/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases.  The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}
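
// Illustrative alias handling (x86 flavored): a write to EAX also changes
// AX, AH and AL, which is why disallowClobberPhysReg() above and
// ClobberPhysReg() below walk TRI->getAliasSet() in addition to the register
// itself -- a stale entry claiming "SS#n lives in AX" must not survive a
// redefinition of EAX.
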
/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in
/// it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DOUT << "PhysReg " << TRI->getName(PhysReg)
         << " clobbered, invalidating ";
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
    else
      DOUT << "SS#" << SlotOrReMat << "\n";
  }
}

/// ClobberPhysReg - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}

/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes.  This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove
  // this stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}



/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isUse() || !MO.isKill())
      continue;
    unsigned Reg = MO.getReg();
    if (KillRegs)
      KillRegs->push_back(Reg);
    if (KillOps[Reg] == &MO) {
      RegKills.reset(Reg);
      KillOps[Reg] = NULL;
    }
  }
}

/// InvalidateKill - A MI that defines the specified register is being deleted,
/// invalidate the register kill information.
static void InvalidateKill(unsigned Reg, BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg]) {
    KillOps[Reg]->setIsKill(false);
    KillOps[Reg] = NULL;
    RegKills.reset(Reg);
  }
}
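
// Invariant maintained by the kill-tracking helpers (InvalidateKills and
// InvalidateKill above, UpdateKills below): RegKills[Reg] is set iff some
// already-rewritten instruction in the current block carries the last
// (killing) use of Reg, and KillOps[Reg] points at that operand so the kill
// marker can be cleared if the register turns out to be read again due to
// reuse.
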
/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead.  Also checks if
/// the def MI has other definition operands that are not dead.  Returns it by
/// reference.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef) {
  // Due to remat, it's possible this reg isn't being reused.  That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (MO.isRegister() && MO.isDef()) {
      if (MO.getReg() == Reg)
        DefOp = &MO;
      else if (!MO.isDead())
        HasLiveDef = true;
    }
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isRegister() || MO.getReg() != Reg)
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true;  // Stop after scanning all the operands of this MI.
    }
  }
  if (!FoundUse) {
    // Def is dead!
    DefOp->setIsDead();
    return true;
  }
  return false;
}

/// UpdateKills - Track and update kill info.  If a MI reads a register that
/// is marked kill, then it must be due to register reuse.  Transfer the kill
/// info over.
static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps) {
  const TargetInstrDesc &TID = MI.getDesc();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->setIsKill(false);
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      if (i < TID.getNumOperands() &&
          TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
        // Unless it's a two-address operand, this is the new kill.
        MO.setIsKill();
    }
    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
  }
}

/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
///
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg));
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isRegister() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;
    assert(MO.isUse());
    unsigned SubIdx = MO.getSubReg();
    unsigned Phys = VRM.getPhys(VirtReg);
    assert(Phys);
    unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
    MO.setReg(RReg);
  }
  ++NumReMats;
}
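
// Illustrative re-materialization, assuming %v9 was defined by a trivially
// re-materializable instruction such as an immediate move (x86 flavored,
// names for exposition only):
//
//   %v9 = MOV32ri 42           ; def recorded via VRM.getReMaterializedMI()
//   ...
//   ... use of %v9 ...         ; instead of reloading from a stack slot,
//   %ECX = MOV32ri 42          ; ReMaterialize() re-emits the def into %ECX
//
// The loop at the end of ReMaterialize then rewrites any virtual register
// operands of the re-emitted def to their assigned physregs.
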
// ReusedOp - For each reused operand, we keep track of a bit of information,
// in case we need to rollback upon processing a new operand.  See comments
// below.
namespace {
  struct ReusedOp {
    // The MachineInstr operand that reused an available value.
    unsigned Operand;

    // StackSlotOrReMat - The spill slot or remat id of the value being reused.
    unsigned StackSlotOrReMat;

    // PhysRegReused - The physical register the value was available in.
    unsigned PhysRegReused;

    // AssignedPhysReg - The physreg that was assigned for use by the reload.
    unsigned AssignedPhysReg;

    // VirtReg - The virtual register itself.
    unsigned VirtReg;

    ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
             unsigned vreg)
      : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
        AssignedPhysReg(apr), VirtReg(vreg) {}
  };

  /// ReuseInfo - This maintains a collection of ReuseOp's for each operand
  /// that is reused instead of reloaded.
  class VISIBILITY_HIDDEN ReuseInfo {
    MachineInstr &MI;
    std::vector<ReusedOp> Reuses;
    BitVector PhysRegsClobbered;
  public:
    ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
      PhysRegsClobbered.resize(tri->getNumRegs());
    }

    bool hasReuses() const {
      return !Reuses.empty();
    }

    /// addReuse - If we choose to reuse a virtual register that is already
    /// available instead of reloading it, remember that we did so.
    void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
                  unsigned PhysRegReused, unsigned AssignedPhysReg,
                  unsigned VirtReg) {
      // If the reload is to the assigned register anyway, no undo will be
      // required.
      if (PhysRegReused == AssignedPhysReg) return;

      // Otherwise, remember this.
      Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                                AssignedPhysReg, VirtReg));
    }

    void markClobbered(unsigned PhysReg) {
      PhysRegsClobbered.set(PhysReg);
    }

    bool isClobbered(unsigned PhysReg) const {
      return PhysRegsClobbered.test(PhysReg);
    }

    /// GetRegForReload - We are about to emit a reload into PhysReg.  If there
    /// is some other operand that is using the specified register, either pick
    /// a new register to use, or evict the previous reload and use this reg.
    unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                             AvailableSpills &Spills,
                             std::vector<MachineInstr*> &MaybeDeadStores,
                             SmallSet<unsigned, 8> &Rejected,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM) {
      const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()
                                     .getInstrInfo();

      if (Reuses.empty()) return PhysReg;  // This is most often empty.

      for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
        ReusedOp &Op = Reuses[ro];
        // If we find some other reuse that was supposed to use this register
        // exactly for its reload, we can change this reload to use ITS reload
        // register. That is, unless its reload register has already been
        // considered and subsequently rejected because it has also been reused
        // by another operand.
        if (Op.PhysRegReused == PhysReg &&
            Rejected.count(Op.AssignedPhysReg) == 0) {
          // Yup, use the reload register that we didn't use before.
          unsigned NewReg = Op.AssignedPhysReg;
          Rejected.insert(PhysReg);
          return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected,
                                 RegKills, KillOps, VRM);
        } else {
          // Otherwise, we might also have a problem if a previously reused
          // value aliases the new register. If so, codegen the previous reload
          // and use this one.
          unsigned PRRU = Op.PhysRegReused;
          const TargetRegisterInfo *TRI = Spills.getRegInfo();
          if (TRI->areAliases(PRRU, PhysReg)) {
            // Okay, we found out that an alias of a reused register
            // was used.  This isn't good because it means we have
            // to undo a previous reuse.
            MachineBasicBlock *MBB = MI->getParent();
            const TargetRegisterClass *AliasRC =
              MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

            // Copy Op out of the vector and remove it, we're going to insert
            // an explicit load for it.
            ReusedOp NewOp = Op;
            Reuses.erase(Reuses.begin()+ro);

            // Ok, we're going to try to reload the assigned physreg into the
            // slot that we were supposed to in the first place.  However, that
            // register could hold a reuse.  Check to see if it conflicts or
            // would prefer us to use a different register.
            unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg,
                                                  MI, Spills, MaybeDeadStores,
                                              Rejected, RegKills, KillOps, VRM);

            MachineBasicBlock::iterator MII = MI;
            if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
              ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI,VRM);
            } else {
              TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg,
                                        NewOp.StackSlotOrReMat, AliasRC);
              MachineInstr *LoadMI = prior(MII);
              VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
              // Any stores to this stack slot are not dead anymore.
              MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
              ++NumLoads;
            }
            Spills.ClobberPhysReg(NewPhysReg);
            Spills.ClobberPhysReg(NewOp.PhysRegReused);

            MI->getOperand(NewOp.Operand).setReg(NewPhysReg);

            Spills.addAvailable(NewOp.StackSlotOrReMat, MI, NewPhysReg);
            --MII;
            UpdateKills(*MII, RegKills, KillOps);
            DOUT << '\t' << *MII;

            DOUT << "Reuse undone!\n";
            --NumReused;

            // Finally, PhysReg is now available, go ahead and use it.
            return PhysReg;
          }
        }
      }
      return PhysReg;
    }

    /// GetRegForReload - Helper for the above GetRegForReload().  Add a
    /// 'Rejected' set to remember which registers have been considered and
    /// rejected for the reload.  This avoids infinite looping in cases like
    /// this:
    /// t1 := op t2, t3
    /// t2 <- assigned r0 for use by the reload but ended up reuse r1
    /// t3 <- assigned r1 for use by the reload but ended up reuse r0
    /// t1 <- desires r1
    ///       sees r1 is taken by t2, tries t2's reload register r0
    ///       sees r0 is taken by t3, tries t3's reload register r1
    ///       sees r1 is taken by t2, tries t2's reload register r0 ...
    unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                             AvailableSpills &Spills,
                             std::vector<MachineInstr*> &MaybeDeadStores,
                             BitVector &RegKills,
                             std::vector<MachineOperand*> &KillOps,
                             VirtRegMap &VRM) {
      SmallSet<unsigned, 8> Rejected;
      return GetRegForReload(PhysReg, MI, Spills, MaybeDeadStores, Rejected,
                             RegKills, KillOps, VRM);
    }
  };
}
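
// A note on termination of the recursive GetRegForReload above: before each
// recursive step the current PhysReg is added to 'Rejected', and a reuse
// whose AssignedPhysReg is already in 'Rejected' is never followed, so the
// 'Rejected' set grows by one register per step and the recursion is bounded
// by the size of the register file.
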
/// PrepForUnfoldOpti - Turn a store folding instruction into a load folding
/// instruction. e.g.
///     xorl  %edi, %eax
///     movl  %eax, -32(%ebp)
///     movl  -36(%ebp), %eax
///     orl   %eax, -32(%ebp)
/// ==>
///     xorl  %edi, %eax
///     orl   -36(%ebp), %eax
///     mov   %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MII,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                     AvailableSpills &Spills,
                                     BitVector &RegKills,
                                     std::vector<MachineOperand*> &KillOps,
                                     VirtRegMap &VRM) {
  MachineFunction &MF = *MBB.getParent();
  MachineInstr &MI = *MII;
  unsigned UnfoldedOpc = 0;
  unsigned UnfoldPR = 0;
  unsigned UnfoldVR = 0;
  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
    // Only transform a MI that folds a single register.
    if (UnfoldedOpc)
      return false;
    UnfoldVR = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    // MI2VirtMap can be updated below, which would invalidate the iterator.
    // Increment the iterator first.
    ++I;
    if (VRM.isAssignedReg(UnfoldVR))
      continue;
    // If this reference is not a use, any previous store is now dead.
    // Otherwise, the store to this stack slot is not dead anymore.
    FoldedSS = VRM.getStackSlot(UnfoldVR);
    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
    if (DeadStore && (MR & VirtRegMap::isModRef)) {
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
      if (!PhysReg || !DeadStore->readsRegister(PhysReg))
        continue;
      UnfoldPR = PhysReg;
      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                    false, true);
    }
  }

  if (!UnfoldedOpc)
    return false;

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || MO.getReg() == 0 || !MO.isUse())
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
      continue;
    if (VRM.isAssignedReg(VirtReg)) {
      unsigned PhysReg = VRM.getPhys(VirtReg);
      if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
    } else if (VRM.isReMaterialized(VirtReg))
      continue;
    int SS = VRM.getStackSlot(VirtReg);
    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
    if (PhysReg) {
      if (TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
      continue;
    }
    PhysReg = VRM.getPhys(VirtReg);
    if (!TRI->regsOverlap(PhysReg, UnfoldPR))
      continue;

    // Ok, we'll need to reload the value into a register which makes
    // it impossible to perform the store unfolding optimization later.
    // Let's see if it is possible to fold the load if the store is
    // unfolded. This allows us to perform the store unfolding
    // optimization.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
      assert(NewMIs.size() == 1);
      MachineInstr *NewMI = NewMIs.back();
      NewMIs.clear();
      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
      assert(Idx != -1);
      SmallVector<unsigned, 2> Ops;
      Ops.push_back(Idx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
      if (FoldedMI) {
        VRM.addSpillSlotUse(SS, FoldedMI);
        if (!VRM.hasPhys(UnfoldVR))
          VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
        VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
        MII = MBB.insert(MII, FoldedMI);
        InvalidateKills(MI, RegKills, KillOps);
        VRM.RemoveMachineInstrFromMaps(&MI);
        MBB.erase(&MI);
        return true;
      }
      delete NewMI;
    }
  }
  return false;
}

/// findSuperReg - Find the SubReg's super-register of given register class
/// where its SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}
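
// Illustrative findSuperReg query (x86 flavored, assuming SubIdx names the
// 16-bit sub-register slot): with RC = GR32 and SubReg = AX, the scan returns
// EAX because TRI->getSubReg(EAX, SubIdx) == AX; it returns 0 if no register
// in RC has SubReg at position SubIdx.
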
/// SpillRegToStackSlot - Spill a register to a specified stack slot.  Check if
/// the last store to the same slot is now dead.  If so, remove the last store.
void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MII,
                                  int Idx, unsigned PhysReg, int StackSlot,
                                  const TargetRegisterClass *RC,
                                  bool isAvailable, MachineInstr *&LastStore,
                                  AvailableSpills &Spills,
                                  SmallSet<MachineInstr*, 4> &ReMatDefs,
                                  BitVector &RegKills,
                                  std::vector<MachineOperand*> &KillOps,
                                  VirtRegMap &VRM) {
  TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
  MachineInstr *StoreMI = next(MII);
  VRM.addSpillSlotUse(StackSlot, StoreMI);
  DOUT << "Store:\t" << *StoreMI;

  // If there is a dead store to this stack slot, nuke it now.
  if (LastStore) {
    DOUT << "Removed dead store:\t" << *LastStore;
    ++NumDSE;
    SmallVector<unsigned, 2> KillRegs;
    InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
    MachineBasicBlock::iterator PrevMII = LastStore;
    bool CheckDef = PrevMII != MBB.begin();
    if (CheckDef)
      --PrevMII;
    VRM.RemoveMachineInstrFromMaps(LastStore);
    MBB.erase(LastStore);
    if (CheckDef) {
      // Look at defs of killed registers on the store. Mark the defs
      // as dead since the store has been deleted and they aren't
      // being reused.
      for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
        bool HasOtherDef = false;
        if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
          MachineInstr *DeadDef = PrevMII;
          if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
            // FIXME: This assumes a remat def does not have side
            // effects.
            VRM.RemoveMachineInstrFromMaps(DeadDef);
            MBB.erase(DeadDef);
            ++NumDRM;
          }
        }
      }
    }
  }

  LastStore = next(MII);

  // If the stack slot value was previously available in some other
  // register, change it now.  Otherwise, make the register available,
  // in PhysReg.
  Spills.ModifyStackSlotOrReMat(StackSlot);
  Spills.ClobberPhysReg(PhysReg);
  Spills.addAvailable(StackSlot, LastStore, PhysReg, isAvailable);
  ++NumStores;
}

/// TransferDeadness - An identity copy definition is dead and it's being
/// removed.  Find the last def or use and mark it as dead / kill.
void LocalSpiller::TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
                                    unsigned Reg, BitVector &RegKills,
                                    std::vector<MachineOperand*> &KillOps) {
  int LastUDDist = -1;
  MachineInstr *LastUDMI = NULL;
  for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
         RE = RegInfo->reg_end(); RI != RE; ++RI) {
    MachineInstr *UDMI = &*RI;
    if (UDMI->getParent() != MBB)
      continue;
    DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
    if (DI == DistanceMap.end() || DI->second > CurDist)
      continue;
    if ((int)DI->second < LastUDDist)
      continue;
    LastUDDist = DI->second;
    LastUDMI = UDMI;
  }

  if (LastUDMI) {
    const TargetInstrDesc &TID = LastUDMI->getDesc();
    MachineOperand *LastUD = NULL;
    for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = LastUDMI->getOperand(i);
      if (!MO.isRegister() || MO.getReg() != Reg)
        continue;
      if (!LastUD || (LastUD->isUse() && MO.isDef()))
        LastUD = &MO;
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1)
        return;
    }
    if (LastUD->isDef())
      LastUD->setIsDead();
    else {
      LastUD->setIsKill();
      RegKills.set(Reg);
      KillOps[Reg] = LastUD;
    }
  }
}
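
// RewriteMBB below is the heart of the local spiller.  For each instruction
// it (1) inserts any requested emergency spills and split-point restores and
// spills, (2) rewrites virtual register uses, preferring values that
// AvailableSpills proves are already in physregs over fresh reloads, (3)
// re-examines folded memory references, deleting stores and loads that the
// availability information proves dead or redundant, and (4) processes
// spilled defs, storing them back to their stack slots.
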
/// RewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them.  If possible, avoid reloading vregs.
void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
  DOUT << MBB.getBasicBlock()->getName() << ":\n";

  MachineFunction &MF = *MBB.getParent();

  // Spills - Keep track of which spilled values are available in physregs so
  // that we can choose to reuse the physregs instead of emitting reloads.
  AvailableSpills Spills(TRI, TII);

  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store.  If the stack slot value is never read
  // (because the value was used from some available register, for example),
  // and subsequently stored to, the original store is dead.  This map keeps
  // track of inserted stores that are not used.  If we see a subsequent store
  // to the same stack slot, the original store is deleted.
  std::vector<MachineInstr*> MaybeDeadStores;
  MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

  // ReMatDefs - These are rematerializable def MIs which are not deleted.
  SmallSet<MachineInstr*, 4> ReMatDefs;

  // Keep track of kill information.
  BitVector RegKills(TRI->getNumRegs());
  std::vector<MachineOperand*> KillOps;
  KillOps.resize(TRI->getNumRegs(), NULL);

  unsigned Dist = 0;
  DistanceMap.clear();
  for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
       MII != E; ) {
    MachineBasicBlock::iterator NextMII = MII; ++NextMII;

    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    bool Erased = false;
    bool BackTracked = false;
    if (PrepForUnfoldOpti(MBB, MII,
                          MaybeDeadStores, Spills, RegKills, KillOps, VRM))
      NextMII = next(MII);

    MachineInstr &MI = *MII;
    const TargetInstrDesc &TID = MI.getDesc();

    if (VRM.hasEmergencySpills(&MI)) {
      // Spill physical register(s) in the rare case the allocator has run out
      // of registers to allocate.
      SmallSet<int, 4> UsedSS;
      std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
      for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
        unsigned PhysReg = EmSpills[i];
        const TargetRegisterClass *RC =
          TRI->getPhysicalRegisterRegClass(PhysReg);
        assert(RC && "Unable to determine register class!");
        int SS = VRM.getEmergencySpillSlot(RC);
        if (UsedSS.count(SS))
          assert(0 && "Need to spill more than one physical register!");
        UsedSS.insert(SS);
        TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
        MachineInstr *StoreMI = prior(MII);
        VRM.addSpillSlotUse(SS, StoreMI);
        TII->loadRegFromStackSlot(MBB, next(MII), PhysReg, SS, RC);
        MachineInstr *LoadMI = next(MII);
        VRM.addSpillSlotUse(SS, LoadMI);
        ++NumPSpills;
      }
      NextMII = next(MII);
    }

    // Insert restores here if asked to.
    if (VRM.isRestorePt(&MI)) {
      std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
      for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
        unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
        if (!VRM.getPreSplitReg(VirtReg))
          continue;  // Split interval spilled again.
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);
        if (VRM.isReMaterialized(VirtReg)) {
          ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM);
        } else {
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          int SS = VRM.getStackSlot(VirtReg);
          TII->loadRegFromStackSlot(MBB, &MI, Phys, SS, RC);
          MachineInstr *LoadMI = prior(MII);
          VRM.addSpillSlotUse(SS, LoadMI);
          ++NumLoads;
        }
        // This invalidates Phys.
        Spills.ClobberPhysReg(Phys);
        UpdateKills(*prior(MII), RegKills, KillOps);
        DOUT << '\t' << *prior(MII);
      }
    }

    // Insert spills here if asked to.
    if (VRM.isSpillPt(&MI)) {
      std::vector<std::pair<unsigned,bool> > &SpillRegs =
        VRM.getSpillPtSpills(&MI);
      for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
        unsigned VirtReg = SpillRegs[i].first;
        bool isKill = SpillRegs[i].second;
        if (!VRM.getPreSplitReg(VirtReg))
          continue;  // Split interval spilled again.
        const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
        unsigned Phys = VRM.getPhys(VirtReg);
        int StackSlot = VRM.getStackSlot(VirtReg);
        TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
        MachineInstr *StoreMI = next(MII);
        VRM.addSpillSlotUse(StackSlot, StoreMI);
        DOUT << "Store:\t" << *StoreMI;
        VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
      }
      NextMII = next(MII);
    }

    /// ReusedOperands - Keep track of operand reuse in case we need to undo
    /// reuse.
    ReuseInfo ReusedOperands(MI, TRI);
    SmallVector<unsigned, 4> VirtUseOps;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isRegister() || MO.getReg() == 0)
        continue;  // Ignore non-register operands.

      unsigned VirtReg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
        // Ignore physregs for spilling, but remember that it is used by this
        // function.
        RegInfo->setPhysRegUsed(VirtReg);
        continue;
      }

      // We want to process implicit virtual register uses first.
      if (MO.isImplicit())
        // If the virtual register is implicitly defined, emit an implicit_def
        // before so the scavenger knows it's "defined".
        VirtUseOps.insert(VirtUseOps.begin(), i);
      else
        VirtUseOps.push_back(i);
    }

    // Process all of the spilled uses and all non spilled reg references.
    for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
      unsigned i = VirtUseOps[j];
      MachineOperand &MO = MI.getOperand(i);
      unsigned VirtReg = MO.getReg();
      assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
             "Not a virtual register?");

      unsigned SubIdx = MO.getSubReg();
      if (VRM.isAssignedReg(VirtReg)) {
        // This virtual register was assigned a physreg!
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);
        if (MO.isDef())
          ReusedOperands.markClobbered(Phys);
        unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
        MI.getOperand(i).setReg(RReg);
        if (VRM.isImplicitlyDefined(VirtReg))
          BuildMI(MBB, MI, TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
        continue;
      }

      // This virtual register is now known to be a spilled value.
      if (!MO.isUse())
        continue;  // Handle defs in the loop below (handle use&def here though)

      bool DoReMat = VRM.isReMaterialized(VirtReg);
      int SSorRMId = DoReMat
        ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
      int ReuseSlot = SSorRMId;

      // Check to see if this stack slot is available.
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);

      // If this is a sub-register use, make sure the reuse register is in the
      // right register class. For example, for x86 not all of the 32-bit
      // registers have accessible sub-registers.
      // Similarly so for EXTRACT_SUBREG. Consider this:
      //   EDI = op
      //   MOV32_mr fi#1, EDI
      //   ...
      //       = EXTRACT_SUBREG fi#1
      // fi#1 is available in EDI, but it cannot be reused because it's not in
      // the right register file.
      if (PhysReg &&
          (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        if (!RC->contains(PhysReg))
          PhysReg = 0;
      }

      if (PhysReg) {
        // This spilled operand might be part of a two-address operand.  If this
        // is the case, then changing it will necessarily require changing the
        // def part of the instruction as well.  However, in some cases, we
        // aren't allowed to modify the reused register.  If none of these
        // cases apply, reuse it.
        bool CanReuse = true;
        int ti = TID.getOperandConstraint(i, TOI::TIED_TO);
        if (ti != -1 &&
            MI.getOperand(ti).isRegister() &&
            MI.getOperand(ti).getReg() == VirtReg) {
          // Okay, we have a two address operand.  We can reuse this physreg as
          // long as we are allowed to clobber the value and there isn't an
          // earlier def that has already clobbered the physreg.
          CanReuse = Spills.canClobberPhysReg(ReuseSlot) &&
            !ReusedOperands.isClobbered(PhysReg);
        }

        if (CanReuse) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg "
               << TRI->getName(PhysReg) << " for vreg"
               << VirtReg <<" instead of reloading into physreg "
               << TRI->getName(VRM.getPhys(VirtReg)) << "\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);

          // The only technical detail we have is that we don't know that
          // PhysReg won't be clobbered by a reloaded stack slot that occurs
          // later in the instruction.  In particular, consider 'op V1, V2'.
          // If V1 is available in physreg R0, we would choose to reuse it
          // here, instead of reloading it into the register the allocator
          // indicated (say R1).  However, V2 might have to be reloaded
          // later, and it might indicate that it needs to live in R0.  When
          // this occurs, we need to have information available that
          // indicates it is safe to use R1 for the reload instead of R0.
          //
          // To further complicate matters, we might conflict with an alias,
          // or R0 and R1 might not be compatible with each other.  In this
          // case, we actually insert a reload for V1 in R1, ensuring that
          // we can get at R0 or its alias.
          ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                  VRM.getPhys(VirtReg), VirtReg);
          if (ti != -1)
            // Only mark it clobbered if this is a use&def operand.
            ReusedOperands.markClobbered(PhysReg);
          ++NumReused;

          if (MI.getOperand(i).isKill() &&
              ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
            // This was the last use and the spilled value is still available
            // for reuse. That means the spill was unnecessary!
            MachineInstr* DeadStore = MaybeDeadStores[ReuseSlot];
            if (DeadStore) {
              DOUT << "Removed dead store:\t" << *DeadStore;
              InvalidateKills(*DeadStore, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(DeadStore);
              MBB.erase(DeadStore);
              MaybeDeadStores[ReuseSlot] = NULL;
              ++NumDSE;
            }
          }
          continue;
        }  // CanReuse

        // Otherwise we have a situation where we have a two-address
        // instruction whose mod/ref operand needs to be reloaded.  This
        // reload is already available in some register "PhysReg", but if we
        // used PhysReg as the operand to our 2-addr instruction, the
        // instruction would modify PhysReg.  This isn't cool if something
        // later uses PhysReg and expects to get its initial value.
        //
        // To avoid this problem, and to avoid doing a load right after a
        // store, we emit a copy from PhysReg into the designated register for
        // this operand.
        unsigned DesignatedReg = VRM.getPhys(VirtReg);
        assert(DesignatedReg && "Must map virtreg to physreg!");

        // Note that, if we reused a register for a previous operand, the
        // register we want to reload into might not actually be
        // available.  If this occurs, use the register indicated by the
        // reuser.
        if (ReusedOperands.hasReuses())
          DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);

        // If the mapped designated register is actually the physreg we have
        // incoming, we don't need to insert a dead copy.
        if (DesignatedReg == PhysReg) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg " << TRI->getName(PhysReg)
               << " for vreg" << VirtReg
               << " instead of reloading into same physreg.\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
          ReusedOperands.markClobbered(RReg);
          ++NumReused;
          continue;
        }

        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        RegInfo->setPhysRegUsed(DesignatedReg);
        ReusedOperands.markClobbered(DesignatedReg);
        TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);

        MachineInstr *CopyMI = prior(MII);
        UpdateKills(*CopyMI, RegKills, KillOps);

        // This invalidates DesignatedReg.
        Spills.ClobberPhysReg(DesignatedReg);

        Spills.addAvailable(ReuseSlot, &MI, DesignatedReg);
        unsigned RReg =
          SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
        MI.getOperand(i).setReg(RReg);
        DOUT << '\t' << *prior(MII);
        ++NumReused;
        continue;
      }  // if (PhysReg)

      // Otherwise, reload it and remember that we have it.
      PhysReg = VRM.getPhys(VirtReg);
      assert(PhysReg && "Must map virtreg to physreg!");

      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
      // available.  If this occurs, use the register indicated by the
      // reuser.
      if (ReusedOperands.hasReuses())
        PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);

      RegInfo->setPhysRegUsed(PhysReg);
      ReusedOperands.markClobbered(PhysReg);
      if (DoReMat) {
        ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM);
      } else {
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
        MachineInstr *LoadMI = prior(MII);
        VRM.addSpillSlotUse(SSorRMId, LoadMI);
        ++NumLoads;
      }
      // This invalidates PhysReg.
      Spills.ClobberPhysReg(PhysReg);

      // Any stores to this stack slot are not dead anymore.
      if (!DoReMat)
        MaybeDeadStores[SSorRMId] = NULL;
      Spills.addAvailable(SSorRMId, &MI, PhysReg);
      // Assumes this is the last use. IsKill will be unset if reg is reused
      // unless it's a two-address operand.
      if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
        MI.getOperand(i).setIsKill();
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      MI.getOperand(i).setReg(RReg);
      UpdateKills(*prior(MII), RegKills, KillOps);
      DOUT << '\t' << *prior(MII);
    }

    DOUT << '\t' << MI;


    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.
    SmallSet<int, 2> FoldedSS;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      DOUT << "Folded vreg: " << VirtReg << "  MR: " << MR;

      // MI2VirtMap can be updated below, which would invalidate the iterator.
      // Increment the iterator first.
      ++I;
      int SS = VRM.getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
      DOUT << " - StackSlot: " << SS << "\n";

      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DOUT << "Promoted Load To Copy: " << MI;
            if (DestReg != InReg) {
              const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
              TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              NextMII = &MI;
              --NextMII;  // backtrack to the copy.
              BackTracked = true;
            } else {
              DOUT << "Removing now-noop copy: " << MI;
              // Unset last kill since it's being reused.
              InvalidateKill(InReg, RegKills, KillOps);
            }

            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            goto ProcessNextInst;
          }
        } else {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          if (PhysReg &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
            MBB.insert(MII, NewMIs[0]);
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            goto ProcessNextInst;
          }
        }
      }

      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      if (DeadStore) {
        bool isDead = !(MR & VirtRegMap::isRef);
        MachineInstr *NewStore = NULL;
        if (MR & VirtRegMap::isModRef) {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          // We can reuse this physreg as long as we are allowed to clobber
          // the value and there isn't an earlier def that has already
          // clobbered the physreg.
          if (PhysReg &&
              !TII->isStoreToStackSlot(&MI, SS)) {  // Not profitable!
            MachineOperand *KillOpnd =
              DeadStore->findRegisterUseOperand(PhysReg, true);
            // Note, if the store is storing a sub-register, it's possible the
            // super-register is needed below.
            if (KillOpnd && !KillOpnd->getSubReg() &&
                TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
              MBB.insert(MII, NewMIs[0]);
              NewStore = NewMIs[1];
              MBB.insert(MII, NewStore);
              VRM.addSpillSlotUse(SS, NewStore);
              InvalidateKills(MI, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(&MI);
              MBB.erase(&MI);
              Erased = true;
              --NextMII;
              --NextMII;  // backtrack to the unfolded instruction.
              BackTracked = true;
              isDead = true;
            }
          }
        }

        if (isDead) { // Previous store is dead.
          // If we get here, the store is dead, nuke it now.
          DOUT << "Removed dead store:\t" << *DeadStore;
          InvalidateKills(*DeadStore, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(DeadStore);
          MBB.erase(DeadStore);
          if (!NewStore)
            ++NumDSE;
        }

        MaybeDeadStores[SS] = NULL;
        if (NewStore) {
          // Treat this store as a spill merged into a copy. That makes the
          // stack slot value available.
          VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
          goto ProcessNextInst;
        }
      }

      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy). If
        // so, realize that the vreg is available now, and add the store to the
        // MaybeDeadStore info.
        int StackSlot;
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");
            // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now. Otherwise, make the value available
            // in SrcReg.
            Spills.addAvailable(StackSlot, &MI, SrcReg, false/*don't clobber*/);
          }
        }
      }
    }

    // Process all of the spilled defs.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isRegister() && MO.getReg() && MO.isDef()))
        continue;

      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
        unsigned Src, Dst;
        if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
          ++NumDCE;
          DOUT << "Removing now-noop copy: " << MI;
          SmallVector<unsigned, 2> KillRegs;
          InvalidateKills(MI, RegKills, KillOps, &KillRegs);
          if (MO.isDead() && !KillRegs.empty()) {
            assert(KillRegs[0] == Dst);
            // Last def is now dead.
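            // The erased copy was the last reader of Src, so hand its
            // dead/kill marker back to the closest earlier use or def of
            // Src (roughly what TransferDeadness does below).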
            TransferDeadness(&MBB, Dist, Src, RegKills, KillOps);
          }
          VRM.RemoveMachineInstrFromMaps(&MI);
          MBB.erase(&MI);
          Erased = true;
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }

        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register. If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, &MI, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }

      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM.isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM.getStackSlot(VirtReg);
      const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
      unsigned PhysReg;
      int TiedOp = MI.getDesc().findTiedToSrcOperand(i);
      if (TiedOp != -1) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM.getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                      Spills, MaybeDeadStores, RegKills, KillOps, VRM);
        }
      }

      assert(PhysReg && "VR not assigned a physical register?");
      RegInfo->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);

      if (!MO.isDead()) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
                            LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
        NextMII = next(MII);

        // Check to see if this is a noop copy. If so, eliminate the
        // instruction before considering the dest reg to be changed.
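        // (After the def operand was rewritten to RReg above, a register
        // move can become an identity copy, e.g. roughly "%EAX = MOV32rr
        // %EAX" on x86; the value was already saved by SpillRegToStackSlot,
        // so the move itself can be erased.)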
        {
          unsigned Src, Dst;
          if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
            ++NumDCE;
            DOUT << "Removing now-noop copy: " << MI;
            InvalidateKills(MI, RegKills, KillOps);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            UpdateKills(*LastStore, RegKills, KillOps);
            goto ProcessNextInst;
          }
        }
      }
    }
  ProcessNextInst:
    DistanceMap.insert(std::make_pair(&MI, Dist++));
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = MI; II != NextMII; ++II)
        UpdateKills(*II, RegKills, KillOps);
    }
    MII = NextMII;
  }
}

llvm::Spiller* llvm::createSpiller() {
  switch (SpillerOpt) {
  default: assert(0 && "Unreachable!");
  case local:
    return new LocalSpiller();
  case simple:
    return new SimpleSpiller();
  }
}
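
// Note: the spiller implementation is chosen via the -spiller command line
// option declared above; e.g. roughly "llc -spiller=simple foo.bc" selects
// SimpleSpiller, with the local spiller as the default.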