VirtRegMap.cpp revision 4c71dfe356716e6bc1993ef5efdced08b68fe612
1//===-- llvm/CodeGen/VirtRegMap.cpp - Virtual Register Map ----------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the VirtRegMap class. 11// 12// It also contains implementations of the Spiller interface, which, given a 13// virtual register map and a machine function, eliminates all virtual 14// references by replacing them with physical register references - adding spill 15// code as necessary. 16// 17//===----------------------------------------------------------------------===// 18 19#define DEBUG_TYPE "spiller" 20#include "VirtRegMap.h" 21#include "llvm/Function.h" 22#include "llvm/CodeGen/MachineFrameInfo.h" 23#include "llvm/CodeGen/MachineFunction.h" 24#include "llvm/CodeGen/MachineRegisterInfo.h" 25#include "llvm/Target/TargetMachine.h" 26#include "llvm/Target/TargetInstrInfo.h" 27#include "llvm/Support/CommandLine.h" 28#include "llvm/Support/Debug.h" 29#include "llvm/Support/Compiler.h" 30#include "llvm/ADT/BitVector.h" 31#include "llvm/ADT/Statistic.h" 32#include "llvm/ADT/STLExtras.h" 33#include "llvm/ADT/SmallSet.h" 34#include <algorithm> 35using namespace llvm; 36 37STATISTIC(NumSpills, "Number of register spills"); 38STATISTIC(NumReMats, "Number of re-materializations"); 39STATISTIC(NumDRM , "Number of re-materializable defs elided"); 40STATISTIC(NumStores, "Number of stores added"); 41STATISTIC(NumLoads , "Number of loads added"); 42STATISTIC(NumReused, "Number of values reused"); 43STATISTIC(NumDSE , "Number of dead stores elided"); 44STATISTIC(NumDCE , "Number of copies elided"); 45 46namespace { 47 enum SpillerName { simple, local }; 48 49 static cl::opt<SpillerName> 50 SpillerOpt("spiller", 51 cl::desc("Spiller to use: (default: local)"), 52 cl::Prefix, 53 cl::values(clEnumVal(simple, " simple spiller"), 54 clEnumVal(local, " local spiller"), 55 clEnumValEnd), 56 cl::init(local)); 57} 58 59//===----------------------------------------------------------------------===// 60// VirtRegMap implementation 61//===----------------------------------------------------------------------===// 62 63VirtRegMap::VirtRegMap(MachineFunction &mf) 64 : TII(*mf.getTarget().getInstrInfo()), MF(mf), 65 Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT), 66 Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0), 67 Virt2SplitKillMap(0), ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1) { 68 grow(); 69} 70 71void VirtRegMap::grow() { 72 unsigned LastVirtReg = MF.getRegInfo().getLastVirtReg(); 73 Virt2PhysMap.grow(LastVirtReg); 74 Virt2StackSlotMap.grow(LastVirtReg); 75 Virt2ReMatIdMap.grow(LastVirtReg); 76 Virt2SplitMap.grow(LastVirtReg); 77 Virt2SplitKillMap.grow(LastVirtReg); 78 ReMatMap.grow(LastVirtReg); 79} 80 81int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) { 82 assert(TargetRegisterInfo::isVirtualRegister(virtReg)); 83 assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT && 84 "attempt to assign stack slot to already spilled register"); 85 const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(virtReg); 86 int frameIndex = MF.getFrameInfo()->CreateStackObject(RC->getSize(), 87 RC->getAlignment()); 88 Virt2StackSlotMap[virtReg] = frameIndex; 89 ++NumSpills; 90 return frameIndex; 91} 92 93void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int frameIndex) { 94 assert(TargetRegisterInfo::isVirtualRegister(virtReg)); 95
assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT && 96 "attempt to assign stack slot to already spilled register"); 97 assert((frameIndex >= 0 || 98 (frameIndex >= MF.getFrameInfo()->getObjectIndexBegin())) && 99 "illegal fixed frame index"); 100 Virt2StackSlotMap[virtReg] = frameIndex; 101} 102 103int VirtRegMap::assignVirtReMatId(unsigned virtReg) { 104 assert(TargetRegisterInfo::isVirtualRegister(virtReg)); 105 assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT && 106 "attempt to assign re-mat id to already spilled register"); 107 Virt2ReMatIdMap[virtReg] = ReMatId; 108 return ReMatId++; 109} 110 111void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) { 112 assert(TargetRegisterInfo::isVirtualRegister(virtReg)); 113 assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT && 114 "attempt to assign re-mat id to already spilled register"); 115 Virt2ReMatIdMap[virtReg] = id; 116} 117 118void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI, 119 MachineInstr *NewMI, ModRef MRInfo) { 120 // Move previous memory references folded to new instruction. 121 MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI); 122 for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI), 123 E = MI2VirtMap.end(); I != E && I->first == OldMI; ) { 124 MI2VirtMap.insert(IP, std::make_pair(NewMI, I->second)); 125 MI2VirtMap.erase(I++); 126 } 127 128 // add new memory reference 129 MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo))); 130} 131 132void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo) { 133 MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(MI); 134 MI2VirtMap.insert(IP, std::make_pair(MI, std::make_pair(VirtReg, MRInfo))); 135} 136 137void VirtRegMap::print(std::ostream &OS) const { 138 const TargetRegisterInfo* TRI = MF.getTarget().getRegisterInfo(); 139 140 OS << "********** REGISTER MAP **********\n"; 141 for (unsigned i = TargetRegisterInfo::FirstVirtualRegister, 142 e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i) { 143 if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG) 144 OS << "[reg" << i << " -> " << TRI->getName(Virt2PhysMap[i]) << "]\n"; 145 146 } 147 148 for (unsigned i = TargetRegisterInfo::FirstVirtualRegister, 149 e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i) 150 if (Virt2StackSlotMap[i] != VirtRegMap::NO_STACK_SLOT) 151 OS << "[reg" << i << " -> fi#" << Virt2StackSlotMap[i] << "]\n"; 152 OS << '\n'; 153} 154 155void VirtRegMap::dump() const { 156 print(DOUT); 157} 158 159 160//===----------------------------------------------------------------------===// 161// Simple Spiller Implementation 162//===----------------------------------------------------------------------===// 163 164Spiller::~Spiller() {} 165 166namespace { 167 struct VISIBILITY_HIDDEN SimpleSpiller : public Spiller { 168 bool runOnMachineFunction(MachineFunction& mf, VirtRegMap &VRM); 169 }; 170} 171 172bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) { 173 DOUT << "********** REWRITE MACHINE CODE **********\n"; 174 DOUT << "********** Function: " << MF.getFunction()->getName() << '\n'; 175 const TargetMachine &TM = MF.getTarget(); 176 const TargetInstrInfo &TII = *TM.getInstrInfo(); 177 178 179 // LoadedRegs - Keep track of which vregs are loaded, so that we only load 180 // each vreg once (in the case where a spilled vreg is used by multiple 181 // operands). This is always smaller than the number of operands to the 182 // current machine instr, so it should be small. 
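// (Added note, accurate to the code below: SimpleSpiller rewrites one instruction at a time - every spilled use is reloaded immediately before the instruction and every spilled def is stored back immediately after it, and LoadedRegs is cleared per instruction, so no reloaded value is reused across instructions.)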
183 std::vector<unsigned> LoadedRegs; 184 185 for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end(); 186 MBBI != E; ++MBBI) { 187 DOUT << MBBI->getBasicBlock()->getName() << ":\n"; 188 MachineBasicBlock &MBB = *MBBI; 189 for (MachineBasicBlock::iterator MII = MBB.begin(), 190 E = MBB.end(); MII != E; ++MII) { 191 MachineInstr &MI = *MII; 192 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 193 MachineOperand &MO = MI.getOperand(i); 194 if (MO.isRegister() && MO.getReg()) { 195 if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) { 196 unsigned VirtReg = MO.getReg(); 197 unsigned PhysReg = VRM.getPhys(VirtReg); 198 if (!VRM.isAssignedReg(VirtReg)) { 199 int StackSlot = VRM.getStackSlot(VirtReg); 200 const TargetRegisterClass* RC = 201 MF.getRegInfo().getRegClass(VirtReg); 202 203 if (MO.isUse() && 204 std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg) 205 == LoadedRegs.end()) { 206 TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC); 207 LoadedRegs.push_back(VirtReg); 208 ++NumLoads; 209 DOUT << '\t' << *prior(MII); 210 } 211 212 if (MO.isDef()) { 213 TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true, 214 StackSlot, RC); 215 ++NumStores; 216 } 217 } 218 MF.getRegInfo().setPhysRegUsed(PhysReg); 219 MI.getOperand(i).setReg(PhysReg); 220 } else { 221 MF.getRegInfo().setPhysRegUsed(MO.getReg()); 222 } 223 } 224 } 225 226 DOUT << '\t' << MI; 227 LoadedRegs.clear(); 228 } 229 } 230 return true; 231} 232 233//===----------------------------------------------------------------------===// 234// Local Spiller Implementation 235//===----------------------------------------------------------------------===// 236 237namespace { 238 class AvailableSpills; 239 240 /// LocalSpiller - This spiller does a simple pass over the machine basic 241 /// block to attempt to keep spills in registers as much as possible for 242 /// blocks that have low register pressure (the vreg may be spilled due to 243 /// register pressure in other blocks). 244 class VISIBILITY_HIDDEN LocalSpiller : public Spiller { 245 MachineRegisterInfo *RegInfo; 246 const TargetRegisterInfo *TRI; 247 const TargetInstrInfo *TII; 248 public: 249 bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) { 250 RegInfo = &MF.getRegInfo(); 251 TRI = MF.getTarget().getRegisterInfo(); 252 TII = MF.getTarget().getInstrInfo(); 253 DOUT << "\n**** Local spiller rewriting function '" 254 << MF.getFunction()->getName() << "':\n"; 255 DOUT << "**** Machine Instrs (NOTE! 
Does not include spills and reloads!)" 256 " ****\n"; 257 DEBUG(MF.dump()); 258 259 for (MachineFunction::iterator MBB = MF.begin(), E = MF.end(); 260 MBB != E; ++MBB) 261 RewriteMBB(*MBB, VRM); 262 263 DOUT << "**** Post Machine Instrs ****\n"; 264 DEBUG(MF.dump()); 265 266 return true; 267 } 268 private: 269 bool PrepForUnfoldOpti(MachineBasicBlock &MBB, 270 MachineBasicBlock::iterator &MII, 271 std::vector<MachineInstr*> &MaybeDeadStores, 272 AvailableSpills &Spills, BitVector &RegKills, 273 std::vector<MachineOperand*> &KillOps, 274 VirtRegMap &VRM); 275 void SpillRegToStackSlot(MachineBasicBlock &MBB, 276 MachineBasicBlock::iterator &MII, 277 int Idx, unsigned PhysReg, int StackSlot, 278 const TargetRegisterClass *RC, 279 bool isAvailable, MachineInstr *&LastStore, 280 AvailableSpills &Spills, 281 SmallSet<MachineInstr*, 4> &ReMatDefs, 282 BitVector &RegKills, 283 std::vector<MachineOperand*> &KillOps, 284 VirtRegMap &VRM); 285 void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM); 286 }; 287} 288 289/// AvailableSpills - As the local spiller is scanning and rewriting an MBB from 290/// top down, keep track of which spill slots or remats are available in each 291/// register. 292/// 293/// Note that not all physregs are created equal here. In particular, some 294/// physregs are reloads that we are allowed to clobber or ignore at any time. 295/// Other physregs are values that the register-allocated program is using that 296/// we cannot CHANGE, but we can read if we like. We keep track of this on a 297/// per-stack-slot / remat id basis as the low bit in the value of the 298/// SpillSlotsOrReMatsAvailable entries. The predicate 'canClobberPhysReg()' checks 299/// this bit and addAvailable sets it. 300namespace { 301class VISIBILITY_HIDDEN AvailableSpills { 302 const TargetRegisterInfo *TRI; 303 const TargetInstrInfo *TII; 304 305 // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled 306 // or remat'ed virtual register values that are still available, due to being 307 // loaded or stored to, but not invalidated yet. 308 std::map<int, unsigned> SpillSlotsOrReMatsAvailable; 309 310 // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable, 311 // indicating which stack slot values are currently held by a physreg. This 312 // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a 313 // physreg is modified. 314 std::multimap<unsigned, int> PhysRegsAvailable; 315 316 void disallowClobberPhysRegOnly(unsigned PhysReg); 317 318 void ClobberPhysRegOnly(unsigned PhysReg); 319public: 320 AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii) 321 : TRI(tri), TII(tii) { 322 } 323 324 const TargetRegisterInfo *getRegInfo() const { return TRI; } 325 326 /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is 327 /// available in a physical register, return that PhysReg, otherwise 328 /// return 0. 329 unsigned getSpillSlotOrReMatPhysReg(int Slot) const { 330 std::map<int, unsigned>::const_iterator I = 331 SpillSlotsOrReMatsAvailable.find(Slot); 332 if (I != SpillSlotsOrReMatsAvailable.end()) { 333 return I->second >> 1; // Remove the CanClobber bit. 334 } 335 return 0; 336 } 337 338 /// addAvailable - Mark that the specified stack slot / remat is available in 339 /// the specified physreg. If CanClobber is true, the physreg can be modified 340 /// at any time without changing the semantics of the program.
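/// (Added note, accurate to the code below: the CanClobber flag is kept in the low bit of each SpillSlotsOrReMatsAvailable entry, and the remaining bits hold the physical register - see the shift in addAvailable and getSpillSlotOrReMatPhysReg.)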
341 void addAvailable(int SlotOrReMat, MachineInstr *MI, unsigned Reg, 342 bool CanClobber = true) { 343 // If this stack slot is thought to be available in some other physreg, 344 // remove its record. 345 ModifyStackSlotOrReMat(SlotOrReMat); 346 347 PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat)); 348 SpillSlotsOrReMatsAvailable[SlotOrReMat]= (Reg << 1) | (unsigned)CanClobber; 349 350 if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT) 351 DOUT << "Remembering RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1; 352 else 353 DOUT << "Remembering SS#" << SlotOrReMat; 354 DOUT << " in physreg " << TRI->getName(Reg) << "\n"; 355 } 356 357 /// canClobberPhysReg - Return true if the spiller is allowed to change the 358 /// value of the specified stackslot register if it desires. The specified 359 /// stack slot must be available in a physreg for this query to make sense. 360 bool canClobberPhysReg(int SlotOrReMat) const { 361 assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) && 362 "Value not available!"); 363 return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1; 364 } 365 366 /// disallowClobberPhysReg - Unset the CanClobber bit of the specified 367 /// stackslot register. The register is still available but is no longer 368 /// allowed to be modified. 369 void disallowClobberPhysReg(unsigned PhysReg); 370 371 /// ClobberPhysReg - This is called when the specified physreg changes 372 /// value. We use this to invalidate any info about stuff that lives in 373 /// it and any of its aliases. 374 void ClobberPhysReg(unsigned PhysReg); 375 376 /// ModifyStackSlotOrReMat - This method is called when the value in a stack 377 /// slot changes. This removes information about which register the previous 378 /// value for this slot lives in (as the previous value is dead now). 379 void ModifyStackSlotOrReMat(int SlotOrReMat); 380}; 381} 382 383/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified 384/// stackslot register. The register is still available but is no longer 385/// allowed to be modified. 386void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) { 387 std::multimap<unsigned, int>::iterator I = 388 PhysRegsAvailable.lower_bound(PhysReg); 389 while (I != PhysRegsAvailable.end() && I->first == PhysReg) { 390 int SlotOrReMat = I->second; 391 I++; 392 assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg && 393 "Bidirectional map mismatch!"); 394 SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1; 395 DOUT << "PhysReg " << TRI->getName(PhysReg) 396 << " copied, it is available for use but can no longer be modified\n"; 397 } 398} 399 400/// disallowClobberPhysReg - Unset the CanClobber bit of the specified 401/// stackslot register and its aliases. The register and its aliases may 402/// still be available but are no longer allowed to be modified. 403void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) { 404 for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS) 405 disallowClobberPhysRegOnly(*AS); 406 disallowClobberPhysRegOnly(PhysReg); 407} 408 409/// ClobberPhysRegOnly - This is called when the specified physreg changes 410/// value. We use this to invalidate any info about stuff we think lives in it.
411void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) { 412 std::multimap<unsigned, int>::iterator I = 413 PhysRegsAvailable.lower_bound(PhysReg); 414 while (I != PhysRegsAvailable.end() && I->first == PhysReg) { 415 int SlotOrReMat = I->second; 416 PhysRegsAvailable.erase(I++); 417 assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg && 418 "Bidirectional map mismatch!"); 419 SpillSlotsOrReMatsAvailable.erase(SlotOrReMat); 420 DOUT << "PhysReg " << TRI->getName(PhysReg) 421 << " clobbered, invalidating "; 422 if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT) 423 DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n"; 424 else 425 DOUT << "SS#" << SlotOrReMat << "\n"; 426 } 427} 428 429/// ClobberPhysReg - This is called when the specified physreg changes 430/// value. We use this to invalidate any info about stuff we think lives in 431/// it and any of its aliases. 432void AvailableSpills::ClobberPhysReg(unsigned PhysReg) { 433 for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS) 434 ClobberPhysRegOnly(*AS); 435 ClobberPhysRegOnly(PhysReg); 436} 437 438/// ModifyStackSlotOrReMat - This method is called when the value in a stack 439/// slot changes. This removes information about which register the previous 440/// value for this slot lives in (as the previous value is dead now). 441void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) { 442 std::map<int, unsigned>::iterator It = 443 SpillSlotsOrReMatsAvailable.find(SlotOrReMat); 444 if (It == SpillSlotsOrReMatsAvailable.end()) return; 445 unsigned Reg = It->second >> 1; 446 SpillSlotsOrReMatsAvailable.erase(It); 447 448 // This register may hold the value of multiple stack slots; only remove this 449 // stack slot from the set of values the register contains. 450 std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg); 451 for (; ; ++I) { 452 assert(I != PhysRegsAvailable.end() && I->first == Reg && 453 "Map inverse broken!"); 454 if (I->second == SlotOrReMat) break; 455 } 456 PhysRegsAvailable.erase(I); 457} 458 459 460 461/// InvalidateKills - MI is going to be deleted. If any of its operands are 462/// marked kill, then invalidate the information. 463static void InvalidateKills(MachineInstr &MI, BitVector &RegKills, 464 std::vector<MachineOperand*> &KillOps, 465 SmallVector<unsigned, 2> *KillRegs = NULL) { 466 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 467 MachineOperand &MO = MI.getOperand(i); 468 if (!MO.isRegister() || !MO.isUse() || !MO.isKill()) 469 continue; 470 unsigned Reg = MO.getReg(); 471 if (KillRegs) 472 KillRegs->push_back(Reg); 473 if (KillOps[Reg] == &MO) { 474 RegKills.reset(Reg); 475 KillOps[Reg] = NULL; 476 } 477 } 478} 479 480/// InvalidateKill - A MI that defines the specified register is being deleted; 481/// invalidate the register kill information. 482static void InvalidateKill(unsigned Reg, BitVector &RegKills, 483 std::vector<MachineOperand*> &KillOps) { 484 if (RegKills[Reg]) { 485 KillOps[Reg]->setIsKill(false); 486 KillOps[Reg] = NULL; 487 RegKills.reset(Reg); 488 } 489} 490 491/// InvalidateRegDef - If the def operand of the specified def MI is now dead 492/// (since its spill instruction is removed), mark it isDead. Also checks if 493/// the def MI has other definition operands that are not dead; this is returned 494/// by reference in HasLiveDef. 495static bool InvalidateRegDef(MachineBasicBlock::iterator I, 496 MachineInstr &NewDef, unsigned Reg, 497 bool &HasLiveDef) { 498 // Due to remat, it's possible this reg isn't being reused.
That is, 499 // the def of this reg (by prev MI) is now dead. 500 MachineInstr *DefMI = I; 501 MachineOperand *DefOp = NULL; 502 for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) { 503 MachineOperand &MO = DefMI->getOperand(i); 504 if (MO.isRegister() && MO.isDef()) { 505 if (MO.getReg() == Reg) 506 DefOp = &MO; 507 else if (!MO.isDead()) 508 HasLiveDef = true; 509 } 510 } 511 if (!DefOp) 512 return false; 513 514 bool FoundUse = false, Done = false; 515 MachineBasicBlock::iterator E = NewDef; 516 ++I; ++E; 517 for (; !Done && I != E; ++I) { 518 MachineInstr *NMI = I; 519 for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) { 520 MachineOperand &MO = NMI->getOperand(j); 521 if (!MO.isRegister() || MO.getReg() != Reg) 522 continue; 523 if (MO.isUse()) 524 FoundUse = true; 525 Done = true; // Stop after scanning all the operands of this MI. 526 } 527 } 528 if (!FoundUse) { 529 // Def is dead! 530 DefOp->setIsDead(); 531 return true; 532 } 533 return false; 534} 535 536/// UpdateKills - Track and update kill info. If a MI reads a register that is 537/// marked kill, then it must be due to register reuse. Transfer the kill info 538/// over. 539static void UpdateKills(MachineInstr &MI, BitVector &RegKills, 540 std::vector<MachineOperand*> &KillOps) { 541 const TargetInstrDesc &TID = MI.getDesc(); 542 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 543 MachineOperand &MO = MI.getOperand(i); 544 if (!MO.isRegister() || !MO.isUse()) 545 continue; 546 unsigned Reg = MO.getReg(); 547 if (Reg == 0) 548 continue; 549 550 if (RegKills[Reg]) { 551 // That can't be right. Register is killed but not re-defined and it's 552 // being reused. Let's fix that. 553 KillOps[Reg]->setIsKill(false); 554 KillOps[Reg] = NULL; 555 RegKills.reset(Reg); 556 if (i < TID.getNumOperands() && 557 TID.getOperandConstraint(i, TOI::TIED_TO) == -1) 558 // Unless it's a two-address operand, this is the new kill. 559 MO.setIsKill(); 560 } 561 if (MO.isKill()) { 562 RegKills.set(Reg); 563 KillOps[Reg] = &MO; 564 } 565 } 566 567 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 568 const MachineOperand &MO = MI.getOperand(i); 569 if (!MO.isRegister() || !MO.isDef()) 570 continue; 571 unsigned Reg = MO.getReg(); 572 RegKills.reset(Reg); 573 KillOps[Reg] = NULL; 574 } 575} 576 577 578// ReusedOp - For each reused operand, we keep track of a bit of information, in 579// case we need to rollback upon processing a new operand. See comments below. 580namespace { 581 struct ReusedOp { 582 // The MachineInstr operand that reused an available value. 583 unsigned Operand; 584 585 // StackSlotOrReMat - The spill slot or remat id of the value being reused. 586 unsigned StackSlotOrReMat; 587 588 // PhysRegReused - The physical register the value was available in. 589 unsigned PhysRegReused; 590 591 // AssignedPhysReg - The physreg that was assigned for use by the reload. 592 unsigned AssignedPhysReg; 593 594 // VirtReg - The virtual register itself. 595 unsigned VirtReg; 596 597 ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr, 598 unsigned vreg) 599 : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr), 600 AssignedPhysReg(apr), VirtReg(vreg) {} 601 }; 602 603 /// ReuseInfo - This maintains a collection of ReuseOp's for each operand that 604 /// is reused instead of reloaded. 
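/// (Added note, accurate to the code below: when a later reload wants a physreg that an earlier operand has reused, GetRegForReload() either redirects the reload to that operand's originally assigned register or undoes the earlier reuse by emitting an explicit reload or remat for it.)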
605 class VISIBILITY_HIDDEN ReuseInfo { 606 MachineInstr &MI; 607 std::vector<ReusedOp> Reuses; 608 BitVector PhysRegsClobbered; 609 public: 610 ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) { 611 PhysRegsClobbered.resize(tri->getNumRegs()); 612 } 613 614 bool hasReuses() const { 615 return !Reuses.empty(); 616 } 617 618 /// addReuse - If we choose to reuse a virtual register that is already 619 /// available instead of reloading it, remember that we did so. 620 void addReuse(unsigned OpNo, unsigned StackSlotOrReMat, 621 unsigned PhysRegReused, unsigned AssignedPhysReg, 622 unsigned VirtReg) { 623 // If the reload is to the assigned register anyway, no undo will be 624 // required. 625 if (PhysRegReused == AssignedPhysReg) return; 626 627 // Otherwise, remember this. 628 Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused, 629 AssignedPhysReg, VirtReg)); 630 } 631 632 void markClobbered(unsigned PhysReg) { 633 PhysRegsClobbered.set(PhysReg); 634 } 635 636 bool isClobbered(unsigned PhysReg) const { 637 return PhysRegsClobbered.test(PhysReg); 638 } 639 640 /// GetRegForReload - We are about to emit a reload into PhysReg. If there 641 /// is some other operand that is using the specified register, either pick 642 /// a new register to use, or evict the previous reload and use this reg. 643 unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI, 644 AvailableSpills &Spills, 645 std::vector<MachineInstr*> &MaybeDeadStores, 646 SmallSet<unsigned, 8> &Rejected, 647 BitVector &RegKills, 648 std::vector<MachineOperand*> &KillOps, 649 VirtRegMap &VRM) { 650 const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget() 651 .getInstrInfo(); 652 653 if (Reuses.empty()) return PhysReg; // This is most often empty. 654 655 for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) { 656 ReusedOp &Op = Reuses[ro]; 657 // If we find some other reuse that was supposed to use this register 658 // exactly for its reload, we can change this reload to use ITS reload 659 // register. That is, unless its reload register has already been 660 // considered and subsequently rejected because it has also been reused 661 // by another operand. 662 if (Op.PhysRegReused == PhysReg && 663 Rejected.count(Op.AssignedPhysReg) == 0) { 664 // Yup, use the reload register that we didn't use before. 665 unsigned NewReg = Op.AssignedPhysReg; 666 Rejected.insert(PhysReg); 667 return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected, 668 RegKills, KillOps, VRM); 669 } else { 670 // Otherwise, we might also have a problem if a previously reused 671 // value aliases the new register. If so, codegen the previous reload 672 // and use this one. 673 unsigned PRRU = Op.PhysRegReused; 674 const TargetRegisterInfo *TRI = Spills.getRegInfo(); 675 if (TRI->areAliases(PRRU, PhysReg)) { 676 // Okay, we found out that an alias of a reused register 677 // was used. This isn't good because it means we have 678 // to undo a previous reuse. 679 MachineBasicBlock *MBB = MI->getParent(); 680 const TargetRegisterClass *AliasRC = 681 MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg); 682 683 // Copy Op out of the vector and remove it, we're going to insert an 684 // explicit load for it. 685 ReusedOp NewOp = Op; 686 Reuses.erase(Reuses.begin()+ro); 687 688 // Ok, we're going to try to reload the assigned physreg into the 689 // slot that we were supposed to in the first place. However, that 690 // register could hold a reuse. 
Check to see if it conflicts or 691 // would prefer us to use a different register. 692 unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg, 693 MI, Spills, MaybeDeadStores, 694 Rejected, RegKills, KillOps, VRM); 695 696 if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) { 697 TRI->reMaterialize(*MBB, MI, NewPhysReg, 698 VRM.getReMaterializedMI(NewOp.VirtReg)); 699 ++NumReMats; 700 } else { 701 TII->loadRegFromStackSlot(*MBB, MI, NewPhysReg, 702 NewOp.StackSlotOrReMat, AliasRC); 703 // Any stores to this stack slot are not dead anymore. 704 MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL; 705 ++NumLoads; 706 } 707 Spills.ClobberPhysReg(NewPhysReg); 708 Spills.ClobberPhysReg(NewOp.PhysRegReused); 709 710 MI->getOperand(NewOp.Operand).setReg(NewPhysReg); 711 712 Spills.addAvailable(NewOp.StackSlotOrReMat, MI, NewPhysReg); 713 MachineBasicBlock::iterator MII = MI; 714 --MII; 715 UpdateKills(*MII, RegKills, KillOps); 716 DOUT << '\t' << *MII; 717 718 DOUT << "Reuse undone!\n"; 719 --NumReused; 720 721 // Finally, PhysReg is now available; go ahead and use it. 722 return PhysReg; 723 } 724 } 725 } 726 return PhysReg; 727 } 728 729 /// GetRegForReload - Helper for the above GetRegForReload(). Add a 730 /// 'Rejected' set to remember which registers have been considered and 731 /// rejected for the reload. This avoids infinite looping in cases like 732 /// this: 733 /// t1 := op t2, t3 734 /// t2 <- assigned r0 for use by the reload but ended up reusing r1 735 /// t3 <- assigned r1 for use by the reload but ended up reusing r0 736 /// t1 <- desires r1 737 /// sees r1 is taken by t2, tries t2's reload register r0 738 /// sees r0 is taken by t3, tries t3's reload register r1 739 /// sees r1 is taken by t2, tries t2's reload register r0 ... 740 unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI, 741 AvailableSpills &Spills, 742 std::vector<MachineInstr*> &MaybeDeadStores, 743 BitVector &RegKills, 744 std::vector<MachineOperand*> &KillOps, 745 VirtRegMap &VRM) { 746 SmallSet<unsigned, 8> Rejected; 747 return GetRegForReload(PhysReg, MI, Spills, MaybeDeadStores, Rejected, 748 RegKills, KillOps, VRM); 749 } 750 }; 751} 752 753/// PrepForUnfoldOpti - Turn a store folding instruction into a load folding 754/// instruction. e.g. 755/// xorl %edi, %eax 756/// movl %eax, -32(%ebp) 757/// movl -36(%ebp), %eax 758/// orl %eax, -32(%ebp) 759/// ==> 760/// xorl %edi, %eax 761/// orl -36(%ebp), %eax 762/// mov %eax, -32(%ebp) 763/// This enables unfolding optimization for a subsequent instruction which will 764/// also eliminate the newly introduced store instruction. 765bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB, 766 MachineBasicBlock::iterator &MII, 767 std::vector<MachineInstr*> &MaybeDeadStores, 768 AvailableSpills &Spills, 769 BitVector &RegKills, 770 std::vector<MachineOperand*> &KillOps, 771 VirtRegMap &VRM) { 772 MachineFunction &MF = *MBB.getParent(); 773 MachineInstr &MI = *MII; 774 unsigned UnfoldedOpc = 0; 775 unsigned UnfoldPR = 0; 776 unsigned UnfoldVR = 0; 777 int FoldedSS = VirtRegMap::NO_STACK_SLOT; 778 VirtRegMap::MI2VirtMapTy::const_iterator I, End; 779 for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) { 780 // Only transform a MI that folds a single register. 781 if (UnfoldedOpc) 782 return false; 783 UnfoldVR = I->second.first; 784 VirtRegMap::ModRef MR = I->second.second; 785 if (VRM.isAssignedReg(UnfoldVR)) 786 continue; 787 // If this reference is not a use, any previous store is now dead.
788 // Otherwise, the store to this stack slot is not dead anymore. 789 FoldedSS = VRM.getStackSlot(UnfoldVR); 790 MachineInstr* DeadStore = MaybeDeadStores[FoldedSS]; 791 if (DeadStore && (MR & VirtRegMap::isModRef)) { 792 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS); 793 if (!PhysReg || 794 DeadStore->findRegisterUseOperandIdx(PhysReg, true) == -1) 795 continue; 796 UnfoldPR = PhysReg; 797 UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(), 798 false, true); 799 } 800 } 801 802 if (!UnfoldedOpc) 803 return false; 804 805 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 806 MachineOperand &MO = MI.getOperand(i); 807 if (!MO.isRegister() || MO.getReg() == 0 || !MO.isUse()) 808 continue; 809 unsigned VirtReg = MO.getReg(); 810 if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg()) 811 continue; 812 if (VRM.isAssignedReg(VirtReg)) { 813 unsigned PhysReg = VRM.getPhys(VirtReg); 814 if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR)) 815 return false; 816 } else if (VRM.isReMaterialized(VirtReg)) 817 continue; 818 int SS = VRM.getStackSlot(VirtReg); 819 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS); 820 if (PhysReg) { 821 if (TRI->regsOverlap(PhysReg, UnfoldPR)) 822 return false; 823 continue; 824 } 825 PhysReg = VRM.getPhys(VirtReg); 826 if (!TRI->regsOverlap(PhysReg, UnfoldPR)) 827 continue; 828 829 // Ok, we'll need to reload the value into a register which makes 830 // it impossible to perform the store unfolding optimization later. 831 // Let's see if it is possible to fold the load if the store is 832 // unfolded. This allows us to perform the store unfolding 833 // optimization. 834 SmallVector<MachineInstr*, 4> NewMIs; 835 if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) { 836 assert(NewMIs.size() == 1); 837 MachineInstr *NewMI = NewMIs.back(); 838 NewMIs.clear(); 839 int Idx = NewMI->findRegisterUseOperandIdx(VirtReg); 840 assert(Idx != -1); 841 SmallVector<unsigned, 2> Ops; 842 Ops.push_back(Idx); 843 MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS); 844 if (FoldedMI) { 845 if (!VRM.hasPhys(UnfoldVR)) 846 VRM.assignVirt2Phys(UnfoldVR, UnfoldPR); 847 VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef); 848 MII = MBB.insert(MII, FoldedMI); 849 VRM.RemoveMachineInstrFromMaps(&MI); 850 MBB.erase(&MI); 851 return true; 852 } 853 delete NewMI; 854 } 855 } 856 return false; 857} 858 859/// findSuperReg - Find the SubReg's super-register of given register class 860/// where its SubIdx sub-register is SubReg. 861static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg, 862 unsigned SubIdx, const TargetRegisterInfo *TRI) { 863 for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end(); 864 I != E; ++I) { 865 unsigned Reg = *I; 866 if (TRI->getSubReg(Reg, SubIdx) == SubReg) 867 return Reg; 868 } 869 return 0; 870} 871 872/// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if 873/// the last store to the same slot is now dead. If so, remove the last store. 
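/// (Added note, accurate to the code below: if the removed store killed a register whose rematerializable def has no other live defs, that def is deleted as well - see the ReMatDefs check.)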
874 void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB, 875 MachineBasicBlock::iterator &MII, 876 int Idx, unsigned PhysReg, int StackSlot, 877 const TargetRegisterClass *RC, 878 bool isAvailable, MachineInstr *&LastStore, 879 AvailableSpills &Spills, 880 SmallSet<MachineInstr*, 4> &ReMatDefs, 881 BitVector &RegKills, 882 std::vector<MachineOperand*> &KillOps, 883 VirtRegMap &VRM) { 884 TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC); 885 DOUT << "Store:\t" << *next(MII); 886 887 // If there is a dead store to this stack slot, nuke it now. 888 if (LastStore) { 889 DOUT << "Removed dead store:\t" << *LastStore; 890 ++NumDSE; 891 SmallVector<unsigned, 2> KillRegs; 892 InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs); 893 MachineBasicBlock::iterator PrevMII = LastStore; 894 bool CheckDef = PrevMII != MBB.begin(); 895 if (CheckDef) 896 --PrevMII; 897 MBB.erase(LastStore); 898 VRM.RemoveMachineInstrFromMaps(LastStore); 899 if (CheckDef) { 900 // Look at defs of killed registers on the store. Mark the defs 901 // as dead since the store has been deleted and they aren't 902 // being reused. 903 for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) { 904 bool HasOtherDef = false; 905 if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) { 906 MachineInstr *DeadDef = PrevMII; 907 if (ReMatDefs.count(DeadDef) && !HasOtherDef) { 908 // FIXME: This assumes a remat def does not have side 909 // effects. 910 MBB.erase(DeadDef); 911 VRM.RemoveMachineInstrFromMaps(DeadDef); 912 ++NumDRM; 913 } 914 } 915 } 916 } 917 } 918 919 LastStore = next(MII); 920 921 // If the stack slot value was previously available in some other 922 // register, change it now. Otherwise, make the value available 923 // in PhysReg. 924 Spills.ModifyStackSlotOrReMat(StackSlot); 925 Spills.ClobberPhysReg(PhysReg); 926 Spills.addAvailable(StackSlot, LastStore, PhysReg, isAvailable); 927 ++NumStores; 928} 929 930/// RewriteMBB - Keep track of which spills are available even after the 931/// register allocator is done with them. If possible, avoid reloading vregs. 932void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) { 933 DOUT << MBB.getBasicBlock()->getName() << ":\n"; 934 935 MachineFunction &MF = *MBB.getParent(); 936 937 // Spills - Keep track of which spilled values are available in physregs so 938 // that we can choose to reuse the physregs instead of emitting reloads. 939 AvailableSpills Spills(TRI, TII); 940 941 // MaybeDeadStores - When we need to write a value back into a stack slot, 942 // keep track of the inserted store. If the stack slot value is never read 943 // (because the value was used from some available register, for example), and 944 // subsequently stored to, the original store is dead. This map keeps track 945 // of inserted stores that are not used. If we see a subsequent store to the 946 // same stack slot, the original store is deleted. 947 std::vector<MachineInstr*> MaybeDeadStores; 948 MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL); 949 950 // ReMatDefs - These are rematerializable def MIs which are not deleted. 951 SmallSet<MachineInstr*, 4> ReMatDefs; 952 953 // Keep track of kill information.
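// (Added note, accurate to the code below: RegKills[R] records that the most recent use of physreg R in this block was marked as a kill, and KillOps[R] remembers that operand so the kill flag can be cleared or transferred if R is later reused or redefined - see UpdateKills and InvalidateKill above.)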
954 BitVector RegKills(TRI->getNumRegs()); 955 std::vector<MachineOperand*> KillOps; 956 KillOps.resize(TRI->getNumRegs(), NULL); 957 958 for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end(); 959 MII != E; ) { 960 MachineBasicBlock::iterator NextMII = MII; ++NextMII; 961 962 VirtRegMap::MI2VirtMapTy::const_iterator I, End; 963 bool Erased = false; 964 bool BackTracked = false; 965 if (PrepForUnfoldOpti(MBB, MII, 966 MaybeDeadStores, Spills, RegKills, KillOps, VRM)) 967 NextMII = next(MII); 968 969 MachineInstr &MI = *MII; 970 const TargetInstrDesc &TID = MI.getDesc(); 971 972 // Insert restores here if asked to. 973 if (VRM.isRestorePt(&MI)) { 974 std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI); 975 for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) { 976 unsigned VirtReg = RestoreRegs[i]; 977 if (!VRM.getPreSplitReg(VirtReg)) 978 continue; // Split interval spilled again. 979 unsigned Phys = VRM.getPhys(VirtReg); 980 RegInfo->setPhysRegUsed(Phys); 981 if (VRM.isReMaterialized(VirtReg)) { 982 TRI->reMaterialize(MBB, &MI, Phys, 983 VRM.getReMaterializedMI(VirtReg)); 984 ++NumReMats; 985 } else { 986 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); 987 TII->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg), 988 RC); 989 ++NumLoads; 990 } 991 // This invalidates Phys. 992 Spills.ClobberPhysReg(Phys); 993 UpdateKills(*prior(MII), RegKills, KillOps); 994 DOUT << '\t' << *prior(MII); 995 } 996 } 997 998 // Insert spills here if asked to. 999 if (VRM.isSpillPt(&MI)) { 1000 std::vector<std::pair<unsigned,bool> > &SpillRegs = 1001 VRM.getSpillPtSpills(&MI); 1002 for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) { 1003 unsigned VirtReg = SpillRegs[i].first; 1004 bool isKill = SpillRegs[i].second; 1005 if (!VRM.getPreSplitReg(VirtReg)) 1006 continue; // Split interval spilled again. 1007 const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg); 1008 unsigned Phys = VRM.getPhys(VirtReg); 1009 int StackSlot = VRM.getStackSlot(VirtReg); 1010 TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC); 1011 MachineInstr *StoreMI = next(MII); 1012 DOUT << "Store:\t" << StoreMI; 1013 VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod); 1014 } 1015 NextMII = next(MII); 1016 } 1017 1018 /// ReusedOperands - Keep track of operand reuse in case we need to undo 1019 /// reuse. 1020 ReuseInfo ReusedOperands(MI, TRI); 1021 // Process all of the spilled uses and all non spilled reg references. 1022 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 1023 MachineOperand &MO = MI.getOperand(i); 1024 if (!MO.isRegister() || MO.getReg() == 0) 1025 continue; // Ignore non-register operands. 1026 1027 unsigned VirtReg = MO.getReg(); 1028 if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) { 1029 // Ignore physregs for spilling, but remember that it is used by this 1030 // function. 1031 RegInfo->setPhysRegUsed(VirtReg); 1032 continue; 1033 } 1034 1035 assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && 1036 "Not a virtual or a physical register?"); 1037 1038 unsigned SubIdx = MO.getSubReg(); 1039 if (VRM.isAssignedReg(VirtReg)) { 1040 // This virtual register was assigned a physreg! 1041 unsigned Phys = VRM.getPhys(VirtReg); 1042 RegInfo->setPhysRegUsed(Phys); 1043 if (MO.isDef()) 1044 ReusedOperands.markClobbered(Phys); 1045 unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys; 1046 MI.getOperand(i).setReg(RReg); 1047 continue; 1048 } 1049 1050 // This virtual register is now known to be a spilled value. 
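// (Added note, accurate to the code below: uses are reloaded here, or satisfied from a value already available in a physreg; spilled defs are written back to their stack slots in the "Process all of the spilled defs" loop further down.)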
1051 if (!MO.isUse()) 1052 continue; // Handle defs in the loop below (handle use&def here though) 1053 1054 bool DoReMat = VRM.isReMaterialized(VirtReg); 1055 int SSorRMId = DoReMat 1056 ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg); 1057 int ReuseSlot = SSorRMId; 1058 1059 // Check to see if this stack slot is available. 1060 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId); 1061 1062 // If this is a sub-register use, make sure the reuse register is in the 1063 // right register class. For example, for x86 not all of the 32-bit 1064 // registers have accessible sub-registers. 1065 // Similarly so for EXTRACT_SUBREG. Consider this: 1066 // EDI = op 1067 // MOV32_mr fi#1, EDI 1068 // ... 1069 // = EXTRACT_SUBREG fi#1 1070 // fi#1 is available in EDI, but it cannot be reused because it's not in 1071 // the right register file. 1072 if (PhysReg && 1073 (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) { 1074 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); 1075 if (!RC->contains(PhysReg)) 1076 PhysReg = 0; 1077 } 1078 1079 if (PhysReg) { 1080 // This spilled operand might be part of a two-address operand. If this 1081 // is the case, then changing it will necessarily require changing the 1082 // def part of the instruction as well. However, in some cases, we 1083 // aren't allowed to modify the reused register. If none of these cases 1084 // apply, reuse it. 1085 bool CanReuse = true; 1086 int ti = TID.getOperandConstraint(i, TOI::TIED_TO); 1087 if (ti != -1 && 1088 MI.getOperand(ti).isRegister() && 1089 MI.getOperand(ti).getReg() == VirtReg) { 1090 // Okay, we have a two address operand. We can reuse this physreg as 1091 // long as we are allowed to clobber the value and there isn't an 1092 // earlier def that has already clobbered the physreg. 1093 CanReuse = Spills.canClobberPhysReg(ReuseSlot) && 1094 !ReusedOperands.isClobbered(PhysReg); 1095 } 1096 1097 if (CanReuse) { 1098 // If this stack slot value is already available, reuse it! 1099 if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT) 1100 DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1; 1101 else 1102 DOUT << "Reusing SS#" << ReuseSlot; 1103 DOUT << " from physreg " 1104 << TRI->getName(PhysReg) << " for vreg" 1105 << VirtReg <<" instead of reloading into physreg " 1106 << TRI->getName(VRM.getPhys(VirtReg)) << "\n"; 1107 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg; 1108 MI.getOperand(i).setReg(RReg); 1109 1110 // The only technical detail we have is that we don't know that 1111 // PhysReg won't be clobbered by a reloaded stack slot that occurs 1112 // later in the instruction. In particular, consider 'op V1, V2'. 1113 // If V1 is available in physreg R0, we would choose to reuse it 1114 // here, instead of reloading it into the register the allocator 1115 // indicated (say R1). However, V2 might have to be reloaded 1116 // later, and it might indicate that it needs to live in R0. When 1117 // this occurs, we need to have information available that 1118 // indicates it is safe to use R1 for the reload instead of R0. 1119 // 1120 // To further complicate matters, we might conflict with an alias, 1121 // or R0 and R1 might not be compatible with each other. In this 1122 // case, we actually insert a reload for V1 in R1, ensuring that 1123 // we can get at R0 or its alias. 1124 ReusedOperands.addReuse(i, ReuseSlot, PhysReg, 1125 VRM.getPhys(VirtReg), VirtReg); 1126 if (ti != -1) 1127 // Only mark it clobbered if this is a use&def operand. 
1128 ReusedOperands.markClobbered(PhysReg); 1129 ++NumReused; 1130 1131 if (MI.getOperand(i).isKill() && 1132 ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) { 1133 // This was the last use and the spilled value is still available 1134 // for reuse. That means the spill was unnecessary! 1135 MachineInstr* DeadStore = MaybeDeadStores[ReuseSlot]; 1136 if (DeadStore) { 1137 DOUT << "Removed dead store:\t" << *DeadStore; 1138 InvalidateKills(*DeadStore, RegKills, KillOps); 1139 VRM.RemoveMachineInstrFromMaps(DeadStore); 1140 MBB.erase(DeadStore); 1141 MaybeDeadStores[ReuseSlot] = NULL; 1142 ++NumDSE; 1143 } 1144 } 1145 continue; 1146 } // CanReuse 1147 1148 // Otherwise we have a situation where we have a two-address instruction 1149 // whose mod/ref operand needs to be reloaded. This reload is already 1150 // available in some register "PhysReg", but if we used PhysReg as the 1151 // operand to our 2-addr instruction, the instruction would modify 1152 // PhysReg. This isn't cool if something later uses PhysReg and expects 1153 // to get its initial value. 1154 // 1155 // To avoid this problem, and to avoid doing a load right after a store, 1156 // we emit a copy from PhysReg into the designated register for this 1157 // operand. 1158 unsigned DesignatedReg = VRM.getPhys(VirtReg); 1159 assert(DesignatedReg && "Must map virtreg to physreg!"); 1160 1161 // Note that, if we reused a register for a previous operand, the 1162 // register we want to reload into might not actually be 1163 // available. If this occurs, use the register indicated by the 1164 // reuser. 1165 if (ReusedOperands.hasReuses()) 1166 DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI, 1167 Spills, MaybeDeadStores, RegKills, KillOps, VRM); 1168 1169 // If the mapped designated register is actually the physreg we have 1170 // incoming, we don't need to insert a dead copy. 1171 if (DesignatedReg == PhysReg) { 1172 // If this stack slot value is already available, reuse it! 1173 if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT) 1174 DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1; 1175 else 1176 DOUT << "Reusing SS#" << ReuseSlot; 1177 DOUT << " from physreg " << TRI->getName(PhysReg) << " for vreg" 1178 << VirtReg 1179 << " instead of reloading into same physreg.\n"; 1180 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg; 1181 MI.getOperand(i).setReg(RReg); 1182 ReusedOperands.markClobbered(RReg); 1183 ++NumReused; 1184 continue; 1185 } 1186 1187 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); 1188 RegInfo->setPhysRegUsed(DesignatedReg); 1189 ReusedOperands.markClobbered(DesignatedReg); 1190 TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC); 1191 1192 MachineInstr *CopyMI = prior(MII); 1193 UpdateKills(*CopyMI, RegKills, KillOps); 1194 1195 // This invalidates DesignatedReg. 1196 Spills.ClobberPhysReg(DesignatedReg); 1197 1198 Spills.addAvailable(ReuseSlot, &MI, DesignatedReg); 1199 unsigned RReg = 1200 SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg; 1201 MI.getOperand(i).setReg(RReg); 1202 DOUT << '\t' << *prior(MII); 1203 ++NumReused; 1204 continue; 1205 } // if (PhysReg) 1206 1207 // Otherwise, reload it and remember that we have it. 1208 PhysReg = VRM.getPhys(VirtReg); 1209 assert(PhysReg && "Must map virtreg to physreg!"); 1210 1211 // Note that, if we reused a register for a previous operand, the 1212 // register we want to reload into might not actually be 1213 // available. If this occurs, use the register indicated by the 1214 // reuser.
1215 if (ReusedOperands.hasReuses()) 1216 PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI, 1217 Spills, MaybeDeadStores, RegKills, KillOps, VRM); 1218 1219 RegInfo->setPhysRegUsed(PhysReg); 1220 ReusedOperands.markClobbered(PhysReg); 1221 if (DoReMat) { 1222 TRI->reMaterialize(MBB, &MI, PhysReg, VRM.getReMaterializedMI(VirtReg)); 1223 ++NumReMats; 1224 } else { 1225 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg); 1226 TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC); 1227 ++NumLoads; 1228 } 1229 // This invalidates PhysReg. 1230 Spills.ClobberPhysReg(PhysReg); 1231 1232 // Any stores to this stack slot are not dead anymore. 1233 if (!DoReMat) 1234 MaybeDeadStores[SSorRMId] = NULL; 1235 Spills.addAvailable(SSorRMId, &MI, PhysReg); 1236 // Assumes this is the last use. IsKill will be unset if reg is reused 1237 // unless it's a two-address operand. 1238 if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1) 1239 MI.getOperand(i).setIsKill(); 1240 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg; 1241 MI.getOperand(i).setReg(RReg); 1242 UpdateKills(*prior(MII), RegKills, KillOps); 1243 DOUT << '\t' << *prior(MII); 1244 } 1245 1246 DOUT << '\t' << MI; 1247 1248 1249 // If we have folded references to memory operands, make sure we clear all 1250 // physical registers that may contain the value of the spilled virtual 1251 // register 1252 SmallSet<int, 2> FoldedSS; 1253 for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) { 1254 unsigned VirtReg = I->second.first; 1255 VirtRegMap::ModRef MR = I->second.second; 1256 DOUT << "Folded vreg: " << VirtReg << " MR: " << MR; 1257 1258 int SS = VRM.getStackSlot(VirtReg); 1259 if (SS == VirtRegMap::NO_STACK_SLOT) 1260 continue; 1261 FoldedSS.insert(SS); 1262 DOUT << " - StackSlot: " << SS << "\n"; 1263 1264 // If this folded instruction is just a use, check to see if it's a 1265 // straight load from the virt reg slot. 1266 if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) { 1267 int FrameIdx; 1268 unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx); 1269 if (DestReg && FrameIdx == SS) { 1270 // If this spill slot is available, turn it into a copy (or nothing) 1271 // instead of leaving it as a load! 1272 if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) { 1273 DOUT << "Promoted Load To Copy: " << MI; 1274 if (DestReg != InReg) { 1275 const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg); 1276 TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC); 1277 // Revisit the copy so we make sure to notice the effects of the 1278 // operation on the destreg (either needing to RA it if it's 1279 // virtual or needing to clobber any values if it's physical). 1280 NextMII = &MI; 1281 --NextMII; // backtrack to the copy. 1282 BackTracked = true; 1283 } else { 1284 DOUT << "Removing now-noop copy: " << MI; 1285 // Unset last kill since it's being reused. 1286 InvalidateKill(InReg, RegKills, KillOps); 1287 } 1288 1289 VRM.RemoveMachineInstrFromMaps(&MI); 1290 MBB.erase(&MI); 1291 Erased = true; 1292 goto ProcessNextInst; 1293 } 1294 } else { 1295 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS); 1296 SmallVector<MachineInstr*, 4> NewMIs; 1297 if (PhysReg && 1298 TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) { 1299 MBB.insert(MII, NewMIs[0]); 1300 VRM.RemoveMachineInstrFromMaps(&MI); 1301 MBB.erase(&MI); 1302 Erased = true; 1303 --NextMII; // backtrack to the unfolded instruction. 
1304 BackTracked = true; 1305 goto ProcessNextInst; 1306 } 1307 } 1308 } 1309 1310 // If this reference is not a use, any previous store is now dead. 1311 // Otherwise, the store to this stack slot is not dead anymore. 1312 MachineInstr* DeadStore = MaybeDeadStores[SS]; 1313 if (DeadStore) { 1314 bool isDead = !(MR & VirtRegMap::isRef); 1315 MachineInstr *NewStore = NULL; 1316 if (MR & VirtRegMap::isModRef) { 1317 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS); 1318 SmallVector<MachineInstr*, 4> NewMIs; 1319 // We can reuse this physreg as long as we are allowed to clobber 1320 // the value and there isn't an earlier def that has already clobbered 1321 // the physreg. 1322 if (PhysReg && 1323 !TII->isStoreToStackSlot(&MI, SS) && // Not profitable! 1324 DeadStore->findRegisterUseOperandIdx(PhysReg, true) != -1 && 1325 TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) { 1326 MBB.insert(MII, NewMIs[0]); 1327 NewStore = NewMIs[1]; 1328 MBB.insert(MII, NewStore); 1329 VRM.RemoveMachineInstrFromMaps(&MI); 1330 MBB.erase(&MI); 1331 Erased = true; 1332 --NextMII; 1333 --NextMII; // backtrack to the unfolded instruction. 1334 BackTracked = true; 1335 isDead = true; 1336 } 1337 } 1338 1339 if (isDead) { // Previous store is dead. 1340 // If we get here, the store is dead, nuke it now. 1341 DOUT << "Removed dead store:\t" << *DeadStore; 1342 InvalidateKills(*DeadStore, RegKills, KillOps); 1343 VRM.RemoveMachineInstrFromMaps(DeadStore); 1344 MBB.erase(DeadStore); 1345 if (!NewStore) 1346 ++NumDSE; 1347 } 1348 1349 MaybeDeadStores[SS] = NULL; 1350 if (NewStore) { 1351 // Treat this store as a spill merged into a copy. That makes the 1352 // stack slot value available. 1353 VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod); 1354 goto ProcessNextInst; 1355 } 1356 } 1357 1358 // If the spill slot value is available, and this is a new definition of 1359 // the value, the value is not available anymore. 1360 if (MR & VirtRegMap::isMod) { 1361 // Notice that the value in this stack slot has been modified. 1362 Spills.ModifyStackSlotOrReMat(SS); 1363 1364 // If this is *just* a mod of the value, check to see if this is just a 1365 // store to the spill slot (i.e. the spill got merged into the copy). If 1366 // so, realize that the vreg is available now, and add the store to the 1367 // MaybeDeadStore info. 1368 int StackSlot; 1369 if (!(MR & VirtRegMap::isRef)) { 1370 if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) { 1371 assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) && 1372 "Src hasn't been allocated yet?"); 1373 // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark 1374 // this as a potentially dead store in case there is a subsequent 1375 // store into the stack slot without a read from it. 1376 MaybeDeadStores[StackSlot] = &MI; 1377 1378 // If the stack slot value was previously available in some other 1379 // register, change it now. Otherwise, make the register available, 1380 // in PhysReg. 1381 Spills.addAvailable(StackSlot, &MI, SrcReg, false/*don't clobber*/); 1382 } 1383 } 1384 } 1385 } 1386 1387 // Process all of the spilled defs. 1388 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 1389 MachineOperand &MO = MI.getOperand(i); 1390 if (!(MO.isRegister() && MO.getReg() && MO.isDef())) 1391 continue; 1392 1393 unsigned VirtReg = MO.getReg(); 1394 if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) { 1395 // Check to see if this is a noop copy. 
If so, eliminate the 1396 // instruction before considering the dest reg to be changed. 1397 unsigned Src, Dst; 1398 if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) { 1399 ++NumDCE; 1400 DOUT << "Removing now-noop copy: " << MI; 1401 MBB.erase(&MI); 1402 Erased = true; 1403 VRM.RemoveMachineInstrFromMaps(&MI); 1404 Spills.disallowClobberPhysReg(VirtReg); 1405 goto ProcessNextInst; 1406 } 1407 1408 // If it's not a no-op copy, it clobbers the value in the destreg. 1409 Spills.ClobberPhysReg(VirtReg); 1410 ReusedOperands.markClobbered(VirtReg); 1411 1412 // Check to see if this instruction is a load from a stack slot into 1413 // a register. If so, this provides the stack slot value in the reg. 1414 int FrameIdx; 1415 if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) { 1416 assert(DestReg == VirtReg && "Unknown load situation!"); 1417 1418 // If it is a folded reference, then it's not safe to clobber. 1419 bool Folded = FoldedSS.count(FrameIdx); 1420 // Otherwise, if it wasn't available, remember that it is now! 1421 Spills.addAvailable(FrameIdx, &MI, DestReg, !Folded); 1422 goto ProcessNextInst; 1423 } 1424 1425 continue; 1426 } 1427 1428 unsigned SubIdx = MO.getSubReg(); 1429 bool DoReMat = VRM.isReMaterialized(VirtReg); 1430 if (DoReMat) 1431 ReMatDefs.insert(&MI); 1432 1433 // The only vregs left are stack slot definitions. 1434 int StackSlot = VRM.getStackSlot(VirtReg); 1435 const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg); 1436 1437 // If this def is part of a two-address operand, make sure to execute 1438 // the store from the correct physical register. 1439 unsigned PhysReg; 1440 int TiedOp = MI.getDesc().findTiedToSrcOperand(i); 1441 if (TiedOp != -1) { 1442 PhysReg = MI.getOperand(TiedOp).getReg(); 1443 if (SubIdx) { 1444 unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI); 1445 assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg && 1446 "Can't find corresponding super-register!"); 1447 PhysReg = SuperReg; 1448 } 1449 } else { 1450 PhysReg = VRM.getPhys(VirtReg); 1451 if (ReusedOperands.isClobbered(PhysReg)) { 1452 // Another def has taken the assigned physreg. It must have been a 1453 // use&def which got it due to reuse. Undo the reuse! 1454 PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI, 1455 Spills, MaybeDeadStores, RegKills, KillOps, VRM); 1456 } 1457 } 1458 1459 RegInfo->setPhysRegUsed(PhysReg); 1460 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg; 1461 ReusedOperands.markClobbered(RReg); 1462 MI.getOperand(i).setReg(RReg); 1463 1464 if (!MO.isDead()) { 1465 MachineInstr *&LastStore = MaybeDeadStores[StackSlot]; 1466 SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true, 1467 LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM); 1468 NextMII = next(MII); 1469 1470 // Check to see if this is a noop copy. If so, eliminate the 1471 // instruction before considering the dest reg to be changed. 
1472 { 1473 unsigned Src, Dst; 1474 if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) { 1475 ++NumDCE; 1476 DOUT << "Removing now-noop copy: " << MI; 1477 MBB.erase(&MI); 1478 Erased = true; 1479 VRM.RemoveMachineInstrFromMaps(&MI); 1480 UpdateKills(*LastStore, RegKills, KillOps); 1481 goto ProcessNextInst; 1482 } 1483 } 1484 } 1485 } 1486 ProcessNextInst: 1487 if (!Erased && !BackTracked) { 1488 for (MachineBasicBlock::iterator II = MI; II != NextMII; ++II) 1489 UpdateKills(*II, RegKills, KillOps); 1490 } 1491 MII = NextMII; 1492 } 1493} 1494 1495llvm::Spiller* llvm::createSpiller() { 1496 switch (SpillerOpt) { 1497 default: assert(0 && "Unreachable!"); 1498 case local: 1499 return new LocalSpiller(); 1500 case simple: 1501 return new SimpleSpiller(); 1502 } 1503} 1504
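The following is a minimal usage sketch, not part of this revision; the enclosing function is hypothetical. It assumes only the VirtRegMap and Spiller interfaces shown above: a register allocator records its assignments in a VirtRegMap and then hands the function to a spiller obtained from createSpiller() (selected by the -spiller option, "local" by default).

// Hypothetical driver (illustration only), using the API from this file.
#include "VirtRegMap.h"
#include <memory>
using namespace llvm;

static bool allocateAndRewrite(MachineFunction &MF) {
  VirtRegMap VRM(MF);                        // vreg -> physreg / stack slot map
  // ... the allocator calls VRM.assignVirt2Phys(vreg, preg) or
  // VRM.assignVirt2StackSlot(vreg) for each virtual register here ...
  std::auto_ptr<Spiller> S(createSpiller()); // 'simple' or 'local' spiller
  return S->runOnMachineFunction(MF, VRM);   // replace vregs, insert spill code
}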