Spiller.cpp revision 65de504ad226a0b2adc6c675115863a8e583ac1f
//===-- llvm/CodeGen/Spiller.cpp - Spiller -------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "spiller"

#include "Spiller.h"
#include "VirtRegMap.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <set>

using namespace llvm;

namespace {
  enum SpillerName { trivial, standard, splitting };
}

static cl::opt<SpillerName>
spillerOpt("spiller",
           cl::desc("Spiller to use: (default: standard)"),
           cl::Prefix,
           cl::values(clEnumVal(trivial, "trivial spiller"),
                      clEnumVal(standard, "default spiller"),
                      clEnumVal(splitting, "splitting spiller"),
                      clEnumValEnd),
           cl::init(standard));

// Spiller virtual destructor implementation.
Spiller::~Spiller() {}

namespace {

/// Utility class for spillers.
class SpillerBase : public Spiller {
protected:

  MachineFunction *mf;
  LiveIntervals *lis;
  MachineFrameInfo *mfi;
  MachineRegisterInfo *mri;
  const TargetInstrInfo *tii;
  VirtRegMap *vrm;

  /// Construct a spiller base.
  SpillerBase(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
    : mf(mf), lis(lis), vrm(vrm)
  {
    mfi = mf->getFrameInfo();
    mri = &mf->getRegInfo();
    tii = mf->getTarget().getInstrInfo();
  }

  /// Add spill ranges for every use/def of the live interval, inserting loads
  /// immediately before each use, and stores after each def. No folding or
  /// remat is attempted.
  std::vector<LiveInterval*> trivialSpillEverywhere(LiveInterval *li) {
    DEBUG(dbgs() << "Spilling everywhere " << *li << "\n");

    assert(li->weight != HUGE_VALF &&
           "Attempting to spill already spilled value.");

    assert(!li->isStackSlot() &&
           "Trying to spill a stack slot.");

    DEBUG(dbgs() << "Trivial spill everywhere of reg" << li->reg << "\n");

    std::vector<LiveInterval*> added;

    const TargetRegisterClass *trc = mri->getRegClass(li->reg);
    unsigned ss = vrm->assignVirt2StackSlot(li->reg);

    // Iterate over reg uses/defs.
    for (MachineRegisterInfo::reg_iterator
         regItr = mri->reg_begin(li->reg); regItr != mri->reg_end();) {

      // Grab the use/def instr.
      MachineInstr *mi = &*regItr;

      DEBUG(dbgs() << " Processing " << *mi);

      // Step regItr to the next use/def instr.
      do {
        ++regItr;
      } while (regItr != mri->reg_end() && (&*regItr == mi));

      // Collect uses & defs for this instr.
      SmallVector<unsigned, 2> indices;
      bool hasUse = false;
      bool hasDef = false;
      for (unsigned i = 0; i != mi->getNumOperands(); ++i) {
        MachineOperand &op = mi->getOperand(i);
        if (!op.isReg() || op.getReg() != li->reg)
          continue;
        hasUse |= mi->getOperand(i).isUse();
        hasDef |= mi->getOperand(i).isDef();
        indices.push_back(i);
      }

      // Create a new vreg & interval for this instr.
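      // Each use/def instruction gets its own short-lived vreg, mapped to the
      // same stack slot as the original register, so the new interval covers
      // only the reload and/or store inserted around this instruction. Its
      // weight is set to HUGE_VALF below so it is never picked for spilling
      // again.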
      unsigned newVReg = mri->createVirtualRegister(trc);
      vrm->grow();
      vrm->assignVirt2StackSlot(newVReg, ss);
      LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
      newLI->weight = HUGE_VALF;

      // Update the reg operands & kill flags.
      for (unsigned i = 0; i < indices.size(); ++i) {
        unsigned mopIdx = indices[i];
        MachineOperand &mop = mi->getOperand(mopIdx);
        mop.setReg(newVReg);
        if (mop.isUse() && !mi->isRegTiedToDefOperand(mopIdx)) {
          mop.setIsKill(true);
        }
      }
      assert(hasUse || hasDef);

      // Insert reload if necessary.
      MachineBasicBlock::iterator miItr(mi);
      if (hasUse) {
        tii->loadRegFromStackSlot(*mi->getParent(), miItr, newVReg, ss, trc);
        MachineInstr *loadInstr(prior(miItr));
        SlotIndex loadIndex =
          lis->InsertMachineInstrInMaps(loadInstr).getDefIndex();
        SlotIndex endIndex = loadIndex.getNextIndex();
        VNInfo *loadVNI =
          newLI->getNextValue(loadIndex, 0, true, lis->getVNInfoAllocator());
        loadVNI->addKill(endIndex);
        newLI->addRange(LiveRange(loadIndex, endIndex, loadVNI));
      }

      // Insert store if necessary.
      if (hasDef) {
        tii->storeRegToStackSlot(*mi->getParent(), llvm::next(miItr), newVReg,
                                 true, ss, trc);
        MachineInstr *storeInstr(llvm::next(miItr));
        SlotIndex storeIndex =
          lis->InsertMachineInstrInMaps(storeInstr).getDefIndex();
        SlotIndex beginIndex = storeIndex.getPrevIndex();
        VNInfo *storeVNI =
          newLI->getNextValue(beginIndex, 0, true, lis->getVNInfoAllocator());
        storeVNI->addKill(storeIndex);
        newLI->addRange(LiveRange(beginIndex, storeIndex, storeVNI));
      }

      added.push_back(newLI);
    }

    return added;
  }

};


/// Spills any live range using the spill-everywhere method with no attempt at
/// folding.
class TrivialSpiller : public SpillerBase {
public:

  TrivialSpiller(MachineFunction *mf, LiveIntervals *lis, VirtRegMap *vrm)
    : SpillerBase(mf, lis, vrm) {}

  std::vector<LiveInterval*> spill(LiveInterval *li,
                                   SmallVectorImpl<LiveInterval*> &spillIs,
                                   SlotIndex*) {
    // Ignore spillIs - we don't use it.
    return trivialSpillEverywhere(li);
  }

};

/// Falls back on LiveIntervals::addIntervalsForSpills.
class StandardSpiller : public Spiller {
protected:
  LiveIntervals *lis;
  const MachineLoopInfo *loopInfo;
  VirtRegMap *vrm;
public:
  StandardSpiller(LiveIntervals *lis, const MachineLoopInfo *loopInfo,
                  VirtRegMap *vrm)
    : lis(lis), loopInfo(loopInfo), vrm(vrm) {}

  /// Falls back on LiveIntervals::addIntervalsForSpills.
  std::vector<LiveInterval*> spill(LiveInterval *li,
                                   SmallVectorImpl<LiveInterval*> &spillIs,
                                   SlotIndex*) {
    return lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
  }

};

/// When a call to spill is placed, this spiller will first try to break the
/// interval up into its component values (one new interval per value).
/// If this fails, or if a call is placed to spill a previously split interval,
/// then the spiller falls back on the standard spilling mechanism.
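/// Splitting per value number hands the allocator several smaller,
/// independently assignable intervals, which can avoid spilling the whole
/// register to the stack.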
class SplittingSpiller : public StandardSpiller {
public:
  SplittingSpiller(MachineFunction *mf, LiveIntervals *lis,
                   const MachineLoopInfo *loopInfo, VirtRegMap *vrm)
    : StandardSpiller(lis, loopInfo, vrm) {

    mri = &mf->getRegInfo();
    tii = mf->getTarget().getInstrInfo();
    tri = mf->getTarget().getRegisterInfo();
  }

  std::vector<LiveInterval*> spill(LiveInterval *li,
                                   SmallVectorImpl<LiveInterval*> &spillIs,
                                   SlotIndex *earliestStart) {

    if (worthTryingToSplit(li)) {
      return tryVNISplit(li, earliestStart);
    }
    // else
    return StandardSpiller::spill(li, spillIs, earliestStart);
  }

private:

  MachineRegisterInfo *mri;
  const TargetInstrInfo *tii;
  const TargetRegisterInfo *tri;
  DenseSet<LiveInterval*> alreadySplit;

  bool worthTryingToSplit(LiveInterval *li) const {
    return (!alreadySplit.count(li) && li->getNumValNums() > 1);
  }

  /// Try to break a LiveInterval into its component values.
  std::vector<LiveInterval*> tryVNISplit(LiveInterval *li,
                                         SlotIndex *earliestStart) {

    DEBUG(dbgs() << "Trying VNI split of %reg" << *li << "\n");

    std::vector<LiveInterval*> added;
    SmallVector<VNInfo*, 4> vnis;

    std::copy(li->vni_begin(), li->vni_end(), std::back_inserter(vnis));

    for (SmallVectorImpl<VNInfo*>::iterator vniItr = vnis.begin(),
         vniEnd = vnis.end(); vniItr != vniEnd; ++vniItr) {
      VNInfo *vni = *vniItr;

      // Skip unused VNIs, or VNIs with no kills.
      if (vni->isUnused() || vni->kills.empty())
        continue;

      DEBUG(dbgs() << " Extracted Val #" << vni->id << " as ");
      LiveInterval *splitInterval = extractVNI(li, vni);

      if (splitInterval != 0) {
        DEBUG(dbgs() << *splitInterval << "\n");
        added.push_back(splitInterval);
        alreadySplit.insert(splitInterval);
        if (earliestStart != 0) {
          if (splitInterval->beginIndex() < *earliestStart)
            *earliestStart = splitInterval->beginIndex();
        }
      } else {
        DEBUG(dbgs() << "0\n");
      }
    }

    DEBUG(dbgs() << "Original LI: " << *li << "\n");

    // If the original interval still contains some live ranges,
    // add it to added and alreadySplit.
    if (!li->empty()) {
      added.push_back(li);
      alreadySplit.insert(li);
      if (earliestStart != 0) {
        if (li->beginIndex() < *earliestStart)
          *earliestStart = li->beginIndex();
      }
    }

    return added;
  }

  /// Extract the given value number from the interval.
  LiveInterval* extractVNI(LiveInterval *li, VNInfo *vni) const {
    assert(vni->isDefAccurate() || vni->isPHIDef());
    assert(!vni->kills.empty());

    // Create a new vreg and live interval, copy VNI kills & ranges over.
    const TargetRegisterClass *trc = mri->getRegClass(li->reg);
    unsigned newVReg = mri->createVirtualRegister(trc);
    vrm->grow();
    LiveInterval *newLI = &lis->getOrCreateInterval(newVReg);
    VNInfo *newVNI = newLI->createValueCopy(vni, lis->getVNInfoAllocator());

    // Start by copying all live ranges in the VN to the new interval.
    for (LiveInterval::iterator rItr = li->begin(), rEnd = li->end();
         rItr != rEnd; ++rItr) {
      if (rItr->valno == vni) {
        newLI->addRange(LiveRange(rItr->start, rItr->end, newVNI));
      }
    }

    // Erase the old VNI & ranges.
    li->removeValNo(vni);

    // Collect all current uses of the register belonging to the given VNI.
    // We'll use this to rename the register after we've dealt with the def.
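    // Note that this collects every use of li->reg; uses whose indices don't
    // fall inside the ranges just moved to newLI are filtered out again in
    // the renaming loop below.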
    std::set<MachineInstr*> uses;
    for (MachineRegisterInfo::use_iterator
         useItr = mri->use_begin(li->reg), useEnd = mri->use_end();
         useItr != useEnd; ++useItr) {
      uses.insert(&*useItr);
    }

    // Process the def instruction for this VNI.
    if (newVNI->isPHIDef()) {
      // Insert a copy at the start of the MBB. The range preceding the
      // copy will be attached to the original LiveInterval.
      MachineBasicBlock *defMBB = lis->getMBBFromIndex(newVNI->def);
      tii->copyRegToReg(*defMBB, defMBB->begin(), newVReg, li->reg, trc, trc);
      MachineInstr *copyMI = defMBB->begin();
      copyMI->addRegisterKilled(li->reg, tri);
      SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
      VNInfo *phiDefVNI = li->getNextValue(lis->getMBBStartIdx(defMBB),
                                           0, false, lis->getVNInfoAllocator());
      phiDefVNI->setIsPHIDef(true);
      phiDefVNI->addKill(copyIdx.getDefIndex());
      li->addRange(LiveRange(phiDefVNI->def, copyIdx.getDefIndex(), phiDefVNI));
      LiveRange *oldPHIDefRange =
        newLI->getLiveRangeContaining(lis->getMBBStartIdx(defMBB));

      // If the old phi def starts in the middle of the range, chop it up.
      if (oldPHIDefRange->start < lis->getMBBStartIdx(defMBB)) {
        LiveRange oldPHIDefRange2(copyIdx.getDefIndex(), oldPHIDefRange->end,
                                  oldPHIDefRange->valno);
        oldPHIDefRange->end = lis->getMBBStartIdx(defMBB);
        newLI->addRange(oldPHIDefRange2);
      } else if (oldPHIDefRange->start == lis->getMBBStartIdx(defMBB)) {
        // Otherwise if it's at the start of the range just trim it.
        oldPHIDefRange->start = copyIdx.getDefIndex();
      } else {
        assert(false && "PHI def range doesn't cover PHI def?");
      }

      newVNI->def = copyIdx.getDefIndex();
      newVNI->setCopy(copyMI);
      newVNI->setIsPHIDef(false); // not a PHI def anymore.
      newVNI->setIsDefAccurate(true);
    } else {
      // Non-PHI def. Rename the def. If it's two-addr that means renaming the
      // use and inserting a new copy too.
      MachineInstr *defInst = lis->getInstructionFromIndex(newVNI->def);
      // We'll rename this now, so we can remove it from uses.
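      // If the def is two-address, the tied use operand is renamed as well,
      // and (unless that use is undef) a copy from li->reg into newVReg is
      // inserted just before the instruction so the incoming value still
      // reaches the tied operand.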
      uses.erase(defInst);
      unsigned defOpIdx = defInst->findRegisterDefOperandIdx(li->reg);
      bool isTwoAddr = defInst->isRegTiedToUseOperand(defOpIdx),
        twoAddrUseIsUndef = false;

      for (unsigned i = 0; i < defInst->getNumOperands(); ++i) {
        MachineOperand &mo = defInst->getOperand(i);
        if (mo.isReg() && (mo.isDef() || isTwoAddr) && (mo.getReg()==li->reg)) {
          mo.setReg(newVReg);
          if (isTwoAddr && mo.isUse() && mo.isUndef())
            twoAddrUseIsUndef = true;
        }
      }

      SlotIndex defIdx = lis->getInstructionIndex(defInst);
      newVNI->def = defIdx.getDefIndex();

      if (isTwoAddr && !twoAddrUseIsUndef) {
        MachineBasicBlock *defMBB = defInst->getParent();
        tii->copyRegToReg(*defMBB, defInst, newVReg, li->reg, trc, trc);
        MachineInstr *copyMI = prior(MachineBasicBlock::iterator(defInst));
        SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);
        copyMI->addRegisterKilled(li->reg, tri);
        LiveRange *origUseRange =
          li->getLiveRangeContaining(newVNI->def.getUseIndex());
        VNInfo *origUseVNI = origUseRange->valno;
        origUseRange->end = copyIdx.getDefIndex();
        bool updatedKills = false;
        for (unsigned k = 0; k < origUseVNI->kills.size(); ++k) {
          if (origUseVNI->kills[k] == defIdx.getDefIndex()) {
            origUseVNI->kills[k] = copyIdx.getDefIndex();
            updatedKills = true;
            break;
          }
        }
        assert(updatedKills && "Failed to update VNI kill list.");
        VNInfo *copyVNI = newLI->getNextValue(copyIdx.getDefIndex(), copyMI,
                                              true, lis->getVNInfoAllocator());
        copyVNI->addKill(defIdx.getDefIndex());
        LiveRange copyRange(copyIdx.getDefIndex(),defIdx.getDefIndex(),copyVNI);
        newLI->addRange(copyRange);
      }
    }

    for (std::set<MachineInstr*>::iterator
         usesItr = uses.begin(), usesEnd = uses.end();
         usesItr != usesEnd; ++usesItr) {
      MachineInstr *useInst = *usesItr;
      SlotIndex useIdx = lis->getInstructionIndex(useInst);
      LiveRange *useRange =
        newLI->getLiveRangeContaining(useIdx.getUseIndex());

      // If this use doesn't belong to the new interval skip it.
      if (useRange == 0)
        continue;

      // This use doesn't belong to the VNI, skip it.
      if (useRange->valno != newVNI)
        continue;

      // Check if this instr is two address.
      unsigned useOpIdx = useInst->findRegisterUseOperandIdx(li->reg);
      bool isTwoAddress = useInst->isRegTiedToDefOperand(useOpIdx);

      // Rename uses (and defs for two-address instrs).
      for (unsigned i = 0; i < useInst->getNumOperands(); ++i) {
        MachineOperand &mo = useInst->getOperand(i);
        if (mo.isReg() && (mo.isUse() || isTwoAddress) &&
            (mo.getReg() == li->reg)) {
          mo.setReg(newVReg);
        }
      }

      // If this is a two address instruction we've got some extra work to do.
      if (isTwoAddress) {
        // We modified the def operand, so we need to copy back to the original
        // reg.
        MachineBasicBlock *useMBB = useInst->getParent();
        MachineBasicBlock::iterator useItr(useInst);
        tii->copyRegToReg(*useMBB, next(useItr), li->reg, newVReg, trc, trc);
        MachineInstr *copyMI = next(useItr);
        copyMI->addRegisterKilled(newVReg, tri);
        SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);

        // Change the old two-address defined range & vni to start at
        // (and be defined by) the copy.
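        // The value defined by this instruction keeps living in li->reg via
        // the copy-back inserted above; only the short gap between the
        // instruction and the copy is carried by newVReg.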
        LiveRange *origDefRange =
          li->getLiveRangeContaining(useIdx.getDefIndex());
        origDefRange->start = copyIdx.getDefIndex();
        origDefRange->valno->def = copyIdx.getDefIndex();
        origDefRange->valno->setCopy(copyMI);

        // Insert a new range & vni for the two-address-to-copy value. This
        // will be attached to the new live interval.
        VNInfo *copyVNI =
          newLI->getNextValue(useIdx.getDefIndex(), 0, true,
                              lis->getVNInfoAllocator());
        copyVNI->addKill(copyIdx.getDefIndex());
        LiveRange copyRange(useIdx.getDefIndex(),copyIdx.getDefIndex(),copyVNI);
        newLI->addRange(copyRange);
      }
    }

    // Iterate over any PHI kills - we'll need to insert new copies for them.
    for (VNInfo::KillSet::iterator
         killItr = newVNI->kills.begin(), killEnd = newVNI->kills.end();
         killItr != killEnd; ++killItr) {
      SlotIndex killIdx(*killItr);
      if (killItr->isPHI()) {
        MachineBasicBlock *killMBB = lis->getMBBFromIndex(killIdx);
        LiveRange *oldKillRange =
          newLI->getLiveRangeContaining(killIdx);

        assert(oldKillRange != 0 && "No kill range?");

        tii->copyRegToReg(*killMBB, killMBB->getFirstTerminator(),
                          li->reg, newVReg, trc, trc);
        MachineInstr *copyMI = prior(killMBB->getFirstTerminator());
        copyMI->addRegisterKilled(newVReg, tri);
        SlotIndex copyIdx = lis->InsertMachineInstrInMaps(copyMI);

        // Save the current end. We may need it to add a new range if the
        // current range runs off the end of the MBB.
        SlotIndex newKillRangeEnd = oldKillRange->end;
        oldKillRange->end = copyIdx.getDefIndex();

        if (newKillRangeEnd != lis->getMBBEndIdx(killMBB)) {
          assert(newKillRangeEnd > lis->getMBBEndIdx(killMBB) &&
                 "PHI kill range doesn't reach kill-block end. Not sane.");
          newLI->addRange(LiveRange(lis->getMBBEndIdx(killMBB),
                                    newKillRangeEnd, newVNI));
        }

        *killItr = oldKillRange->end;
        VNInfo *newKillVNI = li->getNextValue(copyIdx.getDefIndex(),
                                              copyMI, true,
                                              lis->getVNInfoAllocator());
        newKillVNI->addKill(lis->getMBBTerminatorGap(killMBB));
        newKillVNI->setHasPHIKill(true);
        li->addRange(LiveRange(copyIdx.getDefIndex(),
                               lis->getMBBEndIdx(killMBB),
                               newKillVNI));
      }

    }

    newVNI->setHasPHIKill(false);

    return newLI;
  }

};

}

llvm::Spiller* llvm::createSpiller(MachineFunction *mf, LiveIntervals *lis,
                                   const MachineLoopInfo *loopInfo,
                                   VirtRegMap *vrm) {
  switch (spillerOpt) {
  case trivial: return new TrivialSpiller(mf, lis, vrm); break;
  case standard: return new StandardSpiller(lis, loopInfo, vrm); break;
  case splitting: return new SplittingSpiller(mf, lis, loopInfo, vrm); break;
  default: llvm_unreachable("Unreachable!"); break;
  }
}
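// Usage sketch (illustrative; the names mf, lis, loopInfo, vrm and li are
// assumed to come from the surrounding register allocator): one spiller is
// constructed up front and spill() is called for each interval that could
// not be allocated, with the returned intervals re-queued for allocation.
//
//   std::auto_ptr<Spiller> spiller(createSpiller(mf, lis, loopInfo, vrm));
//   SmallVector<LiveInterval*, 8> spillIs;  // other intervals being spilled
//                                           // in this round, if any
//   std::vector<LiveInterval*> newIntervals =
//     spiller->spill(li, spillIs, 0);       // 0: no earliest-start tracking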