LiveIntervalAnalysis.cpp revision 20e4cc9761f93188b3466fe87c2ff832eb05087e
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
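  // (Assumed detail, for the reader: the VNInfo objects come from a
  // bump-pointer allocator, so DestroyAll below runs their destructors and
  // then frees the underlying memory slabs in one shot rather than freeing
  // each value number individually.)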
  VNInfoAllocator.DestroyAll();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      if (mii->isDebugValue())
        OS << " \t" << *mii;
      else
        OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}

bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li,
                                         VirtRegMap &vrm, unsigned reg) {
  // We don't handle fancy stuff crossing basic block boundaries
  if (li.ranges.size() != 1)
    return true;
  const LiveRange &range = li.ranges.front();
  SlotIndex idx = range.start.getBaseIndex();
  SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex();

  // Skip deleted instructions
  MachineInstr *firstMI = getInstructionFromIndex(idx);
  while (!firstMI && idx != end) {
    idx = idx.getNextIndex();
    firstMI = getInstructionFromIndex(idx);
  }
  if (!firstMI)
    return false;

  // Find last instruction in range
  SlotIndex lastIdx = end.getPrevIndex();
  MachineInstr *lastMI = getInstructionFromIndex(lastIdx);
  while (!lastMI && lastIdx != idx) {
    lastIdx = lastIdx.getPrevIndex();
    lastMI = getInstructionFromIndex(lastIdx);
  }
  if (!lastMI)
    return false;

  // Range cannot cross basic block boundaries or terminators
  MachineBasicBlock *MBB = firstMI->getParent();
  if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator())
    return true;

  MachineBasicBlock::const_iterator E = lastMI;
  ++E;
  for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) {
    const MachineInstr &MI = *I;

    // Allow copies to and from li.reg
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      if (SrcReg == li.reg || DstReg == li.reg)
        continue;

    // Check for operands using reg
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      const MachineOperand& mop = MI.getOperand(i);
      if (!mop.isReg())
        continue;
      unsigned PhysReg = mop.getReg();
      if (PhysReg == 0 || PhysReg == li.reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
        if (!vrm.hasPhys(PhysReg))
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithSubPhysRegRef - Similar to conflictsWithPhysReg except that
/// it checks for sub-register references and it can check uses as well.
bool LiveIntervals::conflictsWithSubPhysRegRef(LiveInterval &li,
                                  unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    dbgs() << tri_->getName(reg);
  else
    dbgs() << "%reg" << reg;
}
#endif

static
bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) {
  unsigned Reg = MI.getOperand(MOIdx).getReg();
  for (unsigned i = MOIdx+1, e = MI.getNumOperands(); i < e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg())
      continue;
    if (MO.getReg() == Reg && MO.isDef()) {
      assert(MI.getOperand(MOIdx).getSubReg() != MO.getSubReg() &&
             MI.getOperand(MOIdx).getSubReg() &&
             MO.getSubReg());
      return true;
    }
  }
  return false;
}

/// isPartialRedef - Return true if the specified def at the specified index
/// partially re-defines the specified live interval. A common case is a
/// definition of a sub-register.
bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO,
                                   LiveInterval &interval) {
  if (!MO.getSubReg() || MO.isEarlyClobber())
    return false;

  SlotIndex RedefIndex = MIIdx.getDefIndex();
  const LiveRange *OldLR =
    interval.getLiveRangeContaining(RedefIndex.getUseIndex());
  if (OldLR->valno->isDefAccurate()) {
    MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def);
    return DefMI->findRegisterDefOperandIdx(interval.reg) != -1;
  }
  return false;
}

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
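  // For example (illustrative register numbers), after phi elimination two
  // defs may target the same vreg:
  //   BB#1: %reg1024<def> = ...
  //   BB#2: %reg1024<def> = ...
  // Only the first def seen takes the interval.empty() path below; later
  // defs are handled by the multi-def logic in the else branch.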
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();

    // Make sure the first definition is not a partial redefinition. Add an
    // <imp-def> of the full register.
    if (MO.getSubReg())
      mi->addRegisterDefined(interval.reg);

    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg() ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;

    VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true,
                                          VNInfoAllocator);
    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb), ValNo);
    DEBUG(dbgs() << " +" << NewLR);
    interval.addRange(NewLR);

    bool PHIJoin = lv_->isPHIJoin(interval.reg);

    if (PHIJoin) {
      // A phi join register is killed at the end of the MBB and revived as a
      // new valno in the killing blocks.
      assert(vi.AliveBlocks.empty() && "Phi join can't pass through blocks");
      DEBUG(dbgs() << " phi-join");
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
    } else {
      // Iterate over all of the blocks that the variable is completely
      // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
      // live interval.
      for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
             E = vi.AliveBlocks.end(); I != E; ++I) {
        MachineBasicBlock *aliveBlock = mf_->getBlockNumbered(*I);
        LiveRange LR(getMBBStartIdx(aliveBlock), getMBBEndIdx(aliveBlock),
                     ValNo);
        interval.addRange(LR);
        DEBUG(dbgs() << " +" << LR);
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
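    // E.g. a value defined in BB#0 and killed in BB#2 ends up with ranges
    // like [defIndex, end-of-BB#0) and [start-of-BB#2, useSlot(kill)), plus
    // full-block ranges (added above) for any blocks it is live through.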
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex Start = getMBBStartIdx(Kill->getParent());
      SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex();

      // Create interval with a NEW value number. Note that this value
      // number isn't actually defined by an instruction, weird huh? :)
      if (PHIJoin) {
        ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false,
                                      VNInfoAllocator);
        ValNo->setIsPHIDef(true);
      }
      LiveRange LR(Start, killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(dbgs() << " +" << LR);
    }

  } else {
    if (MultipleDefsBySameMI(*mi, MOIdx))
      // Multiple defs of the same virtual register by the same instruction.
      // e.g. %reg1031:5<def>, %reg1031:6<def> = VLD1q16 %reg1024<kill>, ...
      // This is likely due to elimination of REG_SEQUENCE instructions.
      // Return here since there is nothing to do.
      return;

    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.

    // It may also be a partial redef like this:
    // 80  %reg1041:6<def> = VSHRNv4i16 %reg1034<kill>, 12, pred:14, pred:%reg0
    // 120 %reg1041:5<def> = VSHRNv4i16 %reg1039<kill>, 12, pred:14, pred:%reg0
    bool PartReDef = isPartialRedef(MIIdx, MO, interval);
    if (PartReDef || mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert((PartReDef || interval.containsOneValue()) &&
             "Unexpected 2-addr liveint!");
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);

      // A re-def may be a copy. e.g. %reg1030:6<def> = VMOVD %reg1026, ...
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (PartReDef &&
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        OldValNo->setCopy(&*mi);

      // Add the new live interval which replaces the range for the input copy.
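      // In the VSHRN example above, [80, 120) is re-added below under the
      // new value number, while value #0 is now considered defined at 120.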
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(dbgs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          dbgs() << " RESULT: ";
          interval.print(dbgs(), tri_);
        });
    } else if (lv_->isPHIJoin(interval.reg)) {
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.

      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->isExtractSubreg() || mi->isInsertSubreg() || mi->isSubregToReg()||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb);
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(dbgs() << " phi-join +" << LR);
    } else {
      llvm_unreachable("Multiply defined register");
    }
  }

  DEBUG(dbgs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      dbgs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(dbgs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (mi->isDebugValue())
      continue;
    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever
          // read. Then the register is essentially dead at the instruction
          // that defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(dbgs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only cases where we should reach this point with a dead physreg and
  // no kill are if it is live-in to the function and never used, or if the
  // implicit use of the physical register was deleted by the two-address
  // pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->isExtractSubreg() || MI->isInsertSubreg() || MI->isSubregToReg() ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->definesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      dbgs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills; if the register reaches a def before it is killed, it
  // shouldn't be considered live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  MachineBasicBlock::iterator E = MBB->end();
  // Skip over DBG_VALUE at the start of the MBB.
  if (mi != E && mi->isDebugValue()) {
    while (++mi != E && mi->isDebugValue())
      ;
    if (mi == E)
      // MBB is empty except for DBG_VALUE's.
      return;
  }

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != E) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(dbgs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->definesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that
      // defines it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(dbgs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    while (++mi != E && mi->isDebugValue())
      // Skip over DBG_VALUE.
      ;
    if (mi != E)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(dbgs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(dbgs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(dbgs() << " +" << LR << '\n');
}

/// computeIntervals - Compute the live intervals for the virtual registers.
/// For some ordering of the machine instructions [1,N], a live interval is
/// an interval [i, j) where 1 <= i <= j < N over which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(dbgs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    if (MBB->empty())
      continue;

    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(dbgs() << "BB#" << MBB->getNumber()
          << ":\t\t# derived from " << MBB->getName() << "\n");

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
         MI != miEnd; ++MI) {
      DEBUG(dbgs() << MIIndex << "\t" << *MI);
      if (MI->isDebugValue())
        continue;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                   HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->isExtractSubreg()) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      unsigned SrcSubReg = VNI->getCopy()->getOperand(2).getImm();
      unsigned DstSubReg = VNI->getCopy()->getOperand(0).getSubReg();
      if (SrcSubReg == DstSubReg)
        // %reg1034:3<def> = EXTRACT_SUBREG %EDX, 3
        // reg1034 can still be coalesced to EDX.
        return Reg;
      assert(DstSubReg == 0);
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    }
    return Reg;
  } else if (VNI->getCopy()->isInsertSubreg() ||
             VNI->getCopy()->isSubregToReg())
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
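/// Typical candidates are constant materializations and loads from the
/// constant pool, where re-executing the def is cheaper than a reload.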
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_nodbg_iterator
           ri = mri_->use_nodbg_begin(li.reg), re = mri_->use_nodbg_end();
         ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition of every val# of the
/// specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a use
  // operand. Make sure we rewrite that as well.
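  // E.g. if the def of some %reg1025 was remat'ed and that def reads li.reg,
  // the cloned def must be rewritten to read NewVReg instead, or it would
  // still reference the spilled vreg. (%reg1025 is just an illustrative
  // register number.)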
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: "
                     << *MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    SmallVector<unsigned, 2> Ops;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops);

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
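    // NewVReg is reused for every operand of this instruction that reads
    // li.reg and, when splitting, for later instructions in the same MBB
    // (tracked via MBBVRegsMap by the caller).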
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
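        // (Reusing an existing NewVReg: stretch its last range forward from
        // its current end to this instruction's def slot.)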
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(dbgs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        dbgs() << "\t\t\t\tAdded new interval: ";
        nI.print(dbgs(), tri_);
        dbgs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx <= End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    if (MI->isDebugValue()) {
      // Modify DBG_VALUE now that the value is in a spill slot.
      if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) {
        uint64_t Offset = MI->getOperand(1).getImm();
        const MDNode *MDPtr = MI->getOperand(2).getMetadata();
        DebugLoc DL = MI->getDebugLoc();
        int FI = isLoadSS ? LdSlot : (int)Slot;
        if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI,
                                                           Offset, MDPtr, DL)) {
          DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI);
          ReplaceMachineInstrInMaps(MI, NewDV);
          MachineBasicBlock *MBB = MI->getParent();
          MBB->insert(MBB->erase(MI), NewDV);
          continue;
        }
      }

      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
      continue;
    }
    assert(!(O.isImplicit() && O.isUse()) &&
           "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Prevent interval
      // from being spilled.
      getInterval(ImpUse).markNotSpillable();
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MI->readsWritesVirtualRegister(li.reg) ==
            std::make_pair(false,true)) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
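      // This mirrors the post-loop handling at the bottom of this function:
      // a fully foldable interval is cheap, so divide its weight by 10.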
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.markNotSpillable();
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB,
                                         index.getDefIndex());
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI =
            li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) following the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
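        // The first record for this MBB creates the SRInfo vector; later
        // records append. RestoreMBBs marks the block as needing a restore.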
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (MI->isDebugValue()) {
      // Remove debug info for now.
      O.setReg(0U);
      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
      continue;
    }
    if (O.isDef()) {
      assert(MI->isImplicitDef() &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

float
LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
  // Limit the loop depth ridiculousness.
  if (loopDepth > 200)
    loopDepth = 200;

  // The loop depth is used to roughly estimate the number of times the
  // instruction is executed. Something like 10^d is simple, but will quickly
  // overflow a float. This expression behaves like 10^d for small d, but is
  // This expression behaves like 10^d for small d, but is more tempered for
  // large d. At d=200 we get 6.7e33 which leaves a bit of headroom before
  // overflow.
  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);

  return (isDef + isUse) * lc;
}

void
LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
    normalizeSpillWeight(*NewLIs[i]);
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      dbgs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse, HasDef;
    tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(li.reg, &Indices);

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new live interval for the spill vreg.
      LiveInterval &nI = getOrCreateInterval(NewVReg);
      nI.markNotSpillable();

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(dbgs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          dbgs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          dbgs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.isSpillable() && "attempt to spill already spilled interval!");

  DEBUG({
      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(dbgs(), tri_);
      dbgs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
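  // SpillIdxes / RestoreIdxes map each MBB number to the spill / restore
  // points (SRInfo records) chosen within that block, while the SpillMBBs /
  // RestoreMBBs bitvectors let the insertion loops below visit only the
  // blocks that actually need work.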
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                                     Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs,
                                     RestoreIdxes, MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                                     Slot, 0, false, false, false,
                                     false, vrm, rc, ReMatIds, loopInfo,
                                     SpillMBBs, SpillIdxes, RestoreMBBs,
                                     RestoreIdxes, MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate() ?
      getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node; not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs,
                                 RestoreIdxes, MBBVRegsMap, NewLIs);
  }

  // If we are not splitting, we are done; otherwise insert the spills and
  // restores recorded above.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    normalizeSpillWeights(NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Also folded the use(s); do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true,
                                        Slot, VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-matting an instruction with a virtual register use. Add
              // the register as an implicit use on the use MI and mark the
              // register interval as unspillable.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.markNotSpillable();
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue
      // a load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
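  // Note: the division below turns each interval's accumulated def / use
  // weight into a rough per-slot density, so intervals whose uses are
  // densely packed keep a higher spill weight than long, sparse ones.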
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  normalizeSpillWeights(RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register
/// has any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue())
      continue;
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg,
                                                  VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If a register aliases PhysReg but is not a sub-register of the chosen
    // representative super register, assert, since we can't handle it yet.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (MI->isDebugValue() || SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->isInlineAsm()) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        report_fatal_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}

LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    getMBBEndIdx(startInst->getParent()), VN);
  Interval.addRange(LR);

  return LR;
}
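// Usage sketch for addLiveRangeToEndOfBlock (illustrative only; LIS,
// IncomingReg, and CopyMI are hypothetical names, not part of this file):
// a pass that inserts a copy before a block's terminator can extend the
// copied value's liveness to the end of that block like so:
//
//   MachineInstr *CopyMI = ...;  // copy inserted before the terminator
//   LiveRange LR = LIS->addLiveRangeToEndOfBlock(IncomingReg, CopyMI);
//   // LR now covers [CopyMI's def slot, end of CopyMI's parent block).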