LiveIntervalAnalysis.cpp revision 1e78aa445c5b5b466cea4478f3f9034d4be7278e
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}

/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(errs());
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      // Skip deleted instructions.
      while (index != end && !getInstructionFromIndex(index))
        index = index.getNextIndex();
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      // Skip deleted instructions.
      MachineInstr *MI = 0;
      while (index != end) {
        MI = getInstructionFromIndex(index);
        if (MI)
          break;
        index = index.getNextIndex();
      }
      if (index == end) break;

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
#endif

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
                    ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(
          getMBBStartIdx(mf_->getBlockNumbered(*I)),
          getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
          ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex killIdx =
        getInstructionIndex(Kill).getDefIndex();
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two-addr elimination. If this is
    // the result of two-address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def  = RedefIndex;
      OldValNo->setCopy(0);
      if (MO.isEarlyClobber())
        OldValNo->setHasRedefByEC(true);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        SlotIndex Start = getMBBStartIdx(Killer->getParent());
        SlotIndex End = getInstructionIndex(Killer).getDefIndex();
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
        VNI->addKill(indexes_->getTerminatorGap(killMBB));
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number. Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(SlotIndex(getMBBStartIdx(mbb), true),
                                           0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
          assert(!mi->getOperand(DefIdx).isEarlyClobber() &&
                 "Two address instruction is an early clobber?");
        } else {
          // Another instruction redefines the register before it is ever
          // read. Then the register is essentially dead at the instruction
          // that defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // redefining instruction is if it is live-in to the function and never
  // used. Another possible case is that the implicit use of the physical
  // register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}

void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that
      // defines it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    ++mi;
    if (mi != MBB->end()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << ((Value*)MBB->getBasicBlock())->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // Handle register defs - build intervals.
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_def's (except
  // for those implicit_def's that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}

LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from /
/// to a stack slot, or any rematerialized load, into the ith operand of the
/// specified MI. If it is successful, MI is updated with the newly created
/// MI and true is returned.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two-address instruction is when
  // it's folding a reload or spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
                     << MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If the def for this use can't be rematerialized, then try folding.
      // If the def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try folding loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    // 1. If the instr reads the same spilled vreg multiple times, we
    //    want to reuse the NewVReg.
    // 2. If the instr is a two-addr instruction, we are required to
    //    keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.
    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the defs / uses in this live range that will be
  // rewritten. Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled: you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                           index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                           Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                           CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                           ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // There are use(s) following the last def, so it's not safe to fold
        // the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                                     DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpillsFast(const LiveInterval &li,
                          const MachineLoopInfo *loopInfo,
                          VirtRegMap &vrm) {
  unsigned slot = vrm.assignVirt2StackSlot(li.reg);

  std::vector<LiveInterval*> added;

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.dump();
      errs() << '\n';
    });

  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg);
  while (RI != mri_->reg_end()) {
    MachineInstr* MI = &*RI;

    SmallVector<unsigned, 2> Indices;
    bool HasUse = false;
    bool HasDef = false;

    for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
      MachineOperand& mop = MI->getOperand(i);
      if (!mop.isReg() || mop.getReg() != li.reg) continue;

      HasUse |= MI->getOperand(i).isUse();
      HasDef |= MI->getOperand(i).isDef();

      Indices.push_back(i);
    }

    if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI),
                              Indices, true, slot, li.reg)) {
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.assignVirt2StackSlot(NewVReg, slot);

      // Create a new register for this spill.
      LiveInterval &nI = getOrCreateInterval(NewVReg);

      // The spill weight is now infinity as it
      // cannot be spilled again.
      nI.weight = HUGE_VALF;

      // Rewrite register operands to use the new vreg.
      for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(),
             E = Indices.end(); I != E; ++I) {
        MI->getOperand(*I).setReg(NewVReg);

        if (MI->getOperand(*I).isUse())
          MI->getOperand(*I).setIsKill(true);
      }

      // Fill in the new live interval.
      SlotIndex index = getInstructionIndex(MI);
      if (HasUse) {
        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addRestorePoint(NewVReg, MI);
      }
      if (HasDef) {
        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                     nI.getNextValue(SlotIndex(), 0, false,
                                     getVNInfoAllocator()));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
        vrm.addSpillPoint(NewVReg, true, MI);
      }

      added.push_back(&nI);

      DEBUG({
          errs() << "\t\t\t\tadded new interval: ";
          nI.dump();
          errs() << '\n';
        });
    }

    RI = mri_->reg_begin(li.reg);
  }

  return added;
}

std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      SmallVectorImpl<LiveInterval*> &SpillIs,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {

  if (EnableFastSpilling)
    return addIntervalsForSpillsFast(li, loopInfo, vrm);

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DEBUG({
      errs() << "\t\t\t\tadding intervals for spills for interval: ";
      li.print(errs(), tri_);
      errs() << '\n';
    });

  // Each bit specifies whether a spill is required in the MBB.
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
  DenseMap<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx != SlotIndex()) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  bool TrySplit = !intervalIsInOneMBB(li);
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
      ? getInstructionFromIndex(VNI->def) : 0;
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
      // Remember how to remat the def of this val#.
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here.
      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
      CloneMIs.push_back(Clone);
      ReMatDefs[VN] = Clone;

      bool CanDelete = true;
      if (VNI->hasPHIKill()) {
        // A kill is a phi node, so not all of its uses can be rematerialized.
        // The def must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
      Slot = vrm.assignVirt2StackSlot(li.reg);

    // This case only occurs when the prealloc splitter has already assigned
    // a stack slot to this vreg.
    else
      Slot = vrm.getStackSlot(li.reg);
  }

  // Create new intervals and rewrite defs and uses.
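  // rewriteInstructionsForSpills records, per basic block, where explicit
  // spill stores (SpillMBBs / SpillIdxes) and reloads (RestoreMBBs /
  // RestoreIdxes) may still be needed; the folding loops below consume that
  // information.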
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                                 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                 CanDelete, vrm, rc, ReMatIds, loopInfo,
                                 SpillMBBs, SpillIdxes, RestoreMBBs,
                                 RestoreIdxes, MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores only if we are splitting; otherwise we are done.
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        SlotIndex index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isReg() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                   VReg)) {
            Folded = true;
            if (FoundUse) {
              // Uses were folded as well; do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
            }
            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
          }
        }

        // Otherwise tell the spiller to issue a spill.
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == index.getStoreIndex();
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
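          // Remember intervals whose last range ends at this spill point; the
          // finalization loop below will not add another kill marker for them.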
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      SlotIndex index = restores[i].index;
      if (index == SlotIndex())
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      bool isReMat = vrm.isReMaterialized(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isReg() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!isReMat)
          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,
                                        VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          if (!Folded) {
            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
            if (ImpUse) {
              // Re-materializing an instruction that has a virtual register
              // use. Add the register as an implicit use on the using MI and
              // set the register interval's spill weight to HUGE_VALF to
              // prevent it from being spilled.
              LiveInterval &ImpLi = getInterval(ImpUse);
              ImpLi.weight = HUGE_VALF;
              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
            }
          }
        }
      }
      // If folding succeeded, the reload range is no longer needed; otherwise
      // tell the spiller to issue a load / rematerialization for us.
      if (Folded)
        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, normalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
      if (!AddedKill.count(LI)) {
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        SlotIndex LastUseIdx = LR->end.getBaseIndex();
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
    if (allocatableRegs_[*AS] && hasInterval(*AS))
      return true;
  return false;
}

/// getRepresentativeReg - Find the largest super register of the specified
/// physical register.
unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
  // Find the largest super-register that is allocatable.
  unsigned BestReg = Reg;
  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
    unsigned SuperReg = *AS;
    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
      BestReg = SuperReg;
      break;
    }
  }
  return BestReg;
}

/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
/// specified interval that conflict with the specified physical register.
unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
                                                   unsigned PhysReg) const {
  unsigned NumConflicts = 0;
  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    SlotIndex Index = getInstructionIndex(MI);
    if (pli.liveAt(Index))
      ++NumConflicts;
  }
  return NumConflicts;
}

/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
/// around all defs and uses of the specified interval. Return true if it
/// was able to cut its interval.
bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
                                                  unsigned PhysReg, VirtRegMap &vrm) {
  unsigned SpillReg = getRepresentativeReg(PhysReg);

  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
    // If a register aliases PhysReg but is not a sub-register of the chosen
    // representative super register, we cannot handle it yet, so assert.
    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
           tri_->isSuperRegister(*AS, SpillReg));

  bool Cut = false;
  SmallVector<unsigned, 4> PRegs;
  if (hasInterval(SpillReg))
    PRegs.push_back(SpillReg);
  else {
    SmallSet<unsigned, 4> Added;
    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
      if (Added.insert(*AS) && hasInterval(*AS)) {
        PRegs.push_back(*AS);
        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
          Added.insert(*ASS);
      }
  }

  SmallPtrSet<MachineInstr*, 8> SeenMIs;
  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
         E = mri_->reg_end(); I != E; ++I) {
    MachineOperand &O = I.getOperand();
    MachineInstr *MI = O.getParent();
    if (SeenMIs.count(MI))
      continue;
    SeenMIs.insert(MI);
    SlotIndex Index = getInstructionIndex(MI);
    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
      unsigned PReg = PRegs[i];
      LiveInterval &pli = getInterval(PReg);
      if (!pli.liveAt(Index))
        continue;
      vrm.addEmergencySpill(PReg, MI);
      SlotIndex StartIdx = Index.getLoadIndex();
      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
        pli.removeRange(StartIdx, EndIdx);
        Cut = true;
      } else {
        std::string msg;
        raw_string_ostream Msg(msg);
        Msg << "Ran out of registers during register allocation!";
        if (MI->getOpcode() == TargetInstrInfo::INLINEASM) {
          Msg << "\nPlease check your inline asm statement for invalid "
              << "constraints:\n";
          MI->print(Msg, tm_);
        }
        llvm_report_error(Msg.str());
      }
      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
        if (!hasInterval(*AS))
          continue;
        LiveInterval &spli = getInterval(*AS);
        if (spli.liveAt(Index))
          spli.removeRange(Index.getLoadIndex(),
                           Index.getNextIndex().getBaseIndex());
      }
    }
  }
  return Cut;
}

LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
                                                  MachineInstr* startInst) {
  LiveInterval& Interval = getOrCreateInterval(reg);
  VNInfo* VN = Interval.getNextValue(
    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
    startInst, true, getVNInfoAllocator());
  VN->setHasPHIKill(true);
  VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent()));
  LiveRange LR(
     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
     getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN);
  Interval.addRange(LR);

  return LR;
}