LiveIntervalAnalysis.cpp revision 6b74e505be1016223e090a6c806f7caa3165a146
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options to help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  while (!CloneMIs.empty()) {
    MachineInstr *MI = CloneMIs.back();
    CloneMIs.pop_back();
    mf_->DeleteMachineInstr(MI);
  }
}
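// Note on slot indexes (assumed layout, per the SlotIndexes analysis this
// pass requires): every machine instruction is assigned four consecutive
// slots - load, use, def and store - selected via getLoadIndex(),
// getUseIndex(), getDefIndex() and getStoreIndex(). A live range such as
// [4def, 12def) therefore begins at the def slot of the instruction at
// index 4 and ends at the def slot of the instruction at index 12.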

/// runOnMachineFunction - Compute live intervals for the whole function.
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  aa_ = &getAnalysis<AliasAnalysis>();
  lv_ = &getAnalysis<LiveVariables>();
  indexes_ = &getAnalysis<SlotIndexes>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  computeIntervals();

  numIntervals += getNumIntervals();

  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second->print(OS, tri_);
    OS << "\n";
  }

  printInstrs(OS);
}

void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";

  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    OS << "BB#" << mbbi->getNumber()
       << ":\t\t# derived from " << mbbi->getName() << "\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      OS << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

void LiveIntervals::dumpInstrs() const {
  printInstrs(errs());
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg() || mop.isUse())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}
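
// Note: the conflictsWithPhysReg* queries walk a live interval one
// instruction at a time: getBaseIndex() rounds a slot index down to the
// first slot of its instruction, and getNextIndex() advances by one whole
// instruction, so every instruction overlapping a range is visited exactly
// once no matter which slots the range starts and ends on.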

/// conflictsWithPhysRegUse - Returns true if the specified register is used or
/// defined during the duration of the specified interval. Copies to and from
/// li.reg are allowed.
bool LiveIntervals::conflictsWithPhysRegUse(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      // Terminators are considered conflicts since reg may be used at the
      // destination.
      if (MI->getDesc().isTerminator())
        return true;

      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isReg() || mop.isUndef())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg)) {
          unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
          if (!tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) ||
              (SrcReg != li.reg && DstReg != li.reg))
            return true;
        }
      }
    }
  }
  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysRegDef except
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
#endif
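
// As a rough example of what handleVirtualRegisterDef builds: for a vreg
// defined at instruction 4 and killed at instruction 12 in the same block,
// the interval gets the single range [4def, 12def) (kill indexes use the
// killing instruction's def slot). A vreg that lives out of its block
// instead gets a range to the end of the defining block, one range per
// block it is live through, and a range from block start to the kill in
// each killing block.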

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instruction.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in. There are
    // two cases we have to handle here. The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed. Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
                    ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(
          getMBBStartIdx(mf_->getBlockNumbered(*I)),
          getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
          ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex killIdx =
        getInstructionIndex(Kill).getDefIndex();
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination. If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range. The only problem is that we didn't realize there
      // are actually two values in the live interval. Because of this we
      // need to take the live range that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once. This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(),
                                            false, // update at *
                                            VNInfoAllocator);
      ValNo->setFlags(OldValNo->getFlags()); // * <- updating here

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->setCopy(0);

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DEBUG(errs() << " replace range with " << LR);
      interval.addRange(LR);
      ValNo->addKill(RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (MO.isDead())
        interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(),
                                    OldValNo));

      DEBUG({
          errs() << " RESULT: ";
          interval.print(errs(), tri_);
        });
    } else {
      // Otherwise, this must be because of phi elimination. If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        SlotIndex Start = getMBBStartIdx(Killer->getParent());
        SlotIndex End = getInstructionIndex(Killer).getDefIndex();
        DEBUG({
            errs() << " Removing [" << Start << "," << End << "] from: ";
            interval.print(errs(), tri_);
            errs() << "\n";
          });
        interval.removeRange(Start, End);
        assert(interval.ranges.size() == 1 &&
               "Newly discovered PHI interval has >1 ranges.");
        MachineBasicBlock *killMBB = getMBBFromIndex(interval.endIndex());
        VNI->addKill(indexes_->getTerminatorGap(killMBB));
        VNI->setHasPHIKill(true);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });

        // Replace the interval with one of a NEW value number. Note that
        // this value number isn't actually defined by an instruction, weird
        // huh? :)
        LiveRange LR(Start, End,
                     interval.getNextValue(SlotIndex(getMBBStartIdx(mbb), true),
                                           0, false, VNInfoAllocator));
        LR.valno->setIsPHIDef(true);
        DEBUG(errs() << " replace range with " << LR);
        interval.addRange(LR);
        LR.valno->addKill(End);
        DEBUG({
            errs() << " RESULT: ";
            interval.print(errs(), tri_);
          });
      }

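      // Rough illustration: after phi elimination a vreg may look like
      //   BB#0:  %reg1024<def> = ...
      //   BB#1:  %reg1024<def> = ...
      //   BB#2:  ... = use %reg1024
      // Each def gets its own value number; the range reaching the former
      // phi block is re-assigned to the PHI-def value number created above.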
      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block. We've already taken care of the
      // rest of the live range.
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}
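
// Physical register intervals are much simpler: a physreg def lives at
// most to the end of its block, so the ranges built below look roughly
// like [4def, 9def) for a def killed by the instruction at index 9, or
// the one-slot range [4def, 4store) for a def that is dead on definition.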
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever
          // read. Then the register is essentially dead at the instruction
          // that defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing or
  // any other instruction where we know it's dead is if it is live-in to
  // the function and never used. Another possible case is the implicit use
  // of the physical register has been deleted by the two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  bool Extend = OldLR != interval.end();
  VNInfo *ValNo = Extend
    ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator);
  if (MO.isEarlyClobber() && Extend)
    ValNo->setHasRedefByEC(true);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      SlotIndex MIIdx,
                                      MachineOperand& MO,
                                      unsigned MOIdx) {
  if (TargetRegisterInfo::isVirtualRegister(MO.getReg()))
    handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx,
                             getOrCreateInterval(MO.getReg()));
  else if (allocatableRegs_[MO.getReg()]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                              getOrCreateInterval(MO.getReg()), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, MO,
                                  getOrCreateInterval(*AS), 0);
  }
}
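
// Live-in registers reach the block without a visible def, so the value
// number created for them below is marked as a PHI def anchored at the
// block's start index rather than at a defining instruction.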
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         SlotIndex MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DEBUG({
      errs() << "\t\tlivein register: ";
      printRegName(interval.reg, tri_);
    });

  // Look for kills. If it reaches a def before it's killed, then it
  // shouldn't be considered a live-in.
  MachineBasicBlock::iterator mi = MBB->begin();
  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex;
  if (getInstructionFromIndex(baseIndex) == 0)
    baseIndex = indexes_->getNextNonNullIndex(baseIndex);

  SlotIndex end = baseIndex;
  bool SeenDefUse = false;

  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      SeenDefUse = true;
      break;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that
      // defines it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DEBUG(errs() << " dead");
      end = start.getStoreIndex();
      SeenDefUse = true;
      break;
    }

    ++mi;
    if (mi != MBB->end()) {
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);
    }
  }

  // Live-in register might not be used at all.
  if (!SeenDefUse) {
    if (isAlias) {
      DEBUG(errs() << " dead");
      end = MIIdx.getStoreIndex();
    } else {
      DEBUG(errs() << " live through");
      end = baseIndex;
    }
  }

  VNInfo *vni =
    interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true),
                          0, false, VNInfoAllocator);
  vni->setIsPHIDef(true);
  LiveRange LR(start, end, vni);

  interval.addRange(LR);
  LR.valno->addKill(end);
  DEBUG(errs() << " +" << LR << '\n');
}

/// computeIntervals - Computes the live intervals for virtual
/// registers. For some ordering of the machine instructions [1,N], a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live.
void LiveIntervals::computeIntervals() {
  DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n"
               << "********** Function: "
               << ((Value*)mf_->getFunction())->getName() << '\n');

  SmallVector<unsigned, 8> UndefUses;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    // Track the index of the current machine instr.
    SlotIndex MIIndex = getMBBStartIdx(MBB);
    DEBUG(errs() << MBB->getName() << ":\n");

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    // Skip over empty initial indices.
    if (getInstructionFromIndex(MIIndex) == 0)
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);

    for (; MI != miEnd; ++MI) {
      DEBUG(errs() << MIIndex << "\t" << *MI);

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg() || !MO.getReg())
          continue;

        // handle register defs - build intervals
        if (MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO, i);
        else if (MO.isUndef())
          UndefUses.push_back(MO.getReg());
      }

      // Move to the next instr slot.
      MIIndex = indexes_->getNextNonNullIndex(MIIndex);
    }
  }

  // Create empty intervals for registers defined by implicit_defs (except
  // for those implicit_defs that define values which are live out of their
  // blocks).
  for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) {
    unsigned UndefReg = UndefUses[i];
    (void)getOrCreateInterval(UndefReg);
  }
}
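
// Spill weights: physical register intervals get an infinite weight
// (HUGE_VALF) so the allocator never tries to spill them; virtual register
// intervals start at zero and accumulate weight per def / use (see the
// getSpillWeight calls in the spilling code below).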
LiveInterval* LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F;
  return new LiveInterval(reg, Weight);
}

/// dupInterval - Duplicate a live interval. The caller is responsible for
/// managing the allocated memory.
LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) {
  LiveInterval *NewLI = createInterval(li->reg);
  NewLI->Copy(*li, mri_, getVNInfoAllocator());
  return NewLI;
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->getCopy())
    return 0;

  if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) {
    // If it's extracting out of a physical register, return the sub-register.
    unsigned Reg = VNI->getCopy()->getOperand(1).getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg))
      Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm());
    return Reg;
  } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
             VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
    return VNI->getCopy()->getOperand(2).getReg();

  unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
  if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg))
    return SrcReg;
  llvm_unreachable("Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;

    if (TargetRegisterInfo::isPhysicalRegister(Reg) &&
        !allocatableRegs_[Reg])
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operands!");
    RegOp = MO.getReg();
#ifndef NDEBUG
    break;
#endif
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       SlotIndex UseIdx) const {
  SlotIndex Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  if (!tii_->isTriviallyReMaterializable(MI, aa_))
    return false;

  // Target-specific code can mark an instruction as being rematerializable
  // if it has one virtual reg use, though it had better be something like
  // a PIC base register which is likely to be live everywhere.
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}
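
// Folding (below) replaces an instruction that reads or writes a spilled
// register with an equivalent memory-operand form, e.g. turning a
// register-register add into an add from a stack slot, so that no explicit
// reload or store instruction is needed.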
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// true is returned.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    ReplaceMachineInstrInMaps(MI, fmi);
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  LiveInterval::Ranges::const_iterator itr = li.ranges.begin();

  MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end);

  if (mbb == 0)
    return false;

  for (++itr; itr != li.ranges.end(); ++itr) {
    MachineBasicBlock *mbb2 =
      indexes_->getMBBCoveringRange(itr->start, itr->end);

    if (mbb2 != mbb)
      return false;
  }

  return true;
}
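
// Example of the situation rewriteImplicitOps handles: if %reg1025 is
// rematerialized by an instruction that reads %reg1024 (say a PIC base),
// then when %reg1024 is renamed during spilling, the cached
// rematerialization instruction's use of %reg1024 must be rewritten too.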
/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operands is
  // being remat'ed and the remat'ed instruction has li.reg as a
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, SlotIndex index, SlotIndex end,
                 MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 DenseMap<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isReg())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DEBUG(errs() << "\t\t\t\tErasing re-materializable def: "
                     << MI << '\n');
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate. We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isReg())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        if (!MOj.isUndef()) {
          HasUse |= MOj.isUse();
          HasDef |= MOj.isDef();
        }
      }
    }

    // Create a new virtual register for the spill interval.
    // Create the new register now so we can map the fold instruction
    // to the new register so when it is unfolded we get the correct
    // answer.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;

      // The new virtual register should get the same allocation hints as the
      // old one.
      std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg);
      if (Hint.first || Hint.second)
        mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second);
    }

    if (!TryFold)
      CanFold = false;
    else {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, NewVReg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.

          if (FoldSS) {
            // We need to give the new vreg the same stack slot as the
            // spilled interval.
            vrm.assignVirt2StackSlot(NewVReg, FoldSlot);
          }

          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isNotInMIMap(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // We'll try to fold it later if it's profitable.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    }

    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // Create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        LiveRange LR(index.getLoadIndex(), index.getDefIndex(),
                     nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        SlotIndex End = index.getDefIndex();
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DEBUG(errs() << " +" << LR);
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
                   nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator));
      DEBUG(errs() << " +" << LR);
      nI.addRange(LR);
    }

    DEBUG({
        errs() << "\t\t\t\tAdded new interval: ";
        nI.print(errs(), tri_);
        errs() << '\n';
      });
  }
  return CanFold;
}

bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                         const VNInfo *VNI,
                                         MachineBasicBlock *MBB,
                                         SlotIndex Idx) const {
  SlotIndex End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    if (VNI->kills[j].isPHI())
      continue;

    SlotIndex KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    SlotIndex Index;
    MachineInstr *MI;
    bool HasUse;
    bool HasDef;
    RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}
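
// rewriteInstructionsForSpills drives the rewriting for one live range:
// it collects every instruction touching li.reg within the range, sorts
// them by slot index, rewrites each one, and records per-block spill
// points (SpillIdxes / SpillMBBs) and restore points (RestoreIdxes /
// RestoreMBBs) for the spill code insertion done later by the allocator.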
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    DenseMap<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  SlotIndex start = I->start.getBaseIndex();
  SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex();

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      // 8   %reg1024<def> = IMPLICIT_DEF
      // 12  %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before the
      // INSERT_SUBREG and both target registers would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        //   x = use
        //   ...
        //   ...
        //   def = ...
        //       = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
        else {
          // If this is a two-address instruction, then this index starts a
          // new VNInfo.
          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
        }
        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if (index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          index > SII->second.back().index)
        // Use(s) follow the last def; it's not safe to fold the spill.
        SII->second.back().canFold = false;
      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
        RII->second.back().canFold = false;
      else if (IsNew) {
        // Only need a reload if there isn't an earlier def / use.
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}
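
// alsoFoldARestore / eraseRestoreInfo below let the caller commit to a
// folded restore: the first checks whether the recorded restore point for
// (block, index, vreg) is still foldable, and the second invalidates the
// entry once the restore has been folded into the using instruction.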
bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                        unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg) {
          MO.setReg(NewVReg);
          MO.setIsUndef();
        }
      }
    }
  }
}
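
// The "fast" spiller below (enabled with -fast-spill) takes the simplest
// possible approach: every instruction touching the spilled vreg gets its
// own tiny interval around a reload and/or a store, with no attempt at
// splitting or rematerialization.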
1617      SlotIndex index = getInstructionIndex(MI);
1618      if (HasUse) {
1619        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1620                     nI.getNextValue(SlotIndex(), 0, false,
1621                                     getVNInfoAllocator()));
1622        DEBUG(errs() << " +" << LR);
1623        nI.addRange(LR);
1624        vrm.addRestorePoint(NewVReg, MI);
1625      }
1626      if (HasDef) {
1627        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1628                     nI.getNextValue(SlotIndex(), 0, false,
1629                                     getVNInfoAllocator()));
1630        DEBUG(errs() << " +" << LR);
1631        nI.addRange(LR);
1632        vrm.addSpillPoint(NewVReg, true, MI);
1633      }
1634
1635      added.push_back(&nI);
1636
1637      DEBUG({
1638          errs() << "\t\t\t\tadded new interval: ";
1639          nI.dump();
1640          errs() << '\n';
1641        });
1642    }
1643
1644
1645    RI = mri_->reg_begin(li.reg);
1646  }
1647
1648  return added;
1649}
1650
1651std::vector<LiveInterval*> LiveIntervals::
1652addIntervalsForSpills(const LiveInterval &li,
1653                      SmallVectorImpl<LiveInterval*> &SpillIs,
1654                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1655
1656  if (EnableFastSpilling)
1657    return addIntervalsForSpillsFast(li, loopInfo, vrm);
1658
1659  assert(li.weight != HUGE_VALF &&
1660         "attempt to spill already spilled interval!");
1661
1662  DEBUG({
1663      errs() << "\t\t\t\tadding intervals for spills for interval: ";
1664      li.print(errs(), tri_);
1665      errs() << '\n';
1666    });
1667
1668  // Each bit specifies whether a spill is required in the MBB.
1669  BitVector SpillMBBs(mf_->getNumBlockIDs());
1670  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1671  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1672  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1673  DenseMap<unsigned,unsigned> MBBVRegsMap;
1674  std::vector<LiveInterval*> NewLIs;
1675  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1676
1677  unsigned NumValNums = li.getNumValNums();
1678  SmallVector<MachineInstr*, 4> ReMatDefs;
1679  ReMatDefs.resize(NumValNums, NULL);
1680  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1681  ReMatOrigDefs.resize(NumValNums, NULL);
1682  SmallVector<int, 4> ReMatIds;
1683  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1684  BitVector ReMatDelete(NumValNums);
1685  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1686
1687  // Spilling a split live interval. It cannot be split any further, and it
1688  // is also guaranteed to be a single val# / range interval.
1689  if (vrm.getPreSplitReg(li.reg)) {
1690    vrm.setIsSplitFromReg(li.reg, 0);
1691    // Unset the split kill marker on the last use.
1692    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1693    if (KillIdx != SlotIndex()) {
1694      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1695      assert(KillMI && "Last use disappeared?");
1696      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1697      assert(KillOp != -1 && "Last use disappeared?");
1698      KillMI->getOperand(KillOp).setIsKill(false);
1699    }
1700    vrm.removeKillPoint(li.reg);
1701    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1702    Slot = vrm.getStackSlot(li.reg);
1703    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1704    MachineInstr *ReMatDefMI = DefIsReMat ?
1705      vrm.getReMaterializedMI(li.reg) : NULL;
1706    int LdSlot = 0;
1707    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1708    bool isLoad = isLoadSS ||
1709      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
1710    bool IsFirstRange = true;
1711    for (LiveInterval::Ranges::const_iterator
1712           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1713      // If this is a split live interval with multiple ranges, it means there
1714      // are two-address instructions that re-defined the value. Only the
1715      // first def can be rematerialized!
1716      if (IsFirstRange) {
1717        // Note ReMatOrigDefMI has already been deleted.
1718        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1719                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1720                             false, vrm, rc, ReMatIds, loopInfo,
1721                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1722                             MBBVRegsMap, NewLIs);
1723      } else {
1724        rewriteInstructionsForSpills(li, false, I, NULL, 0,
1725                             Slot, 0, false, false, false,
1726                             false, vrm, rc, ReMatIds, loopInfo,
1727                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1728                             MBBVRegsMap, NewLIs);
1729      }
1730      IsFirstRange = false;
1731    }
1732
1733    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1734    return NewLIs;
1735  }
1736
1737  bool TrySplit = !intervalIsInOneMBB(li);
1738  if (TrySplit)
1739    ++numSplits;
1740  bool NeedStackSlot = false;
1741  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1742       i != e; ++i) {
1743    const VNInfo *VNI = *i;
1744    unsigned VN = VNI->id;
1745    if (VNI->isUnused())
1746      continue; // Dead val#.
1747    // Is the def for the val# rematerializable?
1748    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
1749      ? getInstructionFromIndex(VNI->def) : 0;
1750    bool dummy;
1751    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
1752      // Remember how to remat the def of this val#.
1753      ReMatOrigDefs[VN] = ReMatDefMI;
1754      // Original def may be modified so we have to make a copy here.
1755      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
1756      CloneMIs.push_back(Clone);
1757      ReMatDefs[VN] = Clone;
1758
1759      bool CanDelete = true;
1760      if (VNI->hasPHIKill()) {
1761        // A kill is a phi node, so not all of its uses can be rematerialized.
1762        // The def must not be deleted.
1763        CanDelete = false;
1764        // Need a stack slot if there is any live range where uses cannot be
1765        // rematerialized.
1766        NeedStackSlot = true;
1767      }
1768      if (CanDelete)
1769        ReMatDelete.set(VN);
1770    } else {
1771      // Need a stack slot if there is any live range where uses cannot be
1772      // rematerialized.
1773      NeedStackSlot = true;
1774    }
1775  }
1776
1777  // One stack slot per live interval.
1778  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
1779    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
1780      Slot = vrm.assignVirt2StackSlot(li.reg);
1781
1782    else
1783      // This case only occurs when the prealloc splitter has already
1784      // assigned a stack slot to this vreg.
1785      Slot = vrm.getStackSlot(li.reg);
1786  }
1787
1788  // Create new intervals and rewrite defs and uses.
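  // Each range is processed with the remat information computed above, looked
  // up by the range's value number. As an illustrative example, a
  // hypothetical interval with two val#s
  //
  //   [100,150:0)[200,250:1)   ; val#0 rematerializable, val#1 not
  //
  // would pass the cloned def for val#0 (letting its uses be re-matted and
  // possibly deleting the original def), while val#1 falls back to loads and
  // stores through the stack slot assigned above.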
1789  for (LiveInterval::Ranges::const_iterator
1790         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1791    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1792    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1793    bool DefIsReMat = ReMatDefMI != NULL;
1794    bool CanDelete = ReMatDelete[I->valno->id];
1795    int LdSlot = 0;
1796    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1797    bool isLoad = isLoadSS ||
1798      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
1799    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1800                               Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1801                               CanDelete, vrm, rc, ReMatIds, loopInfo,
1802                               SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1803                               MBBVRegsMap, NewLIs);
1804  }
1805
1806  // Insert spills / restores if we are splitting.
1807  if (!TrySplit) {
1808    handleSpilledImpDefs(li, vrm, rc, NewLIs);
1809    return NewLIs;
1810  }
1811
1812  SmallPtrSet<LiveInterval*, 4> AddedKill;
1813  SmallVector<unsigned, 2> Ops;
1814  if (NeedStackSlot) {
1815    int Id = SpillMBBs.find_first();
1816    while (Id != -1) {
1817      std::vector<SRInfo> &spills = SpillIdxes[Id];
1818      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1819        SlotIndex index = spills[i].index;
1820        unsigned VReg = spills[i].vreg;
1821        LiveInterval &nI = getOrCreateInterval(VReg);
1822        bool isReMat = vrm.isReMaterialized(VReg);
1823        MachineInstr *MI = getInstructionFromIndex(index);
1824        bool CanFold = false;
1825        bool FoundUse = false;
1826        Ops.clear();
1827        if (spills[i].canFold) {
1828          CanFold = true;
1829          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1830            MachineOperand &MO = MI->getOperand(j);
1831            if (!MO.isReg() || MO.getReg() != VReg)
1832              continue;
1833
1834            Ops.push_back(j);
1835            if (MO.isDef())
1836              continue;
1837            if (isReMat ||
1838                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1839                                                RestoreMBBs, RestoreIdxes))) {
1840              // MI has two-address uses of the same register. If the use
1841              // isn't the first and only use in the BB, then we can't fold
1842              // it. FIXME: Move this to rewriteInstructionsForSpills.
1843              CanFold = false;
1844              break;
1845            }
1846            FoundUse = true;
1847          }
1848        }
1849        // Fold the store into the def if possible.
1850        bool Folded = false;
1851        if (CanFold && !Ops.empty()) {
1852          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg)) {
1853            Folded = true;
1854            if (FoundUse) {
1855              // Also folded uses, do not issue a load.
1856              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1857              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1858            }
1859            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
1860          }
1861        }
1862
1863        // Otherwise tell the spiller to issue a spill.
1864        if (!Folded) {
1865          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1866          bool isKill = LR->end == index.getStoreIndex();
1867          // No need to spill a dead def.
1868          if (!MI->registerDefIsDead(nI.reg))
1869            vrm.addSpillPoint(VReg, isKill, MI);
1870          if (isKill)
1871            AddedKill.insert(&nI);
1872        }
1873      }
1874      Id = SpillMBBs.find_next(Id);
1875    }
1876  }
1877
1878  int Id = RestoreMBBs.find_first();
1879  while (Id != -1) {
1880    std::vector<SRInfo> &restores = RestoreIdxes[Id];
1881    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1882      SlotIndex index = restores[i].index;
1883      if (index == SlotIndex())
1884        continue;
1885      unsigned VReg = restores[i].vreg;
1886      LiveInterval &nI = getOrCreateInterval(VReg);
1887      bool isReMat = vrm.isReMaterialized(VReg);
1888      MachineInstr *MI = getInstructionFromIndex(index);
1889      bool CanFold = false;
1890      Ops.clear();
1891      if (restores[i].canFold) {
1892        CanFold = true;
1893        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1894          MachineOperand &MO = MI->getOperand(j);
1895          if (!MO.isReg() || MO.getReg() != VReg)
1896            continue;
1897
1898          if (MO.isDef()) {
1899            // If this restore were to be folded, it would have been folded
1900            // already.
1901            CanFold = false;
1902            break;
1903          }
1904          Ops.push_back(j);
1905        }
1906      }
1907
1908      // Fold the load into the use if possible.
1909      bool Folded = false;
1910      if (CanFold && !Ops.empty()) {
1911        if (!isReMat)
1912          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot, VReg);
1913        else {
1914          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1915          int LdSlot = 0;
1916          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1917          // If the rematerializable def is a load, also try to fold it.
1918          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
1919            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1920                                          Ops, isLoadSS, LdSlot, VReg);
1921          if (!Folded) {
1922            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
1923            if (ImpUse) {
1924              // Re-matting an instruction with virtual register use. Add
1925              // the register as an implicit use on the use MI and update
1926              // the register interval's spill weight to HUGE_VALF to
1927              // prevent it from being spilled.
1928              LiveInterval &ImpLi = getInterval(ImpUse);
1929              ImpLi.weight = HUGE_VALF;
1930              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
1931            }
1932          }
1933        }
1934      }
1935      // If folding was not possible or it failed, tell the spiller to issue
1936      // a load / rematerialization for us.
1937      if (Folded)
1938        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
1939      else
1940        vrm.addRestorePoint(VReg, MI);
1941    }
1942    Id = RestoreMBBs.find_next(Id);
1943  }
1944
1945  // Finalize intervals: add kills, finalize spill weights, and filter out
1946  // dead intervals.
1947  std::vector<LiveInterval*> RetNewLIs;
1948  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1949    LiveInterval *LI = NewLIs[i];
1950    if (!LI->empty()) {
1951      LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI);
1952      if (!AddedKill.count(LI)) {
1953        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1954        SlotIndex LastUseIdx = LR->end.getBaseIndex();
1955        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1956        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
1957        assert(UseIdx != -1);
1958        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
1959          LastUse->getOperand(UseIdx).setIsKill();
1960          vrm.addKillPoint(LI->reg, LastUseIdx);
1961        }
1962      }
1963      RetNewLIs.push_back(LI);
1964    }
1965  }
1966
1967  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
1968  return RetNewLIs;
1969}
1970
1971/// hasAllocatableSuperReg - Return true if the specified physical register has
1972/// any super register that's allocatable.
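/// For example (illustrative, target-dependent): querying x86's AX returns
/// true only if a super-register such as EAX or RAX is both allocatable and
/// currently has a live interval; a register with no super-registers always
/// yields false.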
1973bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1974  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1975    if (allocatableRegs_[*AS] && hasInterval(*AS))
1976      return true;
1977  return false;
1978}
1979
1980/// getRepresentativeReg - Find the largest super register of the specified
1981/// physical register.
1982unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1983  // Find the largest super-register that is allocatable.
1984  unsigned BestReg = Reg;
1985  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1986    unsigned SuperReg = *AS;
1987    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1988      BestReg = SuperReg;
1989      break;
1990    }
1991  }
1992  return BestReg;
1993}
1994
1995/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1996/// specified interval that conflict with the specified physical register.
1997unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1998                                                   unsigned PhysReg) const {
1999  unsigned NumConflicts = 0;
2000  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
2001  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
2002         E = mri_->reg_end(); I != E; ++I) {
2003    MachineOperand &O = I.getOperand();
2004    MachineInstr *MI = O.getParent();
2005    SlotIndex Index = getInstructionIndex(MI);
2006    if (pli.liveAt(Index))
2007      ++NumConflicts;
2008  }
2009  return NumConflicts;
2010}
2011
2012/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2013/// around all defs and uses of the specified interval. Return true if it
2014/// was able to cut its interval.
2015bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2016                                                  unsigned PhysReg, VirtRegMap &vrm) {
2017  unsigned SpillReg = getRepresentativeReg(PhysReg);
2018
2019  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2020    // There may be registers that alias PhysReg but are not sub-registers
2021    // of the chosen representative super register. Assert, since we
2022    // cannot handle them yet.
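    // For instance (illustrative): if PhysReg is x86's EAX, the
    // representative SpillReg is RAX, and every alias of EAX (AX, AH, AL,
    // RAX) is either SpillReg itself or one of its sub-registers, so the
    // assert holds.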
2023 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) || 2024 tri_->isSuperRegister(*AS, SpillReg)); 2025 2026 bool Cut = false; 2027 SmallVector<unsigned, 4> PRegs; 2028 if (hasInterval(SpillReg)) 2029 PRegs.push_back(SpillReg); 2030 else { 2031 SmallSet<unsigned, 4> Added; 2032 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) 2033 if (Added.insert(*AS) && hasInterval(*AS)) { 2034 PRegs.push_back(*AS); 2035 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS) 2036 Added.insert(*ASS); 2037 } 2038 } 2039 2040 SmallPtrSet<MachineInstr*, 8> SeenMIs; 2041 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg), 2042 E = mri_->reg_end(); I != E; ++I) { 2043 MachineOperand &O = I.getOperand(); 2044 MachineInstr *MI = O.getParent(); 2045 if (SeenMIs.count(MI)) 2046 continue; 2047 SeenMIs.insert(MI); 2048 SlotIndex Index = getInstructionIndex(MI); 2049 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) { 2050 unsigned PReg = PRegs[i]; 2051 LiveInterval &pli = getInterval(PReg); 2052 if (!pli.liveAt(Index)) 2053 continue; 2054 vrm.addEmergencySpill(PReg, MI); 2055 SlotIndex StartIdx = Index.getLoadIndex(); 2056 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex(); 2057 if (pli.isInOneLiveRange(StartIdx, EndIdx)) { 2058 pli.removeRange(StartIdx, EndIdx); 2059 Cut = true; 2060 } else { 2061 std::string msg; 2062 raw_string_ostream Msg(msg); 2063 Msg << "Ran out of registers during register allocation!"; 2064 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) { 2065 Msg << "\nPlease check your inline asm statement for invalid " 2066 << "constraints:\n"; 2067 MI->print(Msg, tm_); 2068 } 2069 llvm_report_error(Msg.str()); 2070 } 2071 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) { 2072 if (!hasInterval(*AS)) 2073 continue; 2074 LiveInterval &spli = getInterval(*AS); 2075 if (spli.liveAt(Index)) 2076 spli.removeRange(Index.getLoadIndex(), 2077 Index.getNextIndex().getBaseIndex()); 2078 } 2079 } 2080 } 2081 return Cut; 2082} 2083 2084LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg, 2085 MachineInstr* startInst) { 2086 LiveInterval& Interval = getOrCreateInterval(reg); 2087 VNInfo* VN = Interval.getNextValue( 2088 SlotIndex(getInstructionIndex(startInst).getDefIndex()), 2089 startInst, true, getVNInfoAllocator()); 2090 VN->setHasPHIKill(true); 2091 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent())); 2092 LiveRange LR( 2093 SlotIndex(getInstructionIndex(startInst).getDefIndex()), 2094 getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN); 2095 Interval.addRange(LR); 2096 2097 return LR; 2098} 2099 2100
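// A minimal usage sketch for addLiveRangeToEndOfBlock (hypothetical caller;
// LIs, CopyMI and vreg 1024 are illustrative, not part of this file):
//
//   // CopyMI defines %reg1024 inside its block; extend the vreg's live
//   // interval from the copy's def slot to the end of the block:
//   LiveRange LR = LIs->addLiveRangeToEndOfBlock(1024, CopyMI);
//   // LR.valno now has hasPHIKill() set and a kill recorded at the block's
//   // terminator gap, as expected by phi-elimination style updates.
//
// The returned range starts at CopyMI's def index and extends one base index
// past getMBBEndIdx() of the parent block, matching the computation above.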