LiveIntervalAnalysis.cpp revision cf97036675340bc889cfe04295cda63afe9496e2
//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the LiveInterval analysis pass which is used
// by the Linear Scan Register allocator. This pass linearizes the
// basic blocks of the function in DFS order and uses the
// LiveVariables pass to conservatively compute live intervals for
// each virtual and physical register.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "liveintervals"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "VirtRegMap.h"
#include "llvm/Value.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <algorithm>
#include <limits>
#include <cmath>
using namespace llvm;

// Hidden options for help debugging.
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> EnableFastSpilling("fast-spill",
                                        cl::init(false), cl::Hidden);

STATISTIC(numIntervals , "Number of original intervals");
STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
STATISTIC(numSplits    , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AliasAnalysis>();
  AU.addPreserved<AliasAnalysis>();
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);

  if (!StrongPHIElim) {
    AU.addPreservedID(PHIEliminationID);
    AU.addRequiredID(PHIEliminationID);
  }

  AU.addRequiredID(TwoAddressInstructionPassID);
  AU.addPreserved<ProcessImplicitDefs>();
  AU.addRequired<ProcessImplicitDefs>();
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (DenseMap<unsigned, LiveInterval*>::iterator I = r2iMap_.begin(),
       E = r2iMap_.end(); I != E; ++I)
    delete I->second;

  r2iMap_.clear();

  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
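  // (Reset() below returns the bump allocator's memory in one shot, now that
  // the intervals holding VNInfo pointers have been deleted.)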
94 VNInfoAllocator.Reset(); 95 while (!CloneMIs.empty()) { 96 MachineInstr *MI = CloneMIs.back(); 97 CloneMIs.pop_back(); 98 mf_->DeleteMachineInstr(MI); 99 } 100} 101 102/// runOnMachineFunction - Register allocate the whole function 103/// 104bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) { 105 mf_ = &fn; 106 mri_ = &mf_->getRegInfo(); 107 tm_ = &fn.getTarget(); 108 tri_ = tm_->getRegisterInfo(); 109 tii_ = tm_->getInstrInfo(); 110 aa_ = &getAnalysis<AliasAnalysis>(); 111 lv_ = &getAnalysis<LiveVariables>(); 112 indexes_ = &getAnalysis<SlotIndexes>(); 113 allocatableRegs_ = tri_->getAllocatableSet(fn); 114 115 computeIntervals(); 116 117 numIntervals += getNumIntervals(); 118 119 DEBUG(dump()); 120 return true; 121} 122 123/// print - Implement the dump method. 124void LiveIntervals::print(raw_ostream &OS, const Module* ) const { 125 OS << "********** INTERVALS **********\n"; 126 for (const_iterator I = begin(), E = end(); I != E; ++I) { 127 I->second->print(OS, tri_); 128 OS << "\n"; 129 } 130 131 printInstrs(OS); 132} 133 134void LiveIntervals::printInstrs(raw_ostream &OS) const { 135 OS << "********** MACHINEINSTRS **********\n"; 136 137 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end(); 138 mbbi != mbbe; ++mbbi) { 139 OS << "BB#" << mbbi->getNumber() 140 << ":\t\t# derived from " << mbbi->getName() << "\n"; 141 for (MachineBasicBlock::iterator mii = mbbi->begin(), 142 mie = mbbi->end(); mii != mie; ++mii) { 143 OS << getInstructionIndex(mii) << '\t' << *mii; 144 } 145 } 146} 147 148void LiveIntervals::dumpInstrs() const { 149 printInstrs(errs()); 150} 151 152bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li, 153 VirtRegMap &vrm, unsigned reg) { 154 // We don't handle fancy stuff crossing basic block boundaries 155 if (li.ranges.size() != 1) 156 return true; 157 const LiveRange &range = li.ranges.front(); 158 SlotIndex idx = range.start.getBaseIndex(); 159 SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex(); 160 161 // Skip deleted instructions 162 MachineInstr *firstMI = getInstructionFromIndex(idx); 163 while (!firstMI && idx != end) { 164 idx = idx.getNextIndex(); 165 firstMI = getInstructionFromIndex(idx); 166 } 167 if (!firstMI) 168 return false; 169 170 // Find last instruction in range 171 SlotIndex lastIdx = end.getPrevIndex(); 172 MachineInstr *lastMI = getInstructionFromIndex(lastIdx); 173 while (!lastMI && lastIdx != idx) { 174 lastIdx = lastIdx.getPrevIndex(); 175 lastMI = getInstructionFromIndex(lastIdx); 176 } 177 if (!lastMI) 178 return false; 179 180 // Range cannot cross basic block boundaries or terminators 181 MachineBasicBlock *MBB = firstMI->getParent(); 182 if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator()) 183 return true; 184 185 MachineBasicBlock::const_iterator E = lastMI; 186 ++E; 187 for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) { 188 const MachineInstr &MI = *I; 189 190 // Allow copies to and from li.reg 191 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg; 192 if (tii_->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg)) 193 if (SrcReg == li.reg || DstReg == li.reg) 194 continue; 195 196 // Check for operands using reg 197 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { 198 const MachineOperand& mop = MI.getOperand(i); 199 if (!mop.isReg()) 200 continue; 201 unsigned PhysReg = mop.getReg(); 202 if (PhysReg == 0 || PhysReg == li.reg) 203 continue; 204 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) { 205 if (!vrm.hasPhys(PhysReg)) 206 
          continue;
        PhysReg = vrm.getPhys(PhysReg);
      }
      if (PhysReg && tri_->regsOverlap(PhysReg, reg))
        return true;
    }
  }

  // No conflicts found.
  return false;
}

/// conflictsWithPhysRegRef - Similar to conflictsWithPhysReg except that
/// it can check uses as well.
bool LiveIntervals::conflictsWithPhysRegRef(LiveInterval &li,
                                            unsigned Reg, bool CheckUse,
                                  SmallPtrSet<MachineInstr*,32> &JoinedCopies) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (SlotIndex index = I->start.getBaseIndex(),
           end = I->end.getPrevSlot().getBaseIndex().getNextIndex();
         index != end;
         index = index.getNextIndex()) {
      MachineInstr *MI = getInstructionFromIndex(index);
      if (!MI)
        continue;               // skip deleted instructions

      if (JoinedCopies.count(MI))
        continue;
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand& MO = MI->getOperand(i);
        if (!MO.isReg())
          continue;
        if (MO.isUse() && !CheckUse)
          continue;
        unsigned PhysReg = MO.getReg();
        if (PhysReg == 0 || TargetRegisterInfo::isVirtualRegister(PhysReg))
          continue;
        if (tri_->isSubRegister(Reg, PhysReg))
          return true;
      }
    }
  }

  return false;
}

#ifndef NDEBUG
static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    errs() << tri_->getName(reg);
  else
    errs() << "%reg" << reg;
}
#endif

void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             SlotIndex MIIdx,
                                             MachineOperand& MO,
                                             unsigned MOIdx,
                                             LiveInterval &interval) {
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    SlotIndex defIndex = MIIdx.getDefIndex();
    // Earlyclobbers move back one, so that they overlap the live range
    // of inputs.
    if (MO.isEarlyClobber())
      defIndex = MIIdx.getUseIndex();
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
      CopyMI = mi;
    // Earlyclobbers move back one.
    ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
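      // Illustrative example (hypothetical vreg and slot indices): for
      //    8   %reg1024 = ...
      //   12   ... = use %reg1024<kill>
      // the code below builds a single intra-block range from the def slot
      // at 8 to the def slot of the killing instruction at 12.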
      SlotIndex killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex();
      else
        killIdx = defIndex.getStoreIndex();

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.empty() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DEBUG(errs() << " +" << LR << "\n");
        ValNo->addKill(killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex, getMBBEndIdx(mbb).getNextIndex().getLoadIndex(),
                    ValNo);
    DEBUG(errs() << " +" << NewLR);
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (SparseBitVector<>::iterator I = vi.AliveBlocks.begin(),
           E = vi.AliveBlocks.end(); I != E; ++I) {
      LiveRange LR(
          getMBBStartIdx(mf_->getBlockNumbered(*I)),
          getMBBEndIdx(mf_->getBlockNumbered(*I)).getNextIndex().getLoadIndex(),
          ValNo);
      interval.addRange(LR);
      DEBUG(errs() << " +" << LR);
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      SlotIndex killIdx =
        getInstructionIndex(Kill).getDefIndex();
      LiveRange LR(getMBBStartIdx(Kill->getParent()), killIdx, ValNo);
      interval.addRange(LR);
      ValNo->addKill(killIdx);
      DEBUG(errs() << " +" << LR);
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operands.
    if (mi->isRegTiedToUseOperand(MOIdx)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      SlotIndex DefIndex = interval.getValNumInfo(0)->def.getDefIndex();
      SlotIndex RedefIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        RedefIndex = MIIdx.getUseIndex();

      const LiveRange *OldLR =
        interval.getLiveRangeContaining(RedefIndex.getUseIndex());
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
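      // Illustrative example (hypothetical registers and indices): for
      //    4   %reg1025 = COPY %reg1024
      //    8   %reg1025 = ADD %reg1025<tied>, ...
      // value #1 takes over the original copy def at 4, and value #0 is moved
      // below onto the two-address redefinition at 8.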
388 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(), 389 false, // update at * 390 VNInfoAllocator); 391 ValNo->setFlags(OldValNo->getFlags()); // * <- updating here 392 393 // Value#0 is now defined by the 2-addr instruction. 394 OldValNo->def = RedefIndex; 395 OldValNo->setCopy(0); 396 397 // Add the new live interval which replaces the range for the input copy. 398 LiveRange LR(DefIndex, RedefIndex, ValNo); 399 DEBUG(errs() << " replace range with " << LR); 400 interval.addRange(LR); 401 ValNo->addKill(RedefIndex); 402 403 // If this redefinition is dead, we need to add a dummy unit live 404 // range covering the def slot. 405 if (MO.isDead()) 406 interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(), 407 OldValNo)); 408 409 DEBUG({ 410 errs() << " RESULT: "; 411 interval.print(errs(), tri_); 412 }); 413 } else { 414 // Otherwise, this must be because of phi elimination. If this is the 415 // first redefinition of the vreg that we have seen, go back and change 416 // the live range in the PHI block to be a different value number. 417 if (interval.containsOneValue()) { 418 // Remove the old range that we now know has an incorrect number. 419 VNInfo *VNI = interval.getValNumInfo(0); 420 MachineInstr *Killer = vi.Kills[0]; 421 SlotIndex Start = getMBBStartIdx(Killer->getParent()); 422 SlotIndex End = getInstructionIndex(Killer).getDefIndex(); 423 DEBUG({ 424 errs() << " Removing [" << Start << "," << End << "] from: "; 425 interval.print(errs(), tri_); 426 errs() << "\n"; 427 }); 428 interval.removeRange(Start, End); 429 assert(interval.ranges.size() == 1 && 430 "Newly discovered PHI interval has >1 ranges."); 431 MachineBasicBlock *killMBB = getMBBFromIndex(VNI->def); 432 VNI->addKill(indexes_->getTerminatorGap(killMBB)); 433 VNI->setHasPHIKill(true); 434 DEBUG({ 435 errs() << " RESULT: "; 436 interval.print(errs(), tri_); 437 }); 438 439 // Replace the interval with one of a NEW value number. Note that this 440 // value number isn't actually defined by an instruction, weird huh? :) 441 LiveRange LR(Start, End, 442 interval.getNextValue(SlotIndex(getMBBStartIdx(Killer->getParent()), true), 443 0, false, VNInfoAllocator)); 444 LR.valno->setIsPHIDef(true); 445 DEBUG(errs() << " replace range with " << LR); 446 interval.addRange(LR); 447 LR.valno->addKill(End); 448 DEBUG({ 449 errs() << " RESULT: "; 450 interval.print(errs(), tri_); 451 }); 452 } 453 454 // In the case of PHI elimination, each variable definition is only 455 // live until the end of the block. We've already taken care of the 456 // rest of the live range. 
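      // Each such def therefore gets its own value number below, with a range
      // that ends at this block's terminator gap where the PHI kill is
      // recorded.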
      SlotIndex defIndex = MIIdx.getDefIndex();
      if (MO.isEarlyClobber())
        defIndex = MIIdx.getUseIndex();

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::SUBREG_TO_REG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg, SrcSubReg, DstSubReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator);

      SlotIndex killIndex = getMBBEndIdx(mbb).getNextIndex().getLoadIndex();
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      ValNo->addKill(indexes_->getTerminatorGap(mbb));
      ValNo->setHasPHIKill(true);
      DEBUG(errs() << " +" << LR);
    }
  }

  DEBUG(errs() << '\n');
}

void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              SlotIndex MIIdx,
                                              MachineOperand& MO,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic blocks, so its
  // lifetime must end somewhere in its defining basic block.
  DEBUG({
      errs() << "\t\tregister: ";
      printRegName(interval.reg, tri_);
    });

  SlotIndex baseIndex = MIIdx;
  SlotIndex start = baseIndex.getDefIndex();
  // Earlyclobbers move back one.
  if (MO.isEarlyClobber())
    start = MIIdx.getUseIndex();
  SlotIndex end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  // For earlyclobbers, the defSlot was pushed back one; the extra
  // advance below compensates.
  if (MO.isDead()) {
    DEBUG(errs() << " dead");
    end = start.getStoreIndex();
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  baseIndex = baseIndex.getNextIndex();
  while (++mi != MBB->end()) {

    if (getInstructionFromIndex(baseIndex) == 0)
      baseIndex = indexes_->getNextNonNullIndex(baseIndex);

    if (mi->killsRegister(interval.reg, tri_)) {
      DEBUG(errs() << " killed");
      end = baseIndex.getDefIndex();
      goto exit;
    } else {
      int DefIdx = mi->findRegisterDefOperandIdx(interval.reg, false, tri_);
      if (DefIdx != -1) {
        if (mi->isRegTiedToUseOperand(DefIdx)) {
          // Two-address instruction.
          end = baseIndex.getDefIndex();
        } else {
          // Another instruction redefines the register before it is ever
          // read. Then the register is essentially dead at the instruction
          // that defines it. Hence its interval is:
          // [defSlot(def), defSlot(def)+1)
          DEBUG(errs() << " dead");
          end = start.getStoreIndex();
        }
        goto exit;
      }
    }

    baseIndex = baseIndex.getNextIndex();
  }

  // The only case we should have a dead physreg here without a killing
  // instruction, or any other instruction where we know it's dead, is if it
  // is live-in to the function and never used. Another possible case is that
  // the implicit use of the physical register has been deleted by the
  // two-address pass.
  end = start.getStoreIndex();

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
558 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start); 559 bool Extend = OldLR != interval.end(); 560 VNInfo *ValNo = Extend 561 ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator); 562 if (MO.isEarlyClobber() && Extend) 563 ValNo->setHasRedefByEC(true); 564 LiveRange LR(start, end, ValNo); 565 interval.addRange(LR); 566 LR.valno->addKill(end); 567 DEBUG(errs() << " +" << LR << '\n'); 568} 569 570void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB, 571 MachineBasicBlock::iterator MI, 572 SlotIndex MIIdx, 573 MachineOperand& MO, 574 unsigned MOIdx) { 575 if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) 576 handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx, 577 getOrCreateInterval(MO.getReg())); 578 else if (allocatableRegs_[MO.getReg()]) { 579 MachineInstr *CopyMI = NULL; 580 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg; 581 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG || 582 MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG || 583 MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG || 584 tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg)) 585 CopyMI = MI; 586 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO, 587 getOrCreateInterval(MO.getReg()), CopyMI); 588 // Def of a register also defines its sub-registers. 589 for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS) 590 // If MI also modifies the sub-register explicitly, avoid processing it 591 // more than once. Do not pass in TRI here so it checks for exact match. 592 if (!MI->modifiesRegister(*AS)) 593 handlePhysicalRegisterDef(MBB, MI, MIIdx, MO, 594 getOrCreateInterval(*AS), 0); 595 } 596} 597 598void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB, 599 SlotIndex MIIdx, 600 LiveInterval &interval, bool isAlias) { 601 DEBUG({ 602 errs() << "\t\tlivein register: "; 603 printRegName(interval.reg, tri_); 604 }); 605 606 // Look for kills, if it reaches a def before it's killed, then it shouldn't 607 // be considered a livein. 608 MachineBasicBlock::iterator mi = MBB->begin(); 609 SlotIndex baseIndex = MIIdx; 610 SlotIndex start = baseIndex; 611 if (getInstructionFromIndex(baseIndex) == 0) 612 baseIndex = indexes_->getNextNonNullIndex(baseIndex); 613 614 SlotIndex end = baseIndex; 615 bool SeenDefUse = false; 616 617 while (mi != MBB->end()) { 618 if (mi->killsRegister(interval.reg, tri_)) { 619 DEBUG(errs() << " killed"); 620 end = baseIndex.getDefIndex(); 621 SeenDefUse = true; 622 break; 623 } else if (mi->modifiesRegister(interval.reg, tri_)) { 624 // Another instruction redefines the register before it is ever read. 625 // Then the register is essentially dead at the instruction that defines 626 // it. Hence its interval is: 627 // [defSlot(def), defSlot(def)+1) 628 DEBUG(errs() << " dead"); 629 end = start.getStoreIndex(); 630 SeenDefUse = true; 631 break; 632 } 633 634 ++mi; 635 if (mi != MBB->end()) { 636 baseIndex = indexes_->getNextNonNullIndex(baseIndex); 637 } 638 } 639 640 // Live-in register might not be used at all. 
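  // If no def or use was seen, the value is treated as dead on entry when
  // this interval models an aliased sub-register, and as live through the
  // whole block otherwise.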
641 if (!SeenDefUse) { 642 if (isAlias) { 643 DEBUG(errs() << " dead"); 644 end = MIIdx.getStoreIndex(); 645 } else { 646 DEBUG(errs() << " live through"); 647 end = baseIndex; 648 } 649 } 650 651 VNInfo *vni = 652 interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true), 653 0, false, VNInfoAllocator); 654 vni->setIsPHIDef(true); 655 LiveRange LR(start, end, vni); 656 657 interval.addRange(LR); 658 LR.valno->addKill(end); 659 DEBUG(errs() << " +" << LR << '\n'); 660} 661 662/// computeIntervals - computes the live intervals for virtual 663/// registers. for some ordering of the machine instructions [1,N] a 664/// live interval is an interval [i, j) where 1 <= i <= j < N for 665/// which a variable is live 666void LiveIntervals::computeIntervals() { 667 DEBUG(errs() << "********** COMPUTING LIVE INTERVALS **********\n" 668 << "********** Function: " 669 << ((Value*)mf_->getFunction())->getName() << '\n'); 670 671 SmallVector<unsigned, 8> UndefUses; 672 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end(); 673 MBBI != E; ++MBBI) { 674 MachineBasicBlock *MBB = MBBI; 675 // Track the index of the current machine instr. 676 SlotIndex MIIndex = getMBBStartIdx(MBB); 677 DEBUG(errs() << MBB->getName() << ":\n"); 678 679 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end(); 680 681 // Create intervals for live-ins to this BB first. 682 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(), 683 LE = MBB->livein_end(); LI != LE; ++LI) { 684 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI)); 685 // Multiple live-ins can alias the same register. 686 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS) 687 if (!hasInterval(*AS)) 688 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS), 689 true); 690 } 691 692 // Skip over empty initial indices. 693 if (getInstructionFromIndex(MIIndex) == 0) 694 MIIndex = indexes_->getNextNonNullIndex(MIIndex); 695 696 for (; MI != miEnd; ++MI) { 697 DEBUG(errs() << MIIndex << "\t" << *MI); 698 699 // Handle defs. 700 for (int i = MI->getNumOperands() - 1; i >= 0; --i) { 701 MachineOperand &MO = MI->getOperand(i); 702 if (!MO.isReg() || !MO.getReg()) 703 continue; 704 705 // handle register defs - build intervals 706 if (MO.isDef()) 707 handleRegisterDef(MBB, MI, MIIndex, MO, i); 708 else if (MO.isUndef()) 709 UndefUses.push_back(MO.getReg()); 710 } 711 712 // Move to the next instr slot. 713 MIIndex = indexes_->getNextNonNullIndex(MIIndex); 714 } 715 } 716 717 // Create empty intervals for registers defined by implicit_def's (except 718 // for those implicit_def that define values which are liveout of their 719 // blocks. 720 for (unsigned i = 0, e = UndefUses.size(); i != e; ++i) { 721 unsigned UndefReg = UndefUses[i]; 722 (void)getOrCreateInterval(UndefReg); 723 } 724} 725 726LiveInterval* LiveIntervals::createInterval(unsigned reg) { 727 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? HUGE_VALF : 0.0F; 728 return new LiveInterval(reg, Weight); 729} 730 731/// dupInterval - Duplicate a live interval. The caller is responsible for 732/// managing the allocated memory. 733LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) { 734 LiveInterval *NewLI = createInterval(li->reg); 735 NewLI->Copy(*li, mri_, getVNInfoAllocator()); 736 return NewLI; 737} 738 739/// getVNInfoSourceReg - Helper function that parses the specified VNInfo 740/// copy field and returns the source register that defines it. 
741unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const { 742 if (!VNI->getCopy()) 743 return 0; 744 745 if (VNI->getCopy()->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG) { 746 // If it's extracting out of a physical register, return the sub-register. 747 unsigned Reg = VNI->getCopy()->getOperand(1).getReg(); 748 if (TargetRegisterInfo::isPhysicalRegister(Reg)) 749 Reg = tri_->getSubReg(Reg, VNI->getCopy()->getOperand(2).getImm()); 750 return Reg; 751 } else if (VNI->getCopy()->getOpcode() == TargetInstrInfo::INSERT_SUBREG || 752 VNI->getCopy()->getOpcode() == TargetInstrInfo::SUBREG_TO_REG) 753 return VNI->getCopy()->getOperand(2).getReg(); 754 755 unsigned SrcReg, DstReg, SrcSubReg, DstSubReg; 756 if (tii_->isMoveInstr(*VNI->getCopy(), SrcReg, DstReg, SrcSubReg, DstSubReg)) 757 return SrcReg; 758 llvm_unreachable("Unrecognized copy instruction!"); 759 return 0; 760} 761 762//===----------------------------------------------------------------------===// 763// Register allocator hooks. 764// 765 766/// getReMatImplicitUse - If the remat definition MI has one (for now, we only 767/// allow one) virtual register operand, then its uses are implicitly using 768/// the register. Returns the virtual register. 769unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li, 770 MachineInstr *MI) const { 771 unsigned RegOp = 0; 772 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 773 MachineOperand &MO = MI->getOperand(i); 774 if (!MO.isReg() || !MO.isUse()) 775 continue; 776 unsigned Reg = MO.getReg(); 777 if (Reg == 0 || Reg == li.reg) 778 continue; 779 780 if (TargetRegisterInfo::isPhysicalRegister(Reg) && 781 !allocatableRegs_[Reg]) 782 continue; 783 // FIXME: For now, only remat MI with at most one register operand. 784 assert(!RegOp && 785 "Can't rematerialize instruction with multiple register operand!"); 786 RegOp = MO.getReg(); 787#ifndef NDEBUG 788 break; 789#endif 790 } 791 return RegOp; 792} 793 794/// isValNoAvailableAt - Return true if the val# of the specified interval 795/// which reaches the given instruction also reaches the specified use index. 796bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI, 797 SlotIndex UseIdx) const { 798 SlotIndex Index = getInstructionIndex(MI); 799 VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno; 800 LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx); 801 return UI != li.end() && UI->valno == ValNo; 802} 803 804/// isReMaterializable - Returns true if the definition MI of the specified 805/// val# of the specified interval is re-materializable. 806bool LiveIntervals::isReMaterializable(const LiveInterval &li, 807 const VNInfo *ValNo, MachineInstr *MI, 808 SmallVectorImpl<LiveInterval*> &SpillIs, 809 bool &isLoad) { 810 if (DisableReMat) 811 return false; 812 813 if (!tii_->isTriviallyReMaterializable(MI, aa_)) 814 return false; 815 816 // Target-specific code can mark an instruction as being rematerializable 817 // if it has one virtual reg use, though it had better be something like 818 // a PIC base register which is likely to be live everywhere. 
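  // (A typical example is a constant-pool load whose address is formed from
  // the PIC base register.)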
  unsigned ImpUse = getReMatImplicitUse(li, MI);
  if (ImpUse) {
    const LiveInterval &ImpLi = getInterval(ImpUse);
    for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
           re = mri_->use_end(); ri != re; ++ri) {
      MachineInstr *UseMI = &*ri;
      SlotIndex UseIdx = getInstructionIndex(UseMI);
      if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
        continue;
      if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
        return false;
    }

    // If a register operand of the re-materialized instruction is going to
    // be spilled next, then it's not legal to re-materialize this instruction.
    for (unsigned i = 0, e = SpillIs.size(); i != e; ++i)
      if (ImpUse == SpillIs[i]->reg)
        return false;
  }
  return true;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI) {
  SmallVector<LiveInterval*, 4> Dummy1;
  bool Dummy2;
  return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2);
}

/// isReMaterializable - Returns true if the definition MI of every val# of
/// the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       SmallVectorImpl<LiveInterval*> &SpillIs,
                                       bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    if (VNI->isUnused())
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (!VNI->isDefAccurate())
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (MI->isRegTiedToDefOperand(OpIdx)) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         SlotIndex InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
914 if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) { 915 RemoveMachineInstrFromMaps(MI); 916 vrm.RemoveMachineInstrFromMaps(MI); 917 MI->eraseFromParent(); 918 ++numFolds; 919 return true; 920 } 921 922 // Filter the list of operand indexes that are to be folded. Abort if 923 // any operand will prevent folding. 924 unsigned MRInfo = 0; 925 SmallVector<unsigned, 2> FoldOps; 926 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps)) 927 return false; 928 929 // The only time it's safe to fold into a two address instruction is when 930 // it's folding reload and spill from / into a spill stack slot. 931 if (DefMI && (MRInfo & VirtRegMap::isMod)) 932 return false; 933 934 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot) 935 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI); 936 if (fmi) { 937 // Remember this instruction uses the spill slot. 938 if (isSS) vrm.addSpillSlotUse(Slot, fmi); 939 940 // Attempt to fold the memory reference into the instruction. If 941 // we can do this, we don't need to insert spill code. 942 MachineBasicBlock &MBB = *MI->getParent(); 943 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot)) 944 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo); 945 vrm.transferSpillPts(MI, fmi); 946 vrm.transferRestorePts(MI, fmi); 947 vrm.transferEmergencySpills(MI, fmi); 948 ReplaceMachineInstrInMaps(MI, fmi); 949 MI = MBB.insert(MBB.erase(MI), fmi); 950 ++numFolds; 951 return true; 952 } 953 return false; 954} 955 956/// canFoldMemoryOperand - Returns true if the specified load / store 957/// folding is possible. 958bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI, 959 SmallVector<unsigned, 2> &Ops, 960 bool ReMat) const { 961 // Filter the list of operand indexes that are to be folded. Abort if 962 // any operand will prevent folding. 963 unsigned MRInfo = 0; 964 SmallVector<unsigned, 2> FoldOps; 965 if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps)) 966 return false; 967 968 // It's only legal to remat for a use, not a def. 969 if (ReMat && (MRInfo & VirtRegMap::isMod)) 970 return false; 971 972 return tii_->canFoldMemoryOperand(MI, FoldOps); 973} 974 975bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const { 976 LiveInterval::Ranges::const_iterator itr = li.ranges.begin(); 977 978 MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end); 979 980 if (mbb == 0) 981 return false; 982 983 for (++itr; itr != li.ranges.end(); ++itr) { 984 MachineBasicBlock *mbb2 = 985 indexes_->getMBBCoveringRange(itr->start, itr->end); 986 987 if (mbb2 != mbb) 988 return false; 989 } 990 991 return true; 992} 993 994/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of 995/// interval on to-be re-materialized operands of MI) with new register. 996void LiveIntervals::rewriteImplicitOps(const LiveInterval &li, 997 MachineInstr *MI, unsigned NewVReg, 998 VirtRegMap &vrm) { 999 // There is an implicit use. That means one of the other operand is 1000 // being remat'ed and the remat'ed instruction has li.reg as an 1001 // use operand. Make sure we rewrite that as well. 
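  // Walk MI's virtual register operands; any operand that has been
  // rematerialized may have a clone that still refers to li.reg and must be
  // updated to NewVReg as well.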
1002 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 1003 MachineOperand &MO = MI->getOperand(i); 1004 if (!MO.isReg()) 1005 continue; 1006 unsigned Reg = MO.getReg(); 1007 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg)) 1008 continue; 1009 if (!vrm.isReMaterialized(Reg)) 1010 continue; 1011 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg); 1012 MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg); 1013 if (UseMO) 1014 UseMO->setReg(NewVReg); 1015 } 1016} 1017 1018/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions 1019/// for addIntervalsForSpills to rewrite uses / defs for the given live range. 1020bool LiveIntervals:: 1021rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI, 1022 bool TrySplit, SlotIndex index, SlotIndex end, 1023 MachineInstr *MI, 1024 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI, 1025 unsigned Slot, int LdSlot, 1026 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete, 1027 VirtRegMap &vrm, 1028 const TargetRegisterClass* rc, 1029 SmallVector<int, 4> &ReMatIds, 1030 const MachineLoopInfo *loopInfo, 1031 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse, 1032 DenseMap<unsigned,unsigned> &MBBVRegsMap, 1033 std::vector<LiveInterval*> &NewLIs) { 1034 bool CanFold = false; 1035 RestartInstruction: 1036 for (unsigned i = 0; i != MI->getNumOperands(); ++i) { 1037 MachineOperand& mop = MI->getOperand(i); 1038 if (!mop.isReg()) 1039 continue; 1040 unsigned Reg = mop.getReg(); 1041 unsigned RegI = Reg; 1042 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg)) 1043 continue; 1044 if (Reg != li.reg) 1045 continue; 1046 1047 bool TryFold = !DefIsReMat; 1048 bool FoldSS = true; // Default behavior unless it's a remat. 1049 int FoldSlot = Slot; 1050 if (DefIsReMat) { 1051 // If this is the rematerializable definition MI itself and 1052 // all of its uses are rematerialized, simply delete it. 1053 if (MI == ReMatOrigDefMI && CanDelete) { 1054 DEBUG(errs() << "\t\t\t\tErasing re-materlizable def: " 1055 << MI << '\n'); 1056 RemoveMachineInstrFromMaps(MI); 1057 vrm.RemoveMachineInstrFromMaps(MI); 1058 MI->eraseFromParent(); 1059 break; 1060 } 1061 1062 // If def for this use can't be rematerialized, then try folding. 1063 // If def is rematerializable and it's a load, also try folding. 1064 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad)); 1065 if (isLoad) { 1066 // Try fold loads (from stack slot, constant pool, etc.) into uses. 1067 FoldSS = isLoadSS; 1068 FoldSlot = LdSlot; 1069 } 1070 } 1071 1072 // Scan all of the operands of this instruction rewriting operands 1073 // to use NewVReg instead of li.reg as appropriate. We do this for 1074 // two reasons: 1075 // 1076 // 1. If the instr reads the same spilled vreg multiple times, we 1077 // want to reuse the NewVReg. 1078 // 2. If the instr is a two-addr instruction, we are required to 1079 // keep the src/dst regs pinned. 1080 // 1081 // Keep track of whether we replace a use and/or def so that we can 1082 // create the spill interval with the appropriate range. 
1083 1084 HasUse = mop.isUse(); 1085 HasDef = mop.isDef(); 1086 SmallVector<unsigned, 2> Ops; 1087 Ops.push_back(i); 1088 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) { 1089 const MachineOperand &MOj = MI->getOperand(j); 1090 if (!MOj.isReg()) 1091 continue; 1092 unsigned RegJ = MOj.getReg(); 1093 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ)) 1094 continue; 1095 if (RegJ == RegI) { 1096 Ops.push_back(j); 1097 if (!MOj.isUndef()) { 1098 HasUse |= MOj.isUse(); 1099 HasDef |= MOj.isDef(); 1100 } 1101 } 1102 } 1103 1104 // Create a new virtual register for the spill interval. 1105 // Create the new register now so we can map the fold instruction 1106 // to the new register so when it is unfolded we get the correct 1107 // answer. 1108 bool CreatedNewVReg = false; 1109 if (NewVReg == 0) { 1110 NewVReg = mri_->createVirtualRegister(rc); 1111 vrm.grow(); 1112 CreatedNewVReg = true; 1113 1114 // The new virtual register should get the same allocation hints as the 1115 // old one. 1116 std::pair<unsigned, unsigned> Hint = mri_->getRegAllocationHint(Reg); 1117 if (Hint.first || Hint.second) 1118 mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second); 1119 } 1120 1121 if (!TryFold) 1122 CanFold = false; 1123 else { 1124 // Do not fold load / store here if we are splitting. We'll find an 1125 // optimal point to insert a load / store later. 1126 if (!TrySplit) { 1127 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, 1128 Ops, FoldSS, FoldSlot, NewVReg)) { 1129 // Folding the load/store can completely change the instruction in 1130 // unpredictable ways, rescan it from the beginning. 1131 1132 if (FoldSS) { 1133 // We need to give the new vreg the same stack slot as the 1134 // spilled interval. 1135 vrm.assignVirt2StackSlot(NewVReg, FoldSlot); 1136 } 1137 1138 HasUse = false; 1139 HasDef = false; 1140 CanFold = false; 1141 if (isNotInMIMap(MI)) 1142 break; 1143 goto RestartInstruction; 1144 } 1145 } else { 1146 // We'll try to fold it later if it's profitable. 1147 CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat); 1148 } 1149 } 1150 1151 mop.setReg(NewVReg); 1152 if (mop.isImplicit()) 1153 rewriteImplicitOps(li, MI, NewVReg, vrm); 1154 1155 // Reuse NewVReg for other reads. 1156 for (unsigned j = 0, e = Ops.size(); j != e; ++j) { 1157 MachineOperand &mopj = MI->getOperand(Ops[j]); 1158 mopj.setReg(NewVReg); 1159 if (mopj.isImplicit()) 1160 rewriteImplicitOps(li, MI, NewVReg, vrm); 1161 } 1162 1163 if (CreatedNewVReg) { 1164 if (DefIsReMat) { 1165 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI); 1166 if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) { 1167 // Each valnum may have its own remat id. 1168 ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg); 1169 } else { 1170 vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]); 1171 } 1172 if (!CanDelete || (HasUse && HasDef)) { 1173 // If this is a two-addr instruction then its use operands are 1174 // rematerializable but its def is not. It should be assigned a 1175 // stack slot. 1176 vrm.assignVirt2StackSlot(NewVReg, Slot); 1177 } 1178 } else { 1179 vrm.assignVirt2StackSlot(NewVReg, Slot); 1180 } 1181 } else if (HasUse && HasDef && 1182 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) { 1183 // If this interval hasn't been assigned a stack slot (because earlier 1184 // def is a deleted remat def), do it now. 1185 assert(Slot != VirtRegMap::NO_STACK_SLOT); 1186 vrm.assignVirt2StackSlot(NewVReg, Slot); 1187 } 1188 1189 // Re-matting an instruction with virtual register use. 
Add the 1190 // register as an implicit use on the use MI. 1191 if (DefIsReMat && ImpUse) 1192 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true)); 1193 1194 // Create a new register interval for this spill / remat. 1195 LiveInterval &nI = getOrCreateInterval(NewVReg); 1196 if (CreatedNewVReg) { 1197 NewLIs.push_back(&nI); 1198 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg)); 1199 if (TrySplit) 1200 vrm.setIsSplitFromReg(NewVReg, li.reg); 1201 } 1202 1203 if (HasUse) { 1204 if (CreatedNewVReg) { 1205 LiveRange LR(index.getLoadIndex(), index.getDefIndex(), 1206 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator)); 1207 DEBUG(errs() << " +" << LR); 1208 nI.addRange(LR); 1209 } else { 1210 // Extend the split live interval to this def / use. 1211 SlotIndex End = index.getDefIndex(); 1212 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End, 1213 nI.getValNumInfo(nI.getNumValNums()-1)); 1214 DEBUG(errs() << " +" << LR); 1215 nI.addRange(LR); 1216 } 1217 } 1218 if (HasDef) { 1219 LiveRange LR(index.getDefIndex(), index.getStoreIndex(), 1220 nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator)); 1221 DEBUG(errs() << " +" << LR); 1222 nI.addRange(LR); 1223 } 1224 1225 DEBUG({ 1226 errs() << "\t\t\t\tAdded new interval: "; 1227 nI.print(errs(), tri_); 1228 errs() << '\n'; 1229 }); 1230 } 1231 return CanFold; 1232} 1233bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li, 1234 const VNInfo *VNI, 1235 MachineBasicBlock *MBB, 1236 SlotIndex Idx) const { 1237 SlotIndex End = getMBBEndIdx(MBB); 1238 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) { 1239 if (VNI->kills[j].isPHI()) 1240 continue; 1241 1242 SlotIndex KillIdx = VNI->kills[j]; 1243 if (KillIdx > Idx && KillIdx < End) 1244 return true; 1245 } 1246 return false; 1247} 1248 1249/// RewriteInfo - Keep track of machine instrs that will be rewritten 1250/// during spilling. 1251namespace { 1252 struct RewriteInfo { 1253 SlotIndex Index; 1254 MachineInstr *MI; 1255 bool HasUse; 1256 bool HasDef; 1257 RewriteInfo(SlotIndex i, MachineInstr *mi, bool u, bool d) 1258 : Index(i), MI(mi), HasUse(u), HasDef(d) {} 1259 }; 1260 1261 struct RewriteInfoCompare { 1262 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const { 1263 return LHS.Index < RHS.Index; 1264 } 1265 }; 1266} 1267 1268void LiveIntervals:: 1269rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit, 1270 LiveInterval::Ranges::const_iterator &I, 1271 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI, 1272 unsigned Slot, int LdSlot, 1273 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete, 1274 VirtRegMap &vrm, 1275 const TargetRegisterClass* rc, 1276 SmallVector<int, 4> &ReMatIds, 1277 const MachineLoopInfo *loopInfo, 1278 BitVector &SpillMBBs, 1279 DenseMap<unsigned, std::vector<SRInfo> > &SpillIdxes, 1280 BitVector &RestoreMBBs, 1281 DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes, 1282 DenseMap<unsigned,unsigned> &MBBVRegsMap, 1283 std::vector<LiveInterval*> &NewLIs) { 1284 bool AllCanFold = true; 1285 unsigned NewVReg = 0; 1286 SlotIndex start = I->start.getBaseIndex(); 1287 SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex(); 1288 1289 // First collect all the def / use in this live range that will be rewritten. 1290 // Make sure they are sorted according to instruction index. 
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    SlotIndex index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;

    if (O.isUndef())
      // Must be defined by an implicit def. It should not be spilled. Note,
      // this is for correctness reasons. e.g.
      //  8   %reg1024<def> = IMPLICIT_DEF
      // 12   %reg1024<def> = INSERT_SUBREG %reg1024<kill>, %reg1025, 2
      // The live range [12, 14) is not part of the r1024 live interval since
      // it's defined by an implicit def. It will not conflict with the live
      // interval of r1025. Now suppose both registers are spilled; you can
      // easily see a situation where both registers are reloaded before
      // the INSERT_SUBREG and both target registers that would overlap.
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    SlotIndex index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI defs and/or uses the same register multiple times, then there
    // are multiple entries.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
1372 if (NewVReg && TrySplit && AllCanFold) { 1373 LiveInterval &nI = getOrCreateInterval(NewVReg); 1374 nI.weight /= 10.0F; 1375 } 1376 AllCanFold = true; 1377 } 1378 NewVReg = ThisVReg; 1379 1380 bool HasDef = false; 1381 bool HasUse = false; 1382 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit, 1383 index, end, MI, ReMatOrigDefMI, ReMatDefMI, 1384 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, 1385 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg, 1386 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs); 1387 if (!HasDef && !HasUse) 1388 continue; 1389 1390 AllCanFold &= CanFold; 1391 1392 // Update weight of spill interval. 1393 LiveInterval &nI = getOrCreateInterval(NewVReg); 1394 if (!TrySplit) { 1395 // The spill weight is now infinity as it cannot be spilled again. 1396 nI.weight = HUGE_VALF; 1397 continue; 1398 } 1399 1400 // Keep track of the last def and first use in each MBB. 1401 if (HasDef) { 1402 if (MI != ReMatOrigDefMI || !CanDelete) { 1403 bool HasKill = false; 1404 if (!HasUse) 1405 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex()); 1406 else { 1407 // If this is a two-address code, then this index starts a new VNInfo. 1408 const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex()); 1409 if (VNI) 1410 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex()); 1411 } 1412 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII = 1413 SpillIdxes.find(MBBId); 1414 if (!HasKill) { 1415 if (SII == SpillIdxes.end()) { 1416 std::vector<SRInfo> S; 1417 S.push_back(SRInfo(index, NewVReg, true)); 1418 SpillIdxes.insert(std::make_pair(MBBId, S)); 1419 } else if (SII->second.back().vreg != NewVReg) { 1420 SII->second.push_back(SRInfo(index, NewVReg, true)); 1421 } else if (index > SII->second.back().index) { 1422 // If there is an earlier def and this is a two-address 1423 // instruction, then it's not possible to fold the store (which 1424 // would also fold the load). 1425 SRInfo &Info = SII->second.back(); 1426 Info.index = index; 1427 Info.canFold = !HasUse; 1428 } 1429 SpillMBBs.set(MBBId); 1430 } else if (SII != SpillIdxes.end() && 1431 SII->second.back().vreg == NewVReg && 1432 index > SII->second.back().index) { 1433 // There is an earlier def that's not killed (must be two-address). 1434 // The spill is no longer needed. 1435 SII->second.pop_back(); 1436 if (SII->second.empty()) { 1437 SpillIdxes.erase(MBBId); 1438 SpillMBBs.reset(MBBId); 1439 } 1440 } 1441 } 1442 } 1443 1444 if (HasUse) { 1445 DenseMap<unsigned, std::vector<SRInfo> >::iterator SII = 1446 SpillIdxes.find(MBBId); 1447 if (SII != SpillIdxes.end() && 1448 SII->second.back().vreg == NewVReg && 1449 index > SII->second.back().index) 1450 // Use(s) following the last def, it's not safe to fold the spill. 1451 SII->second.back().canFold = false; 1452 DenseMap<unsigned, std::vector<SRInfo> >::iterator RII = 1453 RestoreIdxes.find(MBBId); 1454 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg) 1455 // If we are splitting live intervals, only fold if it's the first 1456 // use and there isn't another use later in the MBB. 1457 RII->second.back().canFold = false; 1458 else if (IsNew) { 1459 // Only need a reload if there isn't an earlier def / use. 
        if (RII == RestoreIdxes.end()) {
          std::vector<SRInfo> Infos;
          Infos.push_back(SRInfo(index, NewVReg, true));
          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
        } else {
          RII->second.push_back(SRInfo(index, NewVReg, true));
        }
        RestoreMBBs.set(MBBId);
      }
    }

    // Update spill weight.
    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
  }

  if (NewVReg && TrySplit && AllCanFold) {
    // If all of its def / use can be folded, give it a low spill weight.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    nI.weight /= 10.0F;
  }
}

bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return false;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index &&
        Restores[i].vreg == vr &&
        Restores[i].canFold)
      return true;
  return false;
}

void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
                                     unsigned vr, BitVector &RestoreMBBs,
                        DenseMap<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
  if (!RestoreMBBs[Id])
    return;
  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
    if (Restores[i].index == index && Restores[i].vreg == vr)
      Restores[i].index = SlotIndex();
}

/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
/// spilled and create empty intervals for their uses.
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be a use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
1529 unsigned NewVReg = mri_->createVirtualRegister(rc); 1530 vrm.grow(); 1531 vrm.setIsImplicitlyDefined(NewVReg); 1532 NewLIs.push_back(&getOrCreateInterval(NewVReg)); 1533 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 1534 MachineOperand &MO = MI->getOperand(i); 1535 if (MO.isReg() && MO.getReg() == li.reg) { 1536 MO.setReg(NewVReg); 1537 MO.setIsUndef(); 1538 } 1539 } 1540 } 1541 } 1542} 1543 1544std::vector<LiveInterval*> LiveIntervals:: 1545addIntervalsForSpillsFast(const LiveInterval &li, 1546 const MachineLoopInfo *loopInfo, 1547 VirtRegMap &vrm) { 1548 unsigned slot = vrm.assignVirt2StackSlot(li.reg); 1549 1550 std::vector<LiveInterval*> added; 1551 1552 assert(li.weight != HUGE_VALF && 1553 "attempt to spill already spilled interval!"); 1554 1555 DEBUG({ 1556 errs() << "\t\t\t\tadding intervals for spills for interval: "; 1557 li.dump(); 1558 errs() << '\n'; 1559 }); 1560 1561 const TargetRegisterClass* rc = mri_->getRegClass(li.reg); 1562 1563 MachineRegisterInfo::reg_iterator RI = mri_->reg_begin(li.reg); 1564 while (RI != mri_->reg_end()) { 1565 MachineInstr* MI = &*RI; 1566 1567 SmallVector<unsigned, 2> Indices; 1568 bool HasUse = false; 1569 bool HasDef = false; 1570 1571 for (unsigned i = 0; i != MI->getNumOperands(); ++i) { 1572 MachineOperand& mop = MI->getOperand(i); 1573 if (!mop.isReg() || mop.getReg() != li.reg) continue; 1574 1575 HasUse |= MI->getOperand(i).isUse(); 1576 HasDef |= MI->getOperand(i).isDef(); 1577 1578 Indices.push_back(i); 1579 } 1580 1581 if (!tryFoldMemoryOperand(MI, vrm, NULL, getInstructionIndex(MI), 1582 Indices, true, slot, li.reg)) { 1583 unsigned NewVReg = mri_->createVirtualRegister(rc); 1584 vrm.grow(); 1585 vrm.assignVirt2StackSlot(NewVReg, slot); 1586 1587 // create a new register for this spill 1588 LiveInterval &nI = getOrCreateInterval(NewVReg); 1589 1590 // the spill weight is now infinity as it 1591 // cannot be spilled again 1592 nI.weight = HUGE_VALF; 1593 1594 // Rewrite register operands to use the new vreg. 1595 for (SmallVectorImpl<unsigned>::iterator I = Indices.begin(), 1596 E = Indices.end(); I != E; ++I) { 1597 MI->getOperand(*I).setReg(NewVReg); 1598 1599 if (MI->getOperand(*I).isUse()) 1600 MI->getOperand(*I).setIsKill(true); 1601 } 1602 1603 // Fill in the new live interval. 
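      // The new interval covers only this instruction: a reload range ending
      // at the use slot and/or a spill range starting at the def slot.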
1604      SlotIndex index = getInstructionIndex(MI);
1605      if (HasUse) {
1606        LiveRange LR(index.getLoadIndex(), index.getUseIndex(),
1607                     nI.getNextValue(SlotIndex(), 0, false,
1608                                     getVNInfoAllocator()));
1609        DEBUG(errs() << " +" << LR);
1610        nI.addRange(LR);
1611        vrm.addRestorePoint(NewVReg, MI);
1612      }
1613      if (HasDef) {
1614        LiveRange LR(index.getDefIndex(), index.getStoreIndex(),
1615                     nI.getNextValue(SlotIndex(), 0, false,
1616                                     getVNInfoAllocator()));
1617        DEBUG(errs() << " +" << LR);
1618        nI.addRange(LR);
1619        vrm.addSpillPoint(NewVReg, true, MI);
1620      }
1621
1622      added.push_back(&nI);
1623
1624      DEBUG({
1625        errs() << "\t\t\t\tadded new interval: ";
1626        nI.dump();
1627        errs() << '\n';
1628      });
1629    }
1630
1631
1632    RI = mri_->reg_begin(li.reg);
1633  }
1634
1635  return added;
1636}
1637
1638std::vector<LiveInterval*> LiveIntervals::
1639addIntervalsForSpills(const LiveInterval &li,
1640                      SmallVectorImpl<LiveInterval*> &SpillIs,
1641                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1642
1643  if (EnableFastSpilling)
1644    return addIntervalsForSpillsFast(li, loopInfo, vrm);
1645
1646  assert(li.weight != HUGE_VALF &&
1647         "attempt to spill already spilled interval!");
1648
1649  DEBUG({
1650      errs() << "\t\t\t\tadding intervals for spills for interval: ";
1651      li.print(errs(), tri_);
1652      errs() << '\n';
1653    });
1654
1655  // Each bit specifies whether a spill is required in the MBB.
1656  BitVector SpillMBBs(mf_->getNumBlockIDs());
1657  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
1658  BitVector RestoreMBBs(mf_->getNumBlockIDs());
1659  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
1660  DenseMap<unsigned,unsigned> MBBVRegsMap;
1661  std::vector<LiveInterval*> NewLIs;
1662  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
1663
1664  unsigned NumValNums = li.getNumValNums();
1665  SmallVector<MachineInstr*, 4> ReMatDefs;
1666  ReMatDefs.resize(NumValNums, NULL);
1667  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1668  ReMatOrigDefs.resize(NumValNums, NULL);
1669  SmallVector<int, 4> ReMatIds;
1670  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1671  BitVector ReMatDelete(NumValNums);
1672  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1673
1674  // Spilling a split live interval. It cannot be split any further. It is
1675  // also guaranteed to be a single val# / range interval.
1676  if (vrm.getPreSplitReg(li.reg)) {
1677    vrm.setIsSplitFromReg(li.reg, 0);
1678    // Unset the split kill marker on the last use.
1679    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
1680    if (KillIdx != SlotIndex()) {
1681      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1682      assert(KillMI && "Last use disappeared?");
1683      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1684      assert(KillOp != -1 && "Last use disappeared?");
1685      KillMI->getOperand(KillOp).setIsKill(false);
1686    }
1687    vrm.removeKillPoint(li.reg);
1688    bool DefIsReMat = vrm.isReMaterialized(li.reg);
1689    Slot = vrm.getStackSlot(li.reg);
1690    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1691    MachineInstr *ReMatDefMI = DefIsReMat ?
1692 vrm.getReMaterializedMI(li.reg) : NULL; 1693 int LdSlot = 0; 1694 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); 1695 bool isLoad = isLoadSS || 1696 (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad())); 1697 bool IsFirstRange = true; 1698 for (LiveInterval::Ranges::const_iterator 1699 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) { 1700 // If this is a split live interval with multiple ranges, it means there 1701 // are two-address instructions that re-defined the value. Only the 1702 // first def can be rematerialized! 1703 if (IsFirstRange) { 1704 // Note ReMatOrigDefMI has already been deleted. 1705 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI, 1706 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, 1707 false, vrm, rc, ReMatIds, loopInfo, 1708 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1709 MBBVRegsMap, NewLIs); 1710 } else { 1711 rewriteInstructionsForSpills(li, false, I, NULL, 0, 1712 Slot, 0, false, false, false, 1713 false, vrm, rc, ReMatIds, loopInfo, 1714 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1715 MBBVRegsMap, NewLIs); 1716 } 1717 IsFirstRange = false; 1718 } 1719 1720 handleSpilledImpDefs(li, vrm, rc, NewLIs); 1721 return NewLIs; 1722 } 1723 1724 bool TrySplit = !intervalIsInOneMBB(li); 1725 if (TrySplit) 1726 ++numSplits; 1727 bool NeedStackSlot = false; 1728 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end(); 1729 i != e; ++i) { 1730 const VNInfo *VNI = *i; 1731 unsigned VN = VNI->id; 1732 if (VNI->isUnused()) 1733 continue; // Dead val#. 1734 // Is the def for the val# rematerializable? 1735 MachineInstr *ReMatDefMI = VNI->isDefAccurate() 1736 ? getInstructionFromIndex(VNI->def) : 0; 1737 bool dummy; 1738 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) { 1739 // Remember how to remat the def of this val#. 1740 ReMatOrigDefs[VN] = ReMatDefMI; 1741 // Original def may be modified so we have to make a copy here. 1742 MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI); 1743 CloneMIs.push_back(Clone); 1744 ReMatDefs[VN] = Clone; 1745 1746 bool CanDelete = true; 1747 if (VNI->hasPHIKill()) { 1748 // A kill is a phi node, not all of its uses can be rematerialized. 1749 // It must not be deleted. 1750 CanDelete = false; 1751 // Need a stack slot if there is any live range where uses cannot be 1752 // rematerialized. 1753 NeedStackSlot = true; 1754 } 1755 if (CanDelete) 1756 ReMatDelete.set(VN); 1757 } else { 1758 // Need a stack slot if there is any live range where uses cannot be 1759 // rematerialized. 1760 NeedStackSlot = true; 1761 } 1762 } 1763 1764 // One stack slot per live interval. 1765 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) { 1766 if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT) 1767 Slot = vrm.assignVirt2StackSlot(li.reg); 1768 1769 // This case only occurs when the prealloc splitter has already assigned 1770 // a stack slot to this vreg. 1771 else 1772 Slot = vrm.getStackSlot(li.reg); 1773 } 1774 1775 // Create new intervals and rewrite defs and uses. 
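  // Each live range of the original interval is rewritten in turn.  The
  // rematerialization data gathered above (ReMatDefs / ReMatOrigDefs /
  // ReMatDelete) and the assigned stack slot are threaded through, and the
  // candidate spill / restore points are collected in SpillIdxes /
  // RestoreIdxes for the folding code below.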
1776 for (LiveInterval::Ranges::const_iterator 1777 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) { 1778 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id]; 1779 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id]; 1780 bool DefIsReMat = ReMatDefMI != NULL; 1781 bool CanDelete = ReMatDelete[I->valno->id]; 1782 int LdSlot = 0; 1783 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); 1784 bool isLoad = isLoadSS || 1785 (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad()); 1786 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI, 1787 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, 1788 CanDelete, vrm, rc, ReMatIds, loopInfo, 1789 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1790 MBBVRegsMap, NewLIs); 1791 } 1792 1793 // Insert spills / restores if we are splitting. 1794 if (!TrySplit) { 1795 handleSpilledImpDefs(li, vrm, rc, NewLIs); 1796 return NewLIs; 1797 } 1798 1799 SmallPtrSet<LiveInterval*, 4> AddedKill; 1800 SmallVector<unsigned, 2> Ops; 1801 if (NeedStackSlot) { 1802 int Id = SpillMBBs.find_first(); 1803 while (Id != -1) { 1804 std::vector<SRInfo> &spills = SpillIdxes[Id]; 1805 for (unsigned i = 0, e = spills.size(); i != e; ++i) { 1806 SlotIndex index = spills[i].index; 1807 unsigned VReg = spills[i].vreg; 1808 LiveInterval &nI = getOrCreateInterval(VReg); 1809 bool isReMat = vrm.isReMaterialized(VReg); 1810 MachineInstr *MI = getInstructionFromIndex(index); 1811 bool CanFold = false; 1812 bool FoundUse = false; 1813 Ops.clear(); 1814 if (spills[i].canFold) { 1815 CanFold = true; 1816 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) { 1817 MachineOperand &MO = MI->getOperand(j); 1818 if (!MO.isReg() || MO.getReg() != VReg) 1819 continue; 1820 1821 Ops.push_back(j); 1822 if (MO.isDef()) 1823 continue; 1824 if (isReMat || 1825 (!FoundUse && !alsoFoldARestore(Id, index, VReg, 1826 RestoreMBBs, RestoreIdxes))) { 1827 // MI has two-address uses of the same register. If the use 1828 // isn't the first and only use in the BB, then we can't fold 1829 // it. FIXME: Move this to rewriteInstructionsForSpills. 1830 CanFold = false; 1831 break; 1832 } 1833 FoundUse = true; 1834 } 1835 } 1836 // Fold the store into the def if possible. 1837 bool Folded = false; 1838 if (CanFold && !Ops.empty()) { 1839 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){ 1840 Folded = true; 1841 if (FoundUse) { 1842 // Also folded uses, do not issue a load. 1843 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes); 1844 nI.removeRange(index.getLoadIndex(), index.getDefIndex()); 1845 } 1846 nI.removeRange(index.getDefIndex(), index.getStoreIndex()); 1847 } 1848 } 1849 1850 // Otherwise tell the spiller to issue a spill. 1851 if (!Folded) { 1852 LiveRange *LR = &nI.ranges[nI.ranges.size()-1]; 1853 bool isKill = LR->end == index.getStoreIndex(); 1854 if (!MI->registerDefIsDead(nI.reg)) 1855 // No need to spill a dead def. 
1856 vrm.addSpillPoint(VReg, isKill, MI); 1857 if (isKill) 1858 AddedKill.insert(&nI); 1859 } 1860 } 1861 Id = SpillMBBs.find_next(Id); 1862 } 1863 } 1864 1865 int Id = RestoreMBBs.find_first(); 1866 while (Id != -1) { 1867 std::vector<SRInfo> &restores = RestoreIdxes[Id]; 1868 for (unsigned i = 0, e = restores.size(); i != e; ++i) { 1869 SlotIndex index = restores[i].index; 1870 if (index == SlotIndex()) 1871 continue; 1872 unsigned VReg = restores[i].vreg; 1873 LiveInterval &nI = getOrCreateInterval(VReg); 1874 bool isReMat = vrm.isReMaterialized(VReg); 1875 MachineInstr *MI = getInstructionFromIndex(index); 1876 bool CanFold = false; 1877 Ops.clear(); 1878 if (restores[i].canFold) { 1879 CanFold = true; 1880 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) { 1881 MachineOperand &MO = MI->getOperand(j); 1882 if (!MO.isReg() || MO.getReg() != VReg) 1883 continue; 1884 1885 if (MO.isDef()) { 1886 // If this restore were to be folded, it would have been folded 1887 // already. 1888 CanFold = false; 1889 break; 1890 } 1891 Ops.push_back(j); 1892 } 1893 } 1894 1895 // Fold the load into the use if possible. 1896 bool Folded = false; 1897 if (CanFold && !Ops.empty()) { 1898 if (!isReMat) 1899 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg); 1900 else { 1901 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg); 1902 int LdSlot = 0; 1903 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); 1904 // If the rematerializable def is a load, also try to fold it. 1905 if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad()) 1906 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, 1907 Ops, isLoadSS, LdSlot, VReg); 1908 if (!Folded) { 1909 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI); 1910 if (ImpUse) { 1911 // Re-matting an instruction with virtual register use. Add the 1912 // register as an implicit use on the use MI and update the register 1913 // interval's spill weight to HUGE_VALF to prevent it from being 1914 // spilled. 1915 LiveInterval &ImpLi = getInterval(ImpUse); 1916 ImpLi.weight = HUGE_VALF; 1917 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true)); 1918 } 1919 } 1920 } 1921 } 1922 // If folding is not possible / failed, then tell the spiller to issue a 1923 // load / rematerialization for us. 1924 if (Folded) 1925 nI.removeRange(index.getLoadIndex(), index.getDefIndex()); 1926 else 1927 vrm.addRestorePoint(VReg, MI); 1928 } 1929 Id = RestoreMBBs.find_next(Id); 1930 } 1931 1932 // Finalize intervals: add kills, finalize spill weights, and filter out 1933 // dead intervals. 1934 std::vector<LiveInterval*> RetNewLIs; 1935 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) { 1936 LiveInterval *LI = NewLIs[i]; 1937 if (!LI->empty()) { 1938 LI->weight /= SlotIndex::NUM * getApproximateInstructionCount(*LI); 1939 if (!AddedKill.count(LI)) { 1940 LiveRange *LR = &LI->ranges[LI->ranges.size()-1]; 1941 SlotIndex LastUseIdx = LR->end.getBaseIndex(); 1942 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx); 1943 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false); 1944 assert(UseIdx != -1); 1945 if (!LastUse->isRegTiedToDefOperand(UseIdx)) { 1946 LastUse->getOperand(UseIdx).setIsKill(); 1947 vrm.addKillPoint(LI->reg, LastUseIdx); 1948 } 1949 } 1950 RetNewLIs.push_back(LI); 1951 } 1952 } 1953 1954 handleSpilledImpDefs(li, vrm, rc, RetNewLIs); 1955 return RetNewLIs; 1956} 1957 1958/// hasAllocatableSuperReg - Return true if the specified physical register has 1959/// any super register that's allocatable. 
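// A minimal illustrative sketch (the concrete registers are an assumption,
// not taken from this file): on an X86-like target where AX, EAX and RAX are
// nested super-registers,
//
//   hasAllocatableSuperReg(AX)  -> true if EAX or RAX is allocatable and
//                                  currently has a live interval;
//   getRepresentativeReg(AX)    -> the widest super-register that has an
//                                  interval but no allocatable super-register
//                                  above it (e.g. RAX), or AX itself if none.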
1960bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
1961  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
1962    if (allocatableRegs_[*AS] && hasInterval(*AS))
1963      return true;
1964  return false;
1965}
1966
1967/// getRepresentativeReg - Find the largest super register of the specified
1968/// physical register.
1969unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
1970  // Find the largest super-register that is allocatable.
1971  unsigned BestReg = Reg;
1972  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
1973    unsigned SuperReg = *AS;
1974    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
1975      BestReg = SuperReg;
1976      break;
1977    }
1978  }
1979  return BestReg;
1980}
1981
1982/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
1983/// specified interval that conflict with the specified physical register.
1984unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
1985                                                   unsigned PhysReg) const {
1986  unsigned NumConflicts = 0;
1987  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
1988  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
1989         E = mri_->reg_end(); I != E; ++I) {
1990    MachineOperand &O = I.getOperand();
1991    MachineInstr *MI = O.getParent();
1992    SlotIndex Index = getInstructionIndex(MI);
1993    if (pli.liveAt(Index))
1994      ++NumConflicts;
1995  }
1996  return NumConflicts;
1997}
1998
1999/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
2000/// around all defs and uses of the specified interval. Return true if it
2001/// was able to cut its interval.
2002bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
2003                                                  unsigned PhysReg, VirtRegMap &vrm) {
2004  unsigned SpillReg = getRepresentativeReg(PhysReg);
2005
2006  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
2007    // Assert if there is any register which aliases PhysReg but is not a
2008    // sub-register of the chosen representative super register; we cannot
2009    // handle that case yet.
2010 assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) || 2011 tri_->isSuperRegister(*AS, SpillReg)); 2012 2013 bool Cut = false; 2014 SmallVector<unsigned, 4> PRegs; 2015 if (hasInterval(SpillReg)) 2016 PRegs.push_back(SpillReg); 2017 else { 2018 SmallSet<unsigned, 4> Added; 2019 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) 2020 if (Added.insert(*AS) && hasInterval(*AS)) { 2021 PRegs.push_back(*AS); 2022 for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS) 2023 Added.insert(*ASS); 2024 } 2025 } 2026 2027 SmallPtrSet<MachineInstr*, 8> SeenMIs; 2028 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg), 2029 E = mri_->reg_end(); I != E; ++I) { 2030 MachineOperand &O = I.getOperand(); 2031 MachineInstr *MI = O.getParent(); 2032 if (SeenMIs.count(MI)) 2033 continue; 2034 SeenMIs.insert(MI); 2035 SlotIndex Index = getInstructionIndex(MI); 2036 for (unsigned i = 0, e = PRegs.size(); i != e; ++i) { 2037 unsigned PReg = PRegs[i]; 2038 LiveInterval &pli = getInterval(PReg); 2039 if (!pli.liveAt(Index)) 2040 continue; 2041 vrm.addEmergencySpill(PReg, MI); 2042 SlotIndex StartIdx = Index.getLoadIndex(); 2043 SlotIndex EndIdx = Index.getNextIndex().getBaseIndex(); 2044 if (pli.isInOneLiveRange(StartIdx, EndIdx)) { 2045 pli.removeRange(StartIdx, EndIdx); 2046 Cut = true; 2047 } else { 2048 std::string msg; 2049 raw_string_ostream Msg(msg); 2050 Msg << "Ran out of registers during register allocation!"; 2051 if (MI->getOpcode() == TargetInstrInfo::INLINEASM) { 2052 Msg << "\nPlease check your inline asm statement for invalid " 2053 << "constraints:\n"; 2054 MI->print(Msg, tm_); 2055 } 2056 llvm_report_error(Msg.str()); 2057 } 2058 for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) { 2059 if (!hasInterval(*AS)) 2060 continue; 2061 LiveInterval &spli = getInterval(*AS); 2062 if (spli.liveAt(Index)) 2063 spli.removeRange(Index.getLoadIndex(), 2064 Index.getNextIndex().getBaseIndex()); 2065 } 2066 } 2067 } 2068 return Cut; 2069} 2070 2071LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg, 2072 MachineInstr* startInst) { 2073 LiveInterval& Interval = getOrCreateInterval(reg); 2074 VNInfo* VN = Interval.getNextValue( 2075 SlotIndex(getInstructionIndex(startInst).getDefIndex()), 2076 startInst, true, getVNInfoAllocator()); 2077 VN->setHasPHIKill(true); 2078 VN->kills.push_back(indexes_->getTerminatorGap(startInst->getParent())); 2079 LiveRange LR( 2080 SlotIndex(getInstructionIndex(startInst).getDefIndex()), 2081 getMBBEndIdx(startInst->getParent()).getNextIndex().getBaseIndex(), VN); 2082 Interval.addRange(LR); 2083 2084 return LR; 2085} 2086 2087
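// A hedged usage sketch, not part of this translation unit: a register
// allocator client (e.g. RegAllocLinearScan.cpp in this era) drives the
// spilling interface above.  When an interval `cur` must be spilled it does
// roughly the following (names and exact types here are illustrative only):
//
//   SmallVector<LiveInterval*, 8> SpillIs;   // intervals spilled along with it
//   std::vector<LiveInterval*> Added =
//       li_->addIntervalsForSpills(*cur, SpillIs, loopInfo, *vrm_);
//   // ...the returned intervals are re-queued for allocation...
//
// and when no physical register can be freed any other way, it falls back to
// spillPhysRegAroundRegDefsUses() to evict one around cur's defs and uses.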