LiveIntervalAnalysis.cpp revision 70e40deb364a03de234fe587934fb671fa16ce6b
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the LiveInterval analysis pass which is used 11// by the Linear Scan Register allocator. This pass linearizes the 12// basic blocks of the function in DFS order and uses the 13// LiveVariables pass to conservatively compute live intervals for 14// each virtual and physical register. 15// 16//===----------------------------------------------------------------------===// 17 18#define DEBUG_TYPE "liveintervals" 19#include "llvm/CodeGen/LiveIntervalAnalysis.h" 20#include "VirtRegMap.h" 21#include "llvm/Value.h" 22#include "llvm/CodeGen/LiveVariables.h" 23#include "llvm/CodeGen/MachineFrameInfo.h" 24#include "llvm/CodeGen/MachineInstr.h" 25#include "llvm/CodeGen/MachineLoopInfo.h" 26#include "llvm/CodeGen/MachineRegisterInfo.h" 27#include "llvm/CodeGen/Passes.h" 28#include "llvm/Target/TargetRegisterInfo.h" 29#include "llvm/Target/TargetInstrInfo.h" 30#include "llvm/Target/TargetMachine.h" 31#include "llvm/Support/CommandLine.h" 32#include "llvm/Support/Debug.h" 33#include "llvm/ADT/Statistic.h" 34#include "llvm/ADT/STLExtras.h" 35#include <algorithm> 36#include <cmath> 37using namespace llvm; 38 39namespace { 40 // Hidden options for help debugging. 
41 cl::opt<bool> DisableReMat("disable-rematerialization", 42 cl::init(false), cl::Hidden); 43 44 cl::opt<bool> SplitAtBB("split-intervals-at-bb", 45 cl::init(true), cl::Hidden); 46 cl::opt<int> SplitLimit("split-limit", 47 cl::init(-1), cl::Hidden); 48} 49 50STATISTIC(numIntervals, "Number of original intervals"); 51STATISTIC(numIntervalsAfter, "Number of intervals after coalescing"); 52STATISTIC(numFolds , "Number of loads/stores folded into instructions"); 53STATISTIC(numSplits , "Number of intervals split"); 54 55char LiveIntervals::ID = 0; 56namespace { 57 RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis"); 58} 59 60void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const { 61 AU.addPreserved<LiveVariables>(); 62 AU.addRequired<LiveVariables>(); 63 AU.addPreservedID(MachineLoopInfoID); 64 AU.addPreservedID(MachineDominatorsID); 65 AU.addPreservedID(PHIEliminationID); 66 AU.addRequiredID(PHIEliminationID); 67 AU.addRequiredID(TwoAddressInstructionPassID); 68 MachineFunctionPass::getAnalysisUsage(AU); 69} 70 71void LiveIntervals::releaseMemory() { 72 Idx2MBBMap.clear(); 73 mi2iMap_.clear(); 74 i2miMap_.clear(); 75 r2iMap_.clear(); 76 // Release VNInfo memroy regions after all VNInfo objects are dtor'd. 77 VNInfoAllocator.Reset(); 78 for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i) 79 delete ClonedMIs[i]; 80} 81 82/// runOnMachineFunction - Register allocate the whole function 83/// 84bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) { 85 mf_ = &fn; 86 mri_ = &mf_->getRegInfo(); 87 tm_ = &fn.getTarget(); 88 tri_ = tm_->getRegisterInfo(); 89 tii_ = tm_->getInstrInfo(); 90 lv_ = &getAnalysis<LiveVariables>(); 91 allocatableRegs_ = tri_->getAllocatableSet(fn); 92 93 // Number MachineInstrs and MachineBasicBlocks. 94 // Initialize MBB indexes to a sentinal. 
95 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U)); 96 97 unsigned MIIndex = 0; 98 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end(); 99 MBB != E; ++MBB) { 100 unsigned StartIdx = MIIndex; 101 102 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end(); 103 I != E; ++I) { 104 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second; 105 assert(inserted && "multiple MachineInstr -> index mappings"); 106 i2miMap_.push_back(I); 107 MIIndex += InstrSlots::NUM; 108 } 109 110 // Set the MBB2IdxMap entry for this MBB. 111 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1); 112 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB)); 113 } 114 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare()); 115 116 computeIntervals(); 117 118 numIntervals += getNumIntervals(); 119 120 DOUT << "********** INTERVALS **********\n"; 121 for (iterator I = begin(), E = end(); I != E; ++I) { 122 I->second.print(DOUT, tri_); 123 DOUT << "\n"; 124 } 125 126 numIntervalsAfter += getNumIntervals(); 127 DEBUG(dump()); 128 return true; 129} 130 131/// print - Implement the dump method. 132void LiveIntervals::print(std::ostream &O, const Module* ) const { 133 O << "********** INTERVALS **********\n"; 134 for (const_iterator I = begin(), E = end(); I != E; ++I) { 135 I->second.print(DOUT, tri_); 136 DOUT << "\n"; 137 } 138 139 O << "********** MACHINEINSTRS **********\n"; 140 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end(); 141 mbbi != mbbe; ++mbbi) { 142 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n"; 143 for (MachineBasicBlock::iterator mii = mbbi->begin(), 144 mie = mbbi->end(); mii != mie; ++mii) { 145 O << getInstructionIndex(mii) << '\t' << *mii; 146 } 147 } 148} 149 150/// conflictsWithPhysRegDef - Returns true if the specified register 151/// is defined during the duration of the specified interval. 
/// conflictsWithPhysRegDef - Walk every instruction covered by li's live
/// ranges and return true if any of them defines (directly or through a
/// virtual register's assigned physreg) a register overlapping 'reg'.
/// Copies to/from li.reg itself are ignored.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // Skip deleted instructions (indices with no instruction mapped).
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      // A copy involving li.reg itself is not a conflict.
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        // Map virtual registers through the allocator's assignment.
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// printRegName - Print a physical register by name or a virtual register
/// as %reg<N>, to cerr (debug aid).
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

/// handleVirtualRegisterDef - Update 'interval' for a definition of a
/// virtual register at instruction index MIIdx.  The first def seen for a
/// vreg builds its ranges from LiveVariables' kill/alive-block info; later
/// defs are the result of two-address or PHI elimination and split or
/// extend the existing ranges.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    // Remember the defining copy (if any) so coalescing can find it later.
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block. In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        // Defining instruction is its own kill: dead def.
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
        if (!MBB->empty()) {
          LiveRange LR(getMBBStartIdx(i),
                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                       ValNo);
          interval.addRange(LR);
          DOUT << " +" << LR;
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (lv_->RegisterDefIsDead(mi, interval.reg))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

/// handlePhysicalRegisterDef - Update 'interval' for a definition of a
/// physical register at MIIdx.  CopyMI, if non-null, is the copy
/// instruction producing the value (recorded on the new val#).
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (lv_->RegisterDefIsDead(mi, interval.reg)) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (lv_->KillsRegister(mi, interval.reg)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// handleRegisterDef - Dispatch a register definition to the virtual- or
/// physical-register handler.  A physreg def also defines each of its
/// sub-registers (unless the instruction defines the subreg explicitly,
/// in which case it will be handled on its own).
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      unsigned reg) {
  if (TargetRegisterInfo::isVirtualRegister(reg))
    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
  else if (allocatableRegs_[reg]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
      // Avoid processing some defs more than once.
      if (!MI->findRegisterDefOperand(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
  }
}

/// handleLiveInRegister - Create an interval for a register that is live
/// into MBB (from a predecessor or the function entry).  isAlias is true
/// when 'interval' is for a sub-register aliasing the actual live-in reg.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (lv_->KillsRegister(mi, interval.reg)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (lv_->ModifiesRegister(mi, interval.reg)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      // An unused alias is dead at the block entry.
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      // Neither killed nor redefined: live through the whole block.
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs (iterating operands in reverse order).
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
      }

      MIIndex += InstrSlots::NUM;
    }
  }
}

/// findLiveInMBBs - Append to MBBs every basic block whose start index
/// falls inside [LR.start, LR.end).  Returns true if any were found.
/// Relies on Idx2MBBMap being sorted by start index.
bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

/// createInterval - Make a fresh interval for 'reg'.  Physical registers
/// get an effectively infinite spill weight so they are never spilled.
LiveInterval LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    // NOTE(review): because of the 'break' below this assert can never
    // fire (RegOp is still 0 on the first qualifying operand) -- a second
    // register operand is silently ignored.  Verify intent.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
    break;
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  // NOTE(review): assumes MI's index is covered by li -- the result of
  // FindLiveRangeContaining is dereferenced without a null/end check.
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  isLoad = false;
  const TargetInstrDesc &TID = MI->getDesc();
  if (TID.isImplicitDef())
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determines
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  if (tii_->isTriviallyReMaterializable(MI)) {
    isLoad = TID.isSimpleLoad();

    // If the instruction has an implicit virtual register use, the value it
    // provides must be available at every place li.reg is used.
    unsigned ImpUse = getReMatImplicitUse(li, MI);
    if (ImpUse) {
      const LiveInterval &ImpLi = getInterval(ImpUse);
      for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
             re = mri_->use_end(); ri != re; ++ri) {
        MachineInstr *UseMI = &*ri;
        unsigned UseIdx = getInstructionIndex(UseMI);
        // Only uses reached by this val# matter.
        if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
          continue;
        if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
          return false;
      }
    }
    return true;
  }

  return false;
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false; // ~0u marks a def with no defining instruction.
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  const TargetInstrDesc &TID = MI->getDesc();
  // If it is an implicit def instruction, just delete it.
  if (TID.isImplicitDef()) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // Can't fold a load from fixed stack slot into a two address instruction.
  if (isSS && DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      fmi->copyKillDeadInfo(MI, tri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    // Rewire the index maps from the old instruction to the folded one.
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMatLoadSS) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // Can't fold a load from fixed stack slot into a two address instruction.
  if (ReMatLoadSS && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

/// intervalIsInOneMBB - Returns true if all of li's live ranges fall
/// within a single basic block.
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
826void LiveIntervals::rewriteImplicitOps(const LiveInterval &li, 827 MachineInstr *MI, unsigned NewVReg, 828 VirtRegMap &vrm) { 829 // There is an implicit use. That means one of the other operand is 830 // being remat'ed and the remat'ed instruction has li.reg as an 831 // use operand. Make sure we rewrite that as well. 832 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { 833 MachineOperand &MO = MI->getOperand(i); 834 if (!MO.isRegister()) 835 continue; 836 unsigned Reg = MO.getReg(); 837 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg)) 838 continue; 839 if (!vrm.isReMaterialized(Reg)) 840 continue; 841 MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg); 842 int OpIdx = ReMatMI->findRegisterUseOperandIdx(li.reg); 843 if (OpIdx != -1) 844 ReMatMI->getOperand(OpIdx).setReg(NewVReg); 845 } 846} 847 848/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions 849/// for addIntervalsForSpills to rewrite uses / defs for the given live range. 
/// rewriteInstructionForSpills - Rewrite the operands of MI that refer to
/// li.reg so they use NewVReg instead, creating NewVReg on first use and
/// recording spill / remat bookkeeping in the VirtRegMap.  If the def is the
/// rematerializable original and all uses are remat'ed, MI is erased outright.
/// Attempts to fold the stack load / store into MI when not splitting.
/// HasDef / HasUse report whether this instruction defines / uses the
/// register; the return value reports the foldability computed for the
/// operand group examined (the caller accumulates it across instructions).
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  // Note: getNumOperands() is re-evaluated each iteration because folding
  // below can rewrite MI in place (see the goto back to this label).
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        DOUT << MI << '\n';
        unsigned ImpUse = getReMatImplicitUse(li, MI);
        if (ImpUse) {
          // To be deleted MI has a virtual register operand, update the
          // spill weight of the register interval.
          unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent());
          LiveInterval &ImpLi = getInterval(ImpUse);
          ImpLi.weight -=
            getSpillWeight(false, true, loopDepth) / ImpLi.getSize();
        }
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    // Ops collects every operand index of MI that refers to li.reg so all of
    // them are rewritten together below.
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          goto RestartInstruction;
        }
      } else {
        // When splitting, only record whether a fold would be possible; the
        // actual fold is deferred until spill / restore points are chosen.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat && isLoadSS);
      }
    } else
      CanFold = false;

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      // Remember which new vreg was last used in this block so later
      // instructions in the same MBB can reuse it when splitting.
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // New interval: cover from the (potential) reload point up through
        // the use slot of this instruction.
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      // Def gets a short range from the def slot to the (potential) spill
      // store slot with a fresh value number.
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

/// anyKillInMBBAfterIdx - Return true if the interval's value VNI has a kill
/// recorded inside MBB at an index strictly after Idx (and before the MBB's
/// end index).
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// findDefinedVNInfo - Return the value number of li that is defined at
/// exactly index DefIdx, or NULL if no value number has that def index.
static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
  const VNInfo *VNI = NULL;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
         e = li.vni_end(); i != e; ++i)
    if ((*i)->def == DefIdx) {
      VNI = *i;
      break;
    }
  return VNI;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
1071struct RewriteInfo { 1072 unsigned Index; 1073 MachineInstr *MI; 1074 bool HasUse; 1075 bool HasDef; 1076 RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d) 1077 : Index(i), MI(mi), HasUse(u), HasDef(d) {} 1078}; 1079 1080struct RewriteInfoCompare { 1081 bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const { 1082 return LHS.Index < RHS.Index; 1083 } 1084}; 1085 1086void LiveIntervals:: 1087rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit, 1088 LiveInterval::Ranges::const_iterator &I, 1089 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI, 1090 unsigned Slot, int LdSlot, 1091 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete, 1092 VirtRegMap &vrm, 1093 const TargetRegisterClass* rc, 1094 SmallVector<int, 4> &ReMatIds, 1095 const MachineLoopInfo *loopInfo, 1096 BitVector &SpillMBBs, 1097 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes, 1098 BitVector &RestoreMBBs, 1099 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes, 1100 std::map<unsigned,unsigned> &MBBVRegsMap, 1101 std::vector<LiveInterval*> &NewLIs) { 1102 bool AllCanFold = true; 1103 unsigned NewVReg = 0; 1104 unsigned start = getBaseIndex(I->start); 1105 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM; 1106 1107 // First collect all the def / use in this live range that will be rewritten. 1108 // Make sure they are sorted according instruction index. 1109 std::vector<RewriteInfo> RewriteMIs; 1110 for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg), 1111 re = mri_->reg_end(); ri != re; ) { 1112 MachineInstr *MI = &(*ri); 1113 MachineOperand &O = ri.getOperand(); 1114 ++ri; 1115 unsigned index = getInstructionIndex(MI); 1116 if (index < start || index >= end) 1117 continue; 1118 RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef())); 1119 } 1120 std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare()); 1121 1122 unsigned ImpUse = DefIsReMat ? 
getReMatImplicitUse(li, ReMatDefMI) : 0; 1123 // Now rewrite the defs and uses. 1124 for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) { 1125 RewriteInfo &rwi = RewriteMIs[i]; 1126 ++i; 1127 unsigned index = rwi.Index; 1128 bool MIHasUse = rwi.HasUse; 1129 bool MIHasDef = rwi.HasDef; 1130 MachineInstr *MI = rwi.MI; 1131 // If MI def and/or use the same register multiple times, then there 1132 // are multiple entries. 1133 unsigned NumUses = MIHasUse; 1134 while (i != e && RewriteMIs[i].MI == MI) { 1135 assert(RewriteMIs[i].Index == index); 1136 bool isUse = RewriteMIs[i].HasUse; 1137 if (isUse) ++NumUses; 1138 MIHasUse |= isUse; 1139 MIHasDef |= RewriteMIs[i].HasDef; 1140 ++i; 1141 } 1142 MachineBasicBlock *MBB = MI->getParent(); 1143 1144 if (ImpUse && MI != ReMatDefMI) { 1145 // Re-matting an instruction with virtual register use. Update the 1146 // register interval's spill weight. 1147 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent()); 1148 LiveInterval &ImpLi = getInterval(ImpUse); 1149 ImpLi.weight += 1150 getSpillWeight(false, true, loopDepth) * NumUses / ImpLi.getSize(); 1151 } 1152 1153 unsigned MBBId = MBB->getNumber(); 1154 unsigned ThisVReg = 0; 1155 if (TrySplit) { 1156 std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId); 1157 if (NVI != MBBVRegsMap.end()) { 1158 ThisVReg = NVI->second; 1159 // One common case: 1160 // x = use 1161 // ... 1162 // ... 1163 // def = ... 1164 // = use 1165 // It's better to start a new interval to avoid artifically 1166 // extend the new interval. 1167 if (MIHasDef && !MIHasUse) { 1168 MBBVRegsMap.erase(MBB->getNumber()); 1169 ThisVReg = 0; 1170 } 1171 } 1172 } 1173 1174 bool IsNew = ThisVReg == 0; 1175 if (IsNew) { 1176 // This ends the previous live interval. If all of its def / use 1177 // can be folded, give it a low spill weight. 
1178 if (NewVReg && TrySplit && AllCanFold) { 1179 LiveInterval &nI = getOrCreateInterval(NewVReg); 1180 nI.weight /= 10.0F; 1181 } 1182 AllCanFold = true; 1183 } 1184 NewVReg = ThisVReg; 1185 1186 bool HasDef = false; 1187 bool HasUse = false; 1188 bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit, 1189 index, end, MI, ReMatOrigDefMI, ReMatDefMI, 1190 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, 1191 CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg, 1192 ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs); 1193 if (!HasDef && !HasUse) 1194 continue; 1195 1196 AllCanFold &= CanFold; 1197 1198 // Update weight of spill interval. 1199 LiveInterval &nI = getOrCreateInterval(NewVReg); 1200 if (!TrySplit) { 1201 // The spill weight is now infinity as it cannot be spilled again. 1202 nI.weight = HUGE_VALF; 1203 continue; 1204 } 1205 1206 // Keep track of the last def and first use in each MBB. 1207 if (HasDef) { 1208 if (MI != ReMatOrigDefMI || !CanDelete) { 1209 bool HasKill = false; 1210 if (!HasUse) 1211 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index)); 1212 else { 1213 // If this is a two-address code, then this index starts a new VNInfo. 1214 const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index)); 1215 if (VNI) 1216 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index)); 1217 } 1218 std::map<unsigned, std::vector<SRInfo> >::iterator SII = 1219 SpillIdxes.find(MBBId); 1220 if (!HasKill) { 1221 if (SII == SpillIdxes.end()) { 1222 std::vector<SRInfo> S; 1223 S.push_back(SRInfo(index, NewVReg, true)); 1224 SpillIdxes.insert(std::make_pair(MBBId, S)); 1225 } else if (SII->second.back().vreg != NewVReg) { 1226 SII->second.push_back(SRInfo(index, NewVReg, true)); 1227 } else if ((int)index > SII->second.back().index) { 1228 // If there is an earlier def and this is a two-address 1229 // instruction, then it's not possible to fold the store (which 1230 // would also fold the load). 
1231 SRInfo &Info = SII->second.back(); 1232 Info.index = index; 1233 Info.canFold = !HasUse; 1234 } 1235 SpillMBBs.set(MBBId); 1236 } else if (SII != SpillIdxes.end() && 1237 SII->second.back().vreg == NewVReg && 1238 (int)index > SII->second.back().index) { 1239 // There is an earlier def that's not killed (must be two-address). 1240 // The spill is no longer needed. 1241 SII->second.pop_back(); 1242 if (SII->second.empty()) { 1243 SpillIdxes.erase(MBBId); 1244 SpillMBBs.reset(MBBId); 1245 } 1246 } 1247 } 1248 } 1249 1250 if (HasUse) { 1251 std::map<unsigned, std::vector<SRInfo> >::iterator SII = 1252 SpillIdxes.find(MBBId); 1253 if (SII != SpillIdxes.end() && 1254 SII->second.back().vreg == NewVReg && 1255 (int)index > SII->second.back().index) 1256 // Use(s) following the last def, it's not safe to fold the spill. 1257 SII->second.back().canFold = false; 1258 std::map<unsigned, std::vector<SRInfo> >::iterator RII = 1259 RestoreIdxes.find(MBBId); 1260 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg) 1261 // If we are splitting live intervals, only fold if it's the first 1262 // use and there isn't another use later in the MBB. 1263 RII->second.back().canFold = false; 1264 else if (IsNew) { 1265 // Only need a reload if there isn't an earlier def / use. 1266 if (RII == RestoreIdxes.end()) { 1267 std::vector<SRInfo> Infos; 1268 Infos.push_back(SRInfo(index, NewVReg, true)); 1269 RestoreIdxes.insert(std::make_pair(MBBId, Infos)); 1270 } else { 1271 RII->second.push_back(SRInfo(index, NewVReg, true)); 1272 } 1273 RestoreMBBs.set(MBBId); 1274 } 1275 } 1276 1277 // Update spill weight. 1278 unsigned loopDepth = loopInfo->getLoopDepth(MBB); 1279 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth); 1280 } 1281 1282 if (NewVReg && TrySplit && AllCanFold) { 1283 // If all of its def / use can be folded, give it a low spill weight. 
1284 LiveInterval &nI = getOrCreateInterval(NewVReg); 1285 nI.weight /= 10.0F; 1286 } 1287} 1288 1289bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr, 1290 BitVector &RestoreMBBs, 1291 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) { 1292 if (!RestoreMBBs[Id]) 1293 return false; 1294 std::vector<SRInfo> &Restores = RestoreIdxes[Id]; 1295 for (unsigned i = 0, e = Restores.size(); i != e; ++i) 1296 if (Restores[i].index == index && 1297 Restores[i].vreg == vr && 1298 Restores[i].canFold) 1299 return true; 1300 return false; 1301} 1302 1303void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr, 1304 BitVector &RestoreMBBs, 1305 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) { 1306 if (!RestoreMBBs[Id]) 1307 return; 1308 std::vector<SRInfo> &Restores = RestoreIdxes[Id]; 1309 for (unsigned i = 0, e = Restores.size(); i != e; ++i) 1310 if (Restores[i].index == index && Restores[i].vreg) 1311 Restores[i].index = -1; 1312} 1313 1314 1315std::vector<LiveInterval*> LiveIntervals:: 1316addIntervalsForSpills(const LiveInterval &li, 1317 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) { 1318 // Since this is called after the analysis is done we don't know if 1319 // LiveVariables is available 1320 lv_ = getAnalysisToUpdate<LiveVariables>(); 1321 1322 assert(li.weight != HUGE_VALF && 1323 "attempt to spill already spilled interval!"); 1324 1325 DOUT << "\t\t\t\tadding intervals for spills for interval: "; 1326 li.print(DOUT, tri_); 1327 DOUT << '\n'; 1328 1329 // Each bit specify whether it a spill is required in the MBB. 
1330 BitVector SpillMBBs(mf_->getNumBlockIDs()); 1331 std::map<unsigned, std::vector<SRInfo> > SpillIdxes; 1332 BitVector RestoreMBBs(mf_->getNumBlockIDs()); 1333 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes; 1334 std::map<unsigned,unsigned> MBBVRegsMap; 1335 std::vector<LiveInterval*> NewLIs; 1336 const TargetRegisterClass* rc = mri_->getRegClass(li.reg); 1337 1338 unsigned NumValNums = li.getNumValNums(); 1339 SmallVector<MachineInstr*, 4> ReMatDefs; 1340 ReMatDefs.resize(NumValNums, NULL); 1341 SmallVector<MachineInstr*, 4> ReMatOrigDefs; 1342 ReMatOrigDefs.resize(NumValNums, NULL); 1343 SmallVector<int, 4> ReMatIds; 1344 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT); 1345 BitVector ReMatDelete(NumValNums); 1346 unsigned Slot = VirtRegMap::MAX_STACK_SLOT; 1347 1348 // Spilling a split live interval. It cannot be split any further. Also, 1349 // it's also guaranteed to be a single val# / range interval. 1350 if (vrm.getPreSplitReg(li.reg)) { 1351 vrm.setIsSplitFromReg(li.reg, 0); 1352 // Unset the split kill marker on the last use. 1353 unsigned KillIdx = vrm.getKillPoint(li.reg); 1354 if (KillIdx) { 1355 MachineInstr *KillMI = getInstructionFromIndex(KillIdx); 1356 assert(KillMI && "Last use disappeared?"); 1357 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true); 1358 assert(KillOp != -1 && "Last use disappeared?"); 1359 KillMI->getOperand(KillOp).setIsKill(false); 1360 } 1361 vrm.removeKillPoint(li.reg); 1362 bool DefIsReMat = vrm.isReMaterialized(li.reg); 1363 Slot = vrm.getStackSlot(li.reg); 1364 assert(Slot != VirtRegMap::MAX_STACK_SLOT); 1365 MachineInstr *ReMatDefMI = DefIsReMat ? 
1366 vrm.getReMaterializedMI(li.reg) : NULL; 1367 int LdSlot = 0; 1368 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); 1369 bool isLoad = isLoadSS || 1370 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad())); 1371 bool IsFirstRange = true; 1372 for (LiveInterval::Ranges::const_iterator 1373 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) { 1374 // If this is a split live interval with multiple ranges, it means there 1375 // are two-address instructions that re-defined the value. Only the 1376 // first def can be rematerialized! 1377 if (IsFirstRange) { 1378 // Note ReMatOrigDefMI has already been deleted. 1379 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI, 1380 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, 1381 false, vrm, rc, ReMatIds, loopInfo, 1382 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1383 MBBVRegsMap, NewLIs); 1384 } else { 1385 rewriteInstructionsForSpills(li, false, I, NULL, 0, 1386 Slot, 0, false, false, false, 1387 false, vrm, rc, ReMatIds, loopInfo, 1388 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1389 MBBVRegsMap, NewLIs); 1390 } 1391 IsFirstRange = false; 1392 } 1393 return NewLIs; 1394 } 1395 1396 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li); 1397 if (SplitLimit != -1 && (int)numSplits >= SplitLimit) 1398 TrySplit = false; 1399 if (TrySplit) 1400 ++numSplits; 1401 bool NeedStackSlot = false; 1402 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end(); 1403 i != e; ++i) { 1404 const VNInfo *VNI = *i; 1405 unsigned VN = VNI->id; 1406 unsigned DefIdx = VNI->def; 1407 if (DefIdx == ~1U) 1408 continue; // Dead val#. 1409 // Is the def for the val# rematerializable? 1410 MachineInstr *ReMatDefMI = (DefIdx == ~0u) 1411 ? 0 : getInstructionFromIndex(DefIdx); 1412 bool dummy; 1413 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) { 1414 // Remember how to remat the def of this val#. 
1415 ReMatOrigDefs[VN] = ReMatDefMI; 1416 // Original def may be modified so we have to make a copy here. vrm must 1417 // delete these! 1418 ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone(); 1419 1420 bool CanDelete = true; 1421 if (VNI->hasPHIKill) { 1422 // A kill is a phi node, not all of its uses can be rematerialized. 1423 // It must not be deleted. 1424 CanDelete = false; 1425 // Need a stack slot if there is any live range where uses cannot be 1426 // rematerialized. 1427 NeedStackSlot = true; 1428 } 1429 if (CanDelete) 1430 ReMatDelete.set(VN); 1431 } else { 1432 // Need a stack slot if there is any live range where uses cannot be 1433 // rematerialized. 1434 NeedStackSlot = true; 1435 } 1436 } 1437 1438 // One stack slot per live interval. 1439 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) 1440 Slot = vrm.assignVirt2StackSlot(li.reg); 1441 1442 // Create new intervals and rewrite defs and uses. 1443 for (LiveInterval::Ranges::const_iterator 1444 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) { 1445 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id]; 1446 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id]; 1447 bool DefIsReMat = ReMatDefMI != NULL; 1448 bool CanDelete = ReMatDelete[I->valno->id]; 1449 int LdSlot = 0; 1450 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); 1451 bool isLoad = isLoadSS || 1452 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad()); 1453 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI, 1454 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat, 1455 CanDelete, vrm, rc, ReMatIds, loopInfo, 1456 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes, 1457 MBBVRegsMap, NewLIs); 1458 } 1459 1460 // Insert spills / restores if we are splitting. 
1461 if (!TrySplit) 1462 return NewLIs; 1463 1464 SmallPtrSet<LiveInterval*, 4> AddedKill; 1465 SmallVector<unsigned, 2> Ops; 1466 if (NeedStackSlot) { 1467 int Id = SpillMBBs.find_first(); 1468 while (Id != -1) { 1469 std::vector<SRInfo> &spills = SpillIdxes[Id]; 1470 for (unsigned i = 0, e = spills.size(); i != e; ++i) { 1471 int index = spills[i].index; 1472 unsigned VReg = spills[i].vreg; 1473 LiveInterval &nI = getOrCreateInterval(VReg); 1474 bool isReMat = vrm.isReMaterialized(VReg); 1475 MachineInstr *MI = getInstructionFromIndex(index); 1476 bool CanFold = false; 1477 bool FoundUse = false; 1478 Ops.clear(); 1479 if (spills[i].canFold) { 1480 CanFold = true; 1481 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) { 1482 MachineOperand &MO = MI->getOperand(j); 1483 if (!MO.isRegister() || MO.getReg() != VReg) 1484 continue; 1485 1486 Ops.push_back(j); 1487 if (MO.isDef()) 1488 continue; 1489 if (isReMat || 1490 (!FoundUse && !alsoFoldARestore(Id, index, VReg, 1491 RestoreMBBs, RestoreIdxes))) { 1492 // MI has two-address uses of the same register. If the use 1493 // isn't the first and only use in the BB, then we can't fold 1494 // it. FIXME: Move this to rewriteInstructionsForSpills. 1495 CanFold = false; 1496 break; 1497 } 1498 FoundUse = true; 1499 } 1500 } 1501 // Fold the store into the def if possible. 1502 bool Folded = false; 1503 if (CanFold && !Ops.empty()) { 1504 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){ 1505 Folded = true; 1506 if (FoundUse > 0) { 1507 // Also folded uses, do not issue a load. 1508 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes); 1509 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1); 1510 } 1511 nI.removeRange(getDefIndex(index), getStoreIndex(index)); 1512 } 1513 } 1514 1515 // Else tell the spiller to issue a spill. 
1516 if (!Folded) { 1517 LiveRange *LR = &nI.ranges[nI.ranges.size()-1]; 1518 bool isKill = LR->end == getStoreIndex(index); 1519 vrm.addSpillPoint(VReg, isKill, MI); 1520 if (isKill) 1521 AddedKill.insert(&nI); 1522 } 1523 } 1524 Id = SpillMBBs.find_next(Id); 1525 } 1526 } 1527 1528 int Id = RestoreMBBs.find_first(); 1529 while (Id != -1) { 1530 std::vector<SRInfo> &restores = RestoreIdxes[Id]; 1531 for (unsigned i = 0, e = restores.size(); i != e; ++i) { 1532 int index = restores[i].index; 1533 if (index == -1) 1534 continue; 1535 unsigned VReg = restores[i].vreg; 1536 LiveInterval &nI = getOrCreateInterval(VReg); 1537 MachineInstr *MI = getInstructionFromIndex(index); 1538 bool CanFold = false; 1539 Ops.clear(); 1540 if (restores[i].canFold) { 1541 CanFold = true; 1542 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) { 1543 MachineOperand &MO = MI->getOperand(j); 1544 if (!MO.isRegister() || MO.getReg() != VReg) 1545 continue; 1546 1547 if (MO.isDef()) { 1548 // If this restore were to be folded, it would have been folded 1549 // already. 1550 CanFold = false; 1551 break; 1552 } 1553 Ops.push_back(j); 1554 } 1555 } 1556 1557 // Fold the load into the use if possible. 1558 bool Folded = false; 1559 if (CanFold && !Ops.empty()) { 1560 if (!vrm.isReMaterialized(VReg)) 1561 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg); 1562 else { 1563 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg); 1564 int LdSlot = 0; 1565 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot); 1566 // If the rematerializable def is a load, also try to fold it. 1567 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad()) 1568 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, 1569 Ops, isLoadSS, LdSlot, VReg); 1570 unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI); 1571 if (ImpUse) { 1572 // Re-matting an instruction with virtual register use. 
Add the 1573 // register as an implicit use on the use MI and update the register 1574 // interval's spill weight. 1575 unsigned loopDepth = loopInfo->getLoopDepth(MI->getParent()); 1576 LiveInterval &ImpLi = getInterval(ImpUse); 1577 ImpLi.weight += 1578 getSpillWeight(false, true, loopDepth) / ImpLi.getSize(); 1579 1580 MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true)); 1581 } 1582 } 1583 } 1584 // If folding is not possible / failed, then tell the spiller to issue a 1585 // load / rematerialization for us. 1586 if (Folded) 1587 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1); 1588 else 1589 vrm.addRestorePoint(VReg, MI); 1590 } 1591 Id = RestoreMBBs.find_next(Id); 1592 } 1593 1594 // Finalize intervals: add kills, finalize spill weights, and filter out 1595 // dead intervals. 1596 std::vector<LiveInterval*> RetNewLIs; 1597 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) { 1598 LiveInterval *LI = NewLIs[i]; 1599 if (!LI->empty()) { 1600 LI->weight /= LI->getSize(); 1601 if (!AddedKill.count(LI)) { 1602 LiveRange *LR = &LI->ranges[LI->ranges.size()-1]; 1603 unsigned LastUseIdx = getBaseIndex(LR->end); 1604 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx); 1605 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg); 1606 assert(UseIdx != -1); 1607 if (LastUse->getOperand(UseIdx).isImplicit() || 1608 LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){ 1609 LastUse->getOperand(UseIdx).setIsKill(); 1610 vrm.addKillPoint(LI->reg, LastUseIdx); 1611 } 1612 } 1613 RetNewLIs.push_back(LI); 1614 } 1615 } 1616 1617 return RetNewLIs; 1618} 1619