// LiveIntervalAnalysis.cpp -- revision 87dc769d90f35ae3713d01245e730ddfd91a4564
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the LiveInterval analysis pass which is used 11// by the Linear Scan Register allocator. This pass linearizes the 12// basic blocks of the function in DFS order and uses the 13// LiveVariables pass to conservatively compute live intervals for 14// each virtual and physical register. 15// 16//===----------------------------------------------------------------------===// 17 18#define DEBUG_TYPE "liveintervals" 19#include "llvm/CodeGen/LiveIntervalAnalysis.h" 20#include "VirtRegMap.h" 21#include "llvm/Value.h" 22#include "llvm/CodeGen/LiveVariables.h" 23#include "llvm/CodeGen/MachineFrameInfo.h" 24#include "llvm/CodeGen/MachineInstr.h" 25#include "llvm/CodeGen/MachineLoopInfo.h" 26#include "llvm/CodeGen/MachineRegisterInfo.h" 27#include "llvm/CodeGen/Passes.h" 28#include "llvm/Target/TargetRegisterInfo.h" 29#include "llvm/Target/TargetInstrInfo.h" 30#include "llvm/Target/TargetMachine.h" 31#include "llvm/Support/CommandLine.h" 32#include "llvm/Support/Debug.h" 33#include "llvm/ADT/Statistic.h" 34#include "llvm/ADT/STLExtras.h" 35#include <algorithm> 36#include <cmath> 37using namespace llvm; 38 39// Hidden options for help debugging. 
static cl::opt<bool> DisableReMat("disable-rematerialization",
                                  cl::init(false), cl::Hidden);

static cl::opt<bool> SplitAtBB("split-intervals-at-bb",
                               cl::init(true), cl::Hidden);
static cl::opt<int> SplitLimit("split-limit",
                               cl::init(-1), cl::Hidden);

STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolds    , "Number of loads/stores folded into instructions");
STATISTIC(numSplits   , "Number of intervals split");

char LiveIntervals::ID = 0;
static RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");

void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addPreserved<LiveVariables>();
  AU.addRequired<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreservedID(PHIEliminationID);
  AU.addRequiredID(PHIEliminationID);
  AU.addRequiredID(TwoAddressInstructionPassID);
  MachineFunctionPass::getAnalysisUsage(AU);
}

void LiveIntervals::releaseMemory() {
  Idx2MBBMap.clear();
  mi2iMap_.clear();
  i2miMap_.clear();
  r2iMap_.clear();
  // Release VNInfo memory regions after all VNInfo objects are dtor'd.
  VNInfoAllocator.Reset();
  for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
    delete ClonedMIs[i];
}

/// runOnMachineFunction - Register allocate the whole function
///
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  mf_ = &fn;
  mri_ = &mf_->getRegInfo();
  tm_ = &fn.getTarget();
  tri_ = tm_->getRegisterInfo();
  tii_ = tm_->getInstrInfo();
  lv_ = &getAnalysis<LiveVariables>();
  allocatableRegs_ = tri_->getAllocatableSet(fn);

  // Number MachineInstrs and MachineBasicBlocks.
  // Initialize MBB indexes to a sentinel.
  MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));

  // Assign each MachineInstr an index, InstrSlots::NUM slots apart, and
  // record the index range covered by each basic block.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
       MBB != E; ++MBB) {
    unsigned StartIdx = MIIndex;

    for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
         I != E; ++I) {
      bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
      assert(inserted && "multiple MachineInstr -> index mappings");
      i2miMap_.push_back(I);
      MIIndex += InstrSlots::NUM;
    }

    // Set the MBB2IdxMap entry for this MBB.
    MBB2IdxMap[MBB->getNumber()] = (StartIdx == MIIndex)
      ? std::make_pair(StartIdx, StartIdx)  // Empty MBB
      : std::make_pair(StartIdx, MIIndex - 1);
    Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
  }
  // Sort so that findLiveInMBBs / intervalIsInOneMBB can binary search.
  std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());

  computeIntervals();

  numIntervals += getNumIntervals();

  DOUT << "********** INTERVALS **********\n";
  for (iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  numIntervalsAfter += getNumIntervals();
  DEBUG(dump());
  return true;
}

/// print - Implement the dump method.
void LiveIntervals::print(std::ostream &O, const Module* ) const {
  O << "********** INTERVALS **********\n";
  for (const_iterator I = begin(), E = end(); I != E; ++I) {
    I->second.print(DOUT, tri_);
    DOUT << "\n";
  }

  O << "********** MACHINEINSTRS **********\n";
  for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
       mbbi != mbbe; ++mbbi) {
    O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
    for (MachineBasicBlock::iterator mii = mbbi->begin(),
           mie = mbbi->end(); mii != mie; ++mii) {
      O << getInstructionIndex(mii) << '\t' << *mii;
    }
  }
}

/// conflictsWithPhysRegDef - Returns true if the specified register
/// is defined during the duration of the specified interval.
bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
                                            VirtRegMap &vrm, unsigned reg) {
  // Walk every instruction index covered by li's live ranges and look for an
  // operand that maps to a physical register overlapping 'reg'.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    for (unsigned index = getBaseIndex(I->start),
           end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
         index += InstrSlots::NUM) {
      // skip deleted instructions
      while (index != end && !getInstructionFromIndex(index))
        index += InstrSlots::NUM;
      if (index == end) break;

      MachineInstr *MI = getInstructionFromIndex(index);
      unsigned SrcReg, DstReg;
      // Copies to/from li.reg itself do not conflict.
      if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
        if (SrcReg == li.reg || DstReg == li.reg)
          continue;
      for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
        MachineOperand& mop = MI->getOperand(i);
        if (!mop.isRegister())
          continue;
        unsigned PhysReg = mop.getReg();
        if (PhysReg == 0 || PhysReg == li.reg)
          continue;
        if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
          // Map an already-allocated virtual register to its phys reg.
          if (!vrm.hasPhys(PhysReg))
            continue;
          PhysReg = vrm.getPhys(PhysReg);
        }
        if (PhysReg && tri_->regsOverlap(PhysReg, reg))
          return true;
      }
    }
  }

  return false;
}

/// printRegName - Print a register name: symbolic name for physical
/// registers, "%reg<N>" for virtual registers.
void LiveIntervals::printRegName(unsigned reg) const {
  if (TargetRegisterInfo::isPhysicalRegister(reg))
    cerr << tri_->getName(reg);
  else
    cerr << "%reg" << reg;
}

/// handleVirtualRegisterDef - Update the live interval for the virtual
/// register defined by the instruction at MIIdx in block mbb.
void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
                                             MachineBasicBlock::iterator mi,
                                             unsigned MIIdx,
                                             LiveInterval &interval) {
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
  LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);

  if (mi->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    DOUT << "is a implicit_def\n";
    return;
  }

  // Virtual registers may be defined multiple times (due to phi
  // elimination and 2-addr elimination). Much of what we do only has to be
  // done once for the vreg. We use an empty interval to detect the first
  // time we see a vreg.
  if (interval.empty()) {
    // Get the Idx of the defining instructions.
    unsigned defIndex = getDefIndex(MIIdx);
    VNInfo *ValNo;
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    // Record copy-like defs so coalescing can later find the source.
    if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*mi, SrcReg, DstReg))
      CopyMI = mi;
    ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

    assert(ValNo->id == 0 && "First value in interval is not 0?");

    // Loop over all of the blocks that the vreg is defined in.  There are
    // two cases we have to handle here.  The most common case is a vreg
    // whose lifetime is contained within a basic block.  In this case there
    // will be a single kill, in MBB, which comes after the definition.
    if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
      // FIXME: what about dead vars?
      unsigned killIdx;
      if (vi.Kills[0] != mi)
        killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
      else
        killIdx = defIndex+1;

      // If the kill happens after the definition, we have an intra-block
      // live range.
      if (killIdx > defIndex) {
        assert(vi.AliveBlocks.none() &&
               "Shouldn't be alive across any blocks!");
        LiveRange LR(defIndex, killIdx, ValNo);
        interval.addRange(LR);
        DOUT << " +" << LR << "\n";
        interval.addKill(ValNo, killIdx);
        return;
      }
    }

    // The other case we handle is when a virtual register lives to the end
    // of the defining block, potentially live across some blocks, then is
    // live into some number of blocks, but gets killed.  Start by adding a
    // range that goes from this definition to the end of the defining block.
    LiveRange NewLR(defIndex,
                    getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
                    ValNo);
    DOUT << " +" << NewLR;
    interval.addRange(NewLR);

    // Iterate over all of the blocks that the variable is completely
    // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
    // live interval.
    for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
      if (vi.AliveBlocks[i]) {
        MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
        if (!MBB->empty()) {
          LiveRange LR(getMBBStartIdx(i),
                       getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
                       ValNo);
          interval.addRange(LR);
          DOUT << " +" << LR;
        }
      }
    }

    // Finally, this virtual register is live from the start of any killing
    // block to the 'use' slot of the killing instruction.
    for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
      MachineInstr *Kill = vi.Kills[i];
      unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
      LiveRange LR(getMBBStartIdx(Kill->getParent()),
                   killIdx, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIdx);
      DOUT << " +" << LR;
    }

  } else {
    // If this is the second time we see a virtual register definition, it
    // must be due to phi elimination or two addr elimination.  If this is
    // the result of two address elimination, then the vreg is one of the
    // def-and-use register operand.
    if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
      // If this is a two-address definition, then we have already processed
      // the live range.  The only problem is that we didn't realize there
      // are actually two values in the live interval.  Because of this we
      // need to take the LiveRegion that defines this register and split it
      // into two values.
      assert(interval.containsOneValue());
      unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
      unsigned RedefIndex = getDefIndex(MIIdx);

      const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
      VNInfo *OldValNo = OldLR->valno;

      // Delete the initial value, which should be short and continuous,
      // because the 2-addr copy must be in the same MBB as the redef.
      interval.removeRange(DefIndex, RedefIndex);

      // Two-address vregs should always only be redefined once.  This means
      // that at this point, there should be exactly one value number in it.
      assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");

      // The new value number (#1) is defined by the instruction we claimed
      // defined value #0.
      VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
                                            VNInfoAllocator);

      // Value#0 is now defined by the 2-addr instruction.
      OldValNo->def = RedefIndex;
      OldValNo->copy = 0;

      // Add the new live interval which replaces the range for the input copy.
      LiveRange LR(DefIndex, RedefIndex, ValNo);
      DOUT << " replace range with " << LR;
      interval.addRange(LR);
      interval.addKill(ValNo, RedefIndex);

      // If this redefinition is dead, we need to add a dummy unit live
      // range covering the def slot.
      if (mi->registerDefIsDead(interval.reg, tri_))
        interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));

      DOUT << " RESULT: ";
      interval.print(DOUT, tri_);

    } else {
      // Otherwise, this must be because of phi elimination.  If this is the
      // first redefinition of the vreg that we have seen, go back and change
      // the live range in the PHI block to be a different value number.
      if (interval.containsOneValue()) {
        assert(vi.Kills.size() == 1 &&
               "PHI elimination vreg should have one kill, the PHI itself!");

        // Remove the old range that we now know has an incorrect number.
        VNInfo *VNI = interval.getValNumInfo(0);
        MachineInstr *Killer = vi.Kills[0];
        unsigned Start = getMBBStartIdx(Killer->getParent());
        unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
        DOUT << " Removing [" << Start << "," << End << "] from: ";
        interval.print(DOUT, tri_); DOUT << "\n";
        interval.removeRange(Start, End);
        VNI->hasPHIKill = true;
        DOUT << " RESULT: "; interval.print(DOUT, tri_);

        // Replace the interval with one of a NEW value number.  Note that this
        // value number isn't actually defined by an instruction, weird huh? :)
        LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
        DOUT << " replace range with " << LR;
        interval.addRange(LR);
        interval.addKill(LR.valno, End);
        DOUT << " RESULT: "; interval.print(DOUT, tri_);
      }

      // In the case of PHI elimination, each variable definition is only
      // live until the end of the block.  We've already taken care of the
      // rest of the live range.
      unsigned defIndex = getDefIndex(MIIdx);

      VNInfo *ValNo;
      MachineInstr *CopyMI = NULL;
      unsigned SrcReg, DstReg;
      if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
          mi->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
          tii_->isMoveInstr(*mi, SrcReg, DstReg))
        CopyMI = mi;
      ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);

      unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
      LiveRange LR(defIndex, killIndex, ValNo);
      interval.addRange(LR);
      interval.addKill(ValNo, killIndex);
      ValNo->hasPHIKill = true;
      DOUT << " +" << LR;
    }
  }

  DOUT << '\n';
}

/// handlePhysicalRegisterDef - Update the live interval of the physical
/// register defined by the instruction at MIIdx.  CopyMI, if non-null, is
/// the copy-like instruction that performs the def.
void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
                                              MachineBasicBlock::iterator mi,
                                              unsigned MIIdx,
                                              LiveInterval &interval,
                                              MachineInstr *CopyMI) {
  // A physical register cannot be live across basic block, so its
  // lifetime must end somewhere in its defining basic block.
  DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));

  unsigned baseIndex = MIIdx;
  unsigned start = getDefIndex(baseIndex);
  unsigned end = start;

  // If it is not used after definition, it is considered dead at
  // the instruction defining it. Hence its interval is:
  // [defSlot(def), defSlot(def)+1)
  if (mi->registerDefIsDead(interval.reg, tri_)) {
    DOUT << " dead";
    end = getDefIndex(start) + 1;
    goto exit;
  }

  // If it is not dead on definition, it must be killed by a
  // subsequent instruction. Hence its interval is:
  // [defSlot(def), useSlot(kill)+1)
  while (++mi != MBB->end()) {
    baseIndex += InstrSlots::NUM;
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }
  }

  // The only case we should have a dead physreg here without a killing or
  // instruction where we know it's dead is if it is live-in to the function
  // and never used.
  assert(!CopyMI && "physreg was not killed in defining block!");
  end = getDefIndex(start) + 1;  // It's dead.

exit:
  assert(start < end && "did not find end of interval?");

  // Already exists? Extend old live interval.
  LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
  VNInfo *ValNo = (OldLR != interval.end())
    ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
  LiveRange LR(start, end, ValNo);
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// handleRegisterDef - Dispatch a register def to the virtual- or
/// physical-register handler; also processes sub-registers of a physreg def.
void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
                                      MachineBasicBlock::iterator MI,
                                      unsigned MIIdx,
                                      unsigned reg) {
  if (TargetRegisterInfo::isVirtualRegister(reg))
    handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
  else if (allocatableRegs_[reg]) {
    MachineInstr *CopyMI = NULL;
    unsigned SrcReg, DstReg;
    if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
        MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG ||
        tii_->isMoveInstr(*MI, SrcReg, DstReg))
      CopyMI = MI;
    handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
    // Def of a register also defines its sub-registers.
    for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
      // If MI also modifies the sub-register explicitly, avoid processing it
      // more than once. Do not pass in TRI here so it checks for exact match.
      if (!MI->modifiesRegister(*AS))
        handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
  }
}

/// handleLiveInRegister - Create the live range for a register that is
/// live-in to MBB.  isAlias is true when the register is only live-in as an
/// alias of an explicitly listed live-in register.
void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
                                         unsigned MIIdx,
                                         LiveInterval &interval, bool isAlias) {
  DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));

  // Look for kills, if it reaches a def before it's killed, then it shouldn't
  // be considered a livein.
  MachineBasicBlock::iterator mi = MBB->begin();
  unsigned baseIndex = MIIdx;
  unsigned start = baseIndex;
  unsigned end = start;
  while (mi != MBB->end()) {
    if (mi->killsRegister(interval.reg, tri_)) {
      DOUT << " killed";
      end = getUseIndex(baseIndex) + 1;
      goto exit;
    } else if (mi->modifiesRegister(interval.reg, tri_)) {
      // Another instruction redefines the register before it is ever read.
      // Then the register is essentially dead at the instruction that defines
      // it. Hence its interval is:
      // [defSlot(def), defSlot(def)+1)
      DOUT << " dead";
      end = getDefIndex(start) + 1;
      goto exit;
    }

    baseIndex += InstrSlots::NUM;
    ++mi;
  }

exit:
  // Live-in register might not be used at all.
  if (end == MIIdx) {
    if (isAlias) {
      DOUT << " dead";
      end = getDefIndex(MIIdx) + 1;
    } else {
      DOUT << " live through";
      end = baseIndex;
    }
  }

  LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
  interval.addRange(LR);
  interval.addKill(LR.valno, end);
  DOUT << " +" << LR << '\n';
}

/// computeIntervals - computes the live intervals for virtual
/// registers. for some ordering of the machine instructions [1,N] a
/// live interval is an interval [i, j) where 1 <= i <= j < N for
/// which a variable is live
void LiveIntervals::computeIntervals() {
  DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
       << "********** Function: "
       << ((Value*)mf_->getFunction())->getName() << '\n';
  // Track the index of the current machine instr.
  unsigned MIIndex = 0;
  for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
       MBBI != E; ++MBBI) {
    MachineBasicBlock *MBB = MBBI;
    DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";

    MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();

    // Create intervals for live-ins to this BB first.
    for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
           LE = MBB->livein_end(); LI != LE; ++LI) {
      handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
      // Multiple live-ins can alias the same register.
      for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
        if (!hasInterval(*AS))
          handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
                               true);
    }

    for (; MI != miEnd; ++MI) {
      DOUT << MIIndex << "\t" << *MI;

      // Handle defs.
      for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
        MachineOperand &MO = MI->getOperand(i);
        // handle register defs - build intervals
        if (MO.isRegister() && MO.getReg() && MO.isDef())
          handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
      }

      MIIndex += InstrSlots::NUM;
    }
  }
}

/// findLiveInMBBs - Collect the basic blocks that LR is live into.
/// Returns true if any block was found.
bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
                              SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
  std::vector<IdxMBBPair>::const_iterator I =
    std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);

  bool ResVal = false;
  while (I != Idx2MBBMap.end()) {
    if (LR.end <= I->first)
      break;
    MBBs.push_back(I->second);
    ResVal = true;
    ++I;
  }
  return ResVal;
}

/// createInterval - Create a fresh LiveInterval; physical registers get an
/// effectively infinite spill weight so they are never chosen for spilling.
LiveInterval LiveIntervals::createInterval(unsigned reg) {
  float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
                       HUGE_VALF : 0.0F;
  return LiveInterval(reg, Weight);
}

/// getVNInfoSourceReg - Helper function that parses the specified VNInfo
/// copy field and returns the source register that defines it.
unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
  if (!VNI->copy)
    return 0;

  if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
    return VNI->copy->getOperand(1).getReg();
  if (VNI->copy->getOpcode() == TargetInstrInfo::INSERT_SUBREG)
    return VNI->copy->getOperand(2).getReg();
  unsigned SrcReg, DstReg;
  if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
    return SrcReg;
  assert(0 && "Unrecognized copy instruction!");
  return 0;
}

//===----------------------------------------------------------------------===//
// Register allocator hooks.
//

/// getReMatImplicitUse - If the remat definition MI has one (for now, we only
/// allow one) virtual register operand, then its uses are implicitly using
/// the register. Returns the virtual register.
unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li,
                                            MachineInstr *MI) const {
  unsigned RegOp = 0;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == li.reg)
      continue;
    // FIXME: For now, only remat MI with at most one register operand.
    assert(!RegOp &&
           "Can't rematerialize instruction with multiple register operand!");
    RegOp = MO.getReg();
    break;
  }
  return RegOp;
}

/// isValNoAvailableAt - Return true if the val# of the specified interval
/// which reaches the given instruction also reaches the specified use index.
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
                                       unsigned UseIdx) const {
  unsigned Index = getInstructionIndex(MI);
  VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno;
  LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx);
  return UI != li.end() && UI->valno == ValNo;
}

/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
                                       const VNInfo *ValNo, MachineInstr *MI,
                                       bool &isLoad) {
  if (DisableReMat)
    return false;

  isLoad = false;
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
    return true;

  int FrameIdx = 0;
  if (tii_->isLoadFromStackSlot(MI, FrameIdx) &&
      mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    // FIXME: Let target specific isReallyTriviallyReMaterializable determines
    // this but remember this is not safe to fold into a two-address
    // instruction.
    // This is a load from fixed stack slot. It can be rematerialized.
    return true;

  if (tii_->isTriviallyReMaterializable(MI)) {
    const TargetInstrDesc &TID = MI->getDesc();
    isLoad = TID.isSimpleLoad();

    // If the remat def implicitly uses another virtual register, that
    // register's value must also be available at every use of li reached
    // by this val#.
    unsigned ImpUse = getReMatImplicitUse(li, MI);
    if (ImpUse) {
      const LiveInterval &ImpLi = getInterval(ImpUse);
      for (MachineRegisterInfo::use_iterator ri = mri_->use_begin(li.reg),
             re = mri_->use_end(); ri != re; ++ri) {
        MachineInstr *UseMI = &*ri;
        unsigned UseIdx = getInstructionIndex(UseMI);
        if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo)
          continue;
        if (!isValNoAvailableAt(ImpLi, MI, UseIdx))
          return false;
      }
    }
    return true;
  }

  return false;
}

/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
  isLoad = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    if (DefIdx == ~0u)
      return false;
    MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
    bool DefIsLoad = false;
    if (!ReMatDefMI ||
        !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
      return false;
    isLoad |= DefIsLoad;
  }
  return true;
}

/// FilterFoldedOps - Filter out two-address use operands. Return
/// true if it finds any issue with the operands that ought to prevent
/// folding.
static bool FilterFoldedOps(MachineInstr *MI,
                            SmallVector<unsigned, 2> &Ops,
                            unsigned &MRInfo,
                            SmallVector<unsigned, 2> &FoldOps) {
  const TargetInstrDesc &TID = MI->getDesc();

  MRInfo = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    unsigned OpIdx = Ops[i];
    MachineOperand &MO = MI->getOperand(OpIdx);
    // FIXME: fold subreg use.
    if (MO.getSubReg())
      return true;
    if (MO.isDef())
      MRInfo |= (unsigned)VirtRegMap::isMod;
    else {
      // Filter out two-address use operand(s).
      if (!MO.isImplicit() &&
          TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
        MRInfo = VirtRegMap::isModRef;
        continue;
      }
      MRInfo |= (unsigned)VirtRegMap::isRef;
    }
    FoldOps.push_back(OpIdx);
  }
  return false;
}


/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
/// slot / to reg or any rematerialized load into ith operand of specified
/// MI. If it is successful, MI is updated with the newly created MI and
/// returns true.
bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                         VirtRegMap &vrm, MachineInstr *DefMI,
                                         unsigned InstrIdx,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool isSS, int Slot, unsigned Reg) {
  // If it is an implicit def instruction, just delete it.
  if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
    RemoveMachineInstrFromMaps(MI);
    vrm.RemoveMachineInstrFromMaps(MI);
    MI->eraseFromParent();
    ++numFolds;
    return true;
  }

  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // The only time it's safe to fold into a two address instruction is when
  // it's folding reload and spill from / into a spill stack slot.
  if (DefMI && (MRInfo & VirtRegMap::isMod))
    return false;

  MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
                           : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
  if (fmi) {
    // Remember this instruction uses the spill slot.
    if (isSS) vrm.addSpillSlotUse(Slot, fmi);

    // Attempt to fold the memory reference into the instruction. If
    // we can do this, we don't need to insert spill code.
    if (lv_)
      lv_->instructionChanged(MI, fmi);
    else
      fmi->copyKillDeadInfo(MI, tri_);
    MachineBasicBlock &MBB = *MI->getParent();
    if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
    vrm.transferSpillPts(MI, fmi);
    vrm.transferRestorePts(MI, fmi);
    vrm.transferEmergencySpills(MI, fmi);
    // Rewire the index maps from the old instruction to the folded one.
    mi2iMap_.erase(MI);
    i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
    mi2iMap_[fmi] = InstrIdx;
    MI = MBB.insert(MBB.erase(MI), fmi);
    ++numFolds;
    return true;
  }
  return false;
}

/// canFoldMemoryOperand - Returns true if the specified load / store
/// folding is possible.
bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
                                         SmallVector<unsigned, 2> &Ops,
                                         bool ReMat) const {
  // Filter the list of operand indexes that are to be folded. Abort if
  // any operand will prevent folding.
  unsigned MRInfo = 0;
  SmallVector<unsigned, 2> FoldOps;
  if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps))
    return false;

  // It's only legal to remat for a use, not a def.
  if (ReMat && (MRInfo & VirtRegMap::isMod))
    return false;

  return tii_->canFoldMemoryOperand(MI, FoldOps);
}

/// intervalIsInOneMBB - Returns true if all ranges of li lie within a single
/// basic block.
bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
  SmallPtrSet<MachineBasicBlock*, 4> MBBs;
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    std::vector<IdxMBBPair>::const_iterator II =
      std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
    if (II == Idx2MBBMap.end())
      continue;
    if (I->end > II->first)  // crossing a MBB.
      return false;
    MBBs.insert(II->second);
    if (MBBs.size() > 1)
      return false;
  }
  return true;
}

/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of
/// interval on to-be re-materialized operands of MI) with new register.
void LiveIntervals::rewriteImplicitOps(const LiveInterval &li,
                                       MachineInstr *MI, unsigned NewVReg,
                                       VirtRegMap &vrm) {
  // There is an implicit use. That means one of the other operand is
  // being remat'ed and the remat'ed instruction has li.reg as an
  // use operand. Make sure we rewrite that as well.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isRegister())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (!vrm.isReMaterialized(Reg))
      continue;
    MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg);
    MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg);
    if (UseMO)
      UseMO->setReg(NewVReg);
  }
}

/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
bool LiveIntervals::
rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
                 bool TrySplit, unsigned index, unsigned end,  MachineInstr *MI,
                 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                 unsigned Slot, int LdSlot,
                 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                 VirtRegMap &vrm,
                 const TargetRegisterClass* rc,
                 SmallVector<int, 4> &ReMatIds,
                 const MachineLoopInfo *loopInfo,
                 unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse,
                 std::map<unsigned,unsigned> &MBBVRegsMap,
                 std::vector<LiveInterval*> &NewLIs) {
  bool CanFold = false;
 RestartInstruction:
  for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
    MachineOperand& mop = MI->getOperand(i);
    if (!mop.isRegister())
      continue;
    unsigned Reg = mop.getReg();
    unsigned RegI = Reg;
    if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
      continue;
    if (Reg != li.reg)
      continue;

    bool TryFold = !DefIsReMat;
    bool FoldSS = true; // Default behavior unless it's a remat.
    int FoldSlot = Slot;
    if (DefIsReMat) {
      // If this is the rematerializable definition MI itself and
      // all of its uses are rematerialized, simply delete it.
      // (continues rewriteInstructionForSpills: per-operand rewrite loop)
      if (MI == ReMatOrigDefMI && CanDelete) {
        DOUT << "\t\t\t\tErasing re-materlizable def: ";
        DOUT << MI << '\n';
        // Remove from both the index maps and the VirtRegMap before erasing.
        RemoveMachineInstrFromMaps(MI);
        vrm.RemoveMachineInstrFromMaps(MI);
        MI->eraseFromParent();
        break;
      }

      // If def for this use can't be rematerialized, then try folding.
      // If def is rematerializable and it's a load, also try folding.
      TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
      if (isLoad) {
        // Try fold loads (from stack slot, constant pool, etc.) into uses.
        FoldSS = isLoadSS;
        FoldSlot = LdSlot;
      }
    }

    // Scan all of the operands of this instruction rewriting operands
    // to use NewVReg instead of li.reg as appropriate.  We do this for
    // two reasons:
    //
    //   1. If the instr reads the same spilled vreg multiple times, we
    //      want to reuse the NewVReg.
    //   2. If the instr is a two-addr instruction, we are required to
    //      keep the src/dst regs pinned.
    //
    // Keep track of whether we replace a use and/or def so that we can
    // create the spill interval with the appropriate range.

    HasUse = mop.isUse();
    HasDef = mop.isDef();
    // Ops collects all operand indices of MI that reference li.reg.
    SmallVector<unsigned, 2> Ops;
    Ops.push_back(i);
    for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
      const MachineOperand &MOj = MI->getOperand(j);
      if (!MOj.isRegister())
        continue;
      unsigned RegJ = MOj.getReg();
      if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
        continue;
      if (RegJ == RegI) {
        Ops.push_back(j);
        HasUse |= MOj.isUse();
        HasDef |= MOj.isDef();
      }
    }

    if (TryFold) {
      // Do not fold load / store here if we are splitting. We'll find an
      // optimal point to insert a load / store later.
      // (continues rewriteInstructionForSpills: folding / vreg creation)
      if (!TrySplit) {
        if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                 Ops, FoldSS, FoldSlot, Reg)) {
          // Folding the load/store can completely change the instruction in
          // unpredictable ways, rescan it from the beginning.
          HasUse = false;
          HasDef = false;
          CanFold = false;
          if (isRemoved(MI))
            break;
          goto RestartInstruction;
        }
      } else {
        // Splitting: only record whether folding would be legal; the actual
        // fold is done later at a better insertion point.
        CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat);
      }
    } else
      CanFold = false;

    // Create a new virtual register for the spill interval.
    bool CreatedNewVReg = false;
    if (NewVReg == 0) {
      NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      CreatedNewVReg = true;
    }
    mop.setReg(NewVReg);
    if (mop.isImplicit())
      rewriteImplicitOps(li, MI, NewVReg, vrm);

    // Reuse NewVReg for other reads.
    for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
      MachineOperand &mopj = MI->getOperand(Ops[j]);
      mopj.setReg(NewVReg);
      if (mopj.isImplicit())
        rewriteImplicitOps(li, MI, NewVReg, vrm);
    }

    if (CreatedNewVReg) {
      if (DefIsReMat) {
        vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
        if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) {
          // Each valnum may have its own remat id.
          ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg);
        } else {
          vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]);
        }
        if (!CanDelete || (HasUse && HasDef)) {
          // If this is a two-addr instruction then its use operands are
          // rematerializable but its def is not. It should be assigned a
          // stack slot.
          vrm.assignVirt2StackSlot(NewVReg, Slot);
        }
      } else {
        vrm.assignVirt2StackSlot(NewVReg, Slot);
      }
    } else if (HasUse && HasDef &&
               vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
      // If this interval hasn't been assigned a stack slot (because earlier
      // def is a deleted remat def), do it now.
      // (continues rewriteInstructionForSpills: interval bookkeeping)
      assert(Slot != VirtRegMap::NO_STACK_SLOT);
      vrm.assignVirt2StackSlot(NewVReg, Slot);
    }

    // Re-matting an instruction with virtual register use. Add the
    // register as an implicit use on the use MI.
    if (DefIsReMat && ImpUse)
      MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));

    // create a new register interval for this spill / remat.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (CreatedNewVReg) {
      NewLIs.push_back(&nI);
      MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
      if (TrySplit)
        vrm.setIsSplitFromReg(NewVReg, li.reg);
    }

    if (HasUse) {
      if (CreatedNewVReg) {
        // Fresh interval: cover load-slot .. use-slot of this instruction.
        LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
                     nI.getNextValue(~0U, 0, VNInfoAllocator));
        DOUT << " +" << LR;
        nI.addRange(LR);
      } else {
        // Extend the split live interval to this def / use.
        unsigned End = getUseIndex(index)+1;
        LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
                     nI.getValNumInfo(nI.getNumValNums()-1));
        DOUT << " +" << LR;
        nI.addRange(LR);
      }
    }
    if (HasDef) {
      LiveRange LR(getDefIndex(index), getStoreIndex(index),
                   nI.getNextValue(~0U, 0, VNInfoAllocator));
      DOUT << " +" << LR;
      nI.addRange(LR);
    }

    DOUT << "\t\t\t\tAdded new interval: ";
    nI.print(DOUT, tri_);
    DOUT << '\n';
  }
  return CanFold;
}

/// anyKillInMBBAfterIdx - Return true if the given val# of li has a kill
/// located inside MBB at an index strictly after Idx.
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
                                   const VNInfo *VNI,
                                   MachineBasicBlock *MBB, unsigned Idx) const {
  unsigned End = getMBBEndIdx(MBB);
  for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
    unsigned KillIdx = VNI->kills[j];
    if (KillIdx > Idx && KillIdx < End)
      return true;
  }
  return false;
}

/// findDefinedVNInfo - Return the val# of li whose def index is exactly
/// DefIdx, or NULL if there is none.
static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
  const VNInfo *VNI = NULL;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(),
         e = li.vni_end(); i != e; ++i)
    if ((*i)->def == DefIdx) {
      VNI = *i;
      break;
    }
  return VNI;
}

/// RewriteInfo - Keep track of machine instrs that will be rewritten
/// during spilling.
namespace {
  struct RewriteInfo {
    unsigned Index;    // Instruction index of MI.
    MachineInstr *MI;  // The instruction to rewrite.
    bool HasUse;       // This operand reads the register.
    bool HasDef;       // This operand writes the register.
    RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
      : Index(i), MI(mi), HasUse(u), HasDef(d) {}
  };

  // Orders RewriteInfo records by instruction index.
  struct RewriteInfoCompare {
    bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
      return LHS.Index < RHS.Index;
    }
  };
}

/// rewriteInstructionsForSpills - Rewrite all defs / uses of li.reg inside
/// the live range [I->start, I->end), creating new spill intervals and
/// recording per-MBB spill (SpillMBBs/SpillIdxes) and restore
/// (RestoreMBBs/RestoreIdxes) points for the caller to materialize.
void LiveIntervals::
rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
                    LiveInterval::Ranges::const_iterator &I,
                    MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
                    unsigned Slot, int LdSlot,
                    bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
                    VirtRegMap &vrm,
                    const TargetRegisterClass* rc,
                    SmallVector<int, 4> &ReMatIds,
                    const MachineLoopInfo *loopInfo,
                    BitVector &SpillMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
                    BitVector &RestoreMBBs,
                    std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
                    std::map<unsigned,unsigned> &MBBVRegsMap,
                    std::vector<LiveInterval*> &NewLIs) {
  bool AllCanFold = true;
  unsigned NewVReg = 0;
  unsigned start = getBaseIndex(I->start);
  unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;

  // First collect all the def / use in this live range that will be rewritten.
  // Make sure they are sorted according to instruction index.
  // (continues rewriteInstructionsForSpills: collect and walk rewrites)
  std::vector<RewriteInfo> RewriteMIs;
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineInstr *MI = &*ri;
    MachineOperand &O = ri.getOperand();
    // Advance before recording: rewriting may invalidate the iterator.
    ++ri;
    assert(!O.isImplicit() && "Spilling register that's used as implicit use?");
    unsigned index = getInstructionIndex(MI);
    if (index < start || index >= end)
      continue;
    RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
  }
  std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());

  unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
  // Now rewrite the defs and uses.
  for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
    RewriteInfo &rwi = RewriteMIs[i];
    ++i;
    unsigned index = rwi.Index;
    bool MIHasUse = rwi.HasUse;
    bool MIHasDef = rwi.HasDef;
    MachineInstr *MI = rwi.MI;
    // If MI def and/or use the same register multiple times, then there
    // are multiple entries.  Merge them into one logical record.
    unsigned NumUses = MIHasUse;
    while (i != e && RewriteMIs[i].MI == MI) {
      assert(RewriteMIs[i].Index == index);
      bool isUse = RewriteMIs[i].HasUse;
      if (isUse) ++NumUses;
      MIHasUse |= isUse;
      MIHasDef |= RewriteMIs[i].HasDef;
      ++i;
    }
    MachineBasicBlock *MBB = MI->getParent();

    if (ImpUse && MI != ReMatDefMI) {
      // Re-matting an instruction with virtual register use. Update the
      // register interval's spill weight to HUGE_VALF to prevent it from
      // being spilled.
      LiveInterval &ImpLi = getInterval(ImpUse);
      ImpLi.weight = HUGE_VALF;
    }

    unsigned MBBId = MBB->getNumber();
    unsigned ThisVReg = 0;
    if (TrySplit) {
      // Reuse the vreg already assigned for this MBB, if any.
      std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
      if (NVI != MBBVRegsMap.end()) {
        ThisVReg = NVI->second;
        // One common case:
        // x = use
        // ...
        // ...
        // def = ...
        //     = use
        // It's better to start a new interval to avoid artificially
        // extending the new interval.
        if (MIHasDef && !MIHasUse) {
          MBBVRegsMap.erase(MBB->getNumber());
          ThisVReg = 0;
        }
      }
    }

    bool IsNew = ThisVReg == 0;
    if (IsNew) {
      // This ends the previous live interval. If all of its def / use
      // can be folded, give it a low spill weight.
      if (NewVReg && TrySplit && AllCanFold) {
        LiveInterval &nI = getOrCreateInterval(NewVReg);
        nI.weight /= 10.0F;
      }
      AllCanFold = true;
    }
    NewVReg = ThisVReg;

    // Rewrite this instruction; HasDef/HasUse report what got rewritten and
    // NewVReg may be created/updated by the callee.
    bool HasDef = false;
    bool HasUse = false;
    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
                                index, end, MI, ReMatOrigDefMI, ReMatDefMI,
                                Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                                CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
                                ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
    if (!HasDef && !HasUse)
      continue;

    AllCanFold &= CanFold;

    // Update weight of spill interval.
    LiveInterval &nI = getOrCreateInterval(NewVReg);
    if (!TrySplit) {
      // The spill weight is now infinity as it cannot be spilled again.
      nI.weight = HUGE_VALF;
      continue;
    }

    // Keep track of the last def and first use in each MBB.
    if (HasDef) {
      if (MI != ReMatOrigDefMI || !CanDelete) {
        bool HasKill = false;
        if (!HasUse)
          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
        else {
          // If this is a two-address code, then this index starts a new VNInfo.
          // (continues rewriteInstructionsForSpills: spill-point tracking)
          const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
          if (VNI)
            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
        }
        std::map<unsigned, std::vector<SRInfo> >::iterator SII =
          SpillIdxes.find(MBBId);
        if (!HasKill) {
          // The value survives past this def in the MBB: record (or update)
          // a spill point after this instruction.
          if (SII == SpillIdxes.end()) {
            std::vector<SRInfo> S;
            S.push_back(SRInfo(index, NewVReg, true));
            SpillIdxes.insert(std::make_pair(MBBId, S));
          } else if (SII->second.back().vreg != NewVReg) {
            SII->second.push_back(SRInfo(index, NewVReg, true));
          } else if ((int)index > SII->second.back().index) {
            // If there is an earlier def and this is a two-address
            // instruction, then it's not possible to fold the store (which
            // would also fold the load).
            SRInfo &Info = SII->second.back();
            Info.index = index;
            Info.canFold = !HasUse;
          }
          SpillMBBs.set(MBBId);
        } else if (SII != SpillIdxes.end() &&
                   SII->second.back().vreg == NewVReg &&
                   (int)index > SII->second.back().index) {
          // There is an earlier def that's not killed (must be two-address).
          // The spill is no longer needed.
          SII->second.pop_back();
          if (SII->second.empty()) {
            SpillIdxes.erase(MBBId);
            SpillMBBs.reset(MBBId);
          }
        }
      }
    }

    if (HasUse) {
      std::map<unsigned, std::vector<SRInfo> >::iterator SII =
        SpillIdxes.find(MBBId);
      if (SII != SpillIdxes.end() &&
          SII->second.back().vreg == NewVReg &&
          (int)index > SII->second.back().index)
        // Use(s) following the last def, it's not safe to fold the spill.
        SII->second.back().canFold = false;
      std::map<unsigned, std::vector<SRInfo> >::iterator RII =
        RestoreIdxes.find(MBBId);
      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
        // If we are splitting live intervals, only fold if it's the first
        // use and there isn't another use later in the MBB.
1271 RII->second.back().canFold = false; 1272 else if (IsNew) { 1273 // Only need a reload if there isn't an earlier def / use. 1274 if (RII == RestoreIdxes.end()) { 1275 std::vector<SRInfo> Infos; 1276 Infos.push_back(SRInfo(index, NewVReg, true)); 1277 RestoreIdxes.insert(std::make_pair(MBBId, Infos)); 1278 } else { 1279 RII->second.push_back(SRInfo(index, NewVReg, true)); 1280 } 1281 RestoreMBBs.set(MBBId); 1282 } 1283 } 1284 1285 // Update spill weight. 1286 unsigned loopDepth = loopInfo->getLoopDepth(MBB); 1287 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth); 1288 } 1289 1290 if (NewVReg && TrySplit && AllCanFold) { 1291 // If all of its def / use can be folded, give it a low spill weight. 1292 LiveInterval &nI = getOrCreateInterval(NewVReg); 1293 nI.weight /= 10.0F; 1294 } 1295} 1296 1297bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr, 1298 BitVector &RestoreMBBs, 1299 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) { 1300 if (!RestoreMBBs[Id]) 1301 return false; 1302 std::vector<SRInfo> &Restores = RestoreIdxes[Id]; 1303 for (unsigned i = 0, e = Restores.size(); i != e; ++i) 1304 if (Restores[i].index == index && 1305 Restores[i].vreg == vr && 1306 Restores[i].canFold) 1307 return true; 1308 return false; 1309} 1310 1311void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr, 1312 BitVector &RestoreMBBs, 1313 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) { 1314 if (!RestoreMBBs[Id]) 1315 return; 1316 std::vector<SRInfo> &Restores = RestoreIdxes[Id]; 1317 for (unsigned i = 0, e = Restores.size(); i != e; ++i) 1318 if (Restores[i].index == index && Restores[i].vreg) 1319 Restores[i].index = -1; 1320} 1321 1322/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being 1323/// spilled and create empty intervals for their uses. 
void
LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
                                    const TargetRegisterClass* rc,
                                    std::vector<LiveInterval*> &NewLIs) {
  // Walk all remaining references to li.reg: defs must be IMPLICIT_DEFs
  // (everything else was rewritten) and are deleted; each use gets a fresh
  // vreg with an empty interval.
  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
         re = mri_->reg_end(); ri != re; ) {
    MachineOperand &O = ri.getOperand();
    MachineInstr *MI = &*ri;
    // Advance before mutating MI, which may invalidate the iterator.
    ++ri;
    if (O.isDef()) {
      assert(MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF &&
             "Register def was not rewritten?");
      RemoveMachineInstrFromMaps(MI);
      vrm.RemoveMachineInstrFromMaps(MI);
      MI->eraseFromParent();
    } else {
      // This must be an use of an implicit_def so it's not part of the live
      // interval. Create a new empty live interval for it.
      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
      unsigned NewVReg = mri_->createVirtualRegister(rc);
      vrm.grow();
      vrm.setIsImplicitlyDefined(NewVReg);
      NewLIs.push_back(&getOrCreateInterval(NewVReg));
      // Rewrite every operand of MI that still names li.reg.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (MO.isReg() && MO.getReg() == li.reg)
          MO.setReg(NewVReg);
      }
    }
  }
}


/// addIntervalsForSpills - Create new intervals (one per resulting vreg) for
/// spilling li, rewriting all of its defs and uses, and return them.  May
/// split li at basic-block boundaries when profitable.
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
  // Since this is called after the analysis is done we don't know if
  // LiveVariables is available
  lv_ = getAnalysisToUpdate<LiveVariables>();

  assert(li.weight != HUGE_VALF &&
         "attempt to spill already spilled interval!");

  DOUT << "\t\t\t\tadding intervals for spills for interval: ";
  li.print(DOUT, tri_);
  DOUT << '\n';

  // Each bit specifies whether a spill is required in the MBB.
  // (continues addIntervalsForSpills: bookkeeping + pre-split fast path)
  BitVector SpillMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
  BitVector RestoreMBBs(mf_->getNumBlockIDs());
  std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
  std::map<unsigned,unsigned> MBBVRegsMap;
  std::vector<LiveInterval*> NewLIs;
  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);

  // Per-val# rematerialization info, indexed by val# id.
  unsigned NumValNums = li.getNumValNums();
  SmallVector<MachineInstr*, 4> ReMatDefs;
  ReMatDefs.resize(NumValNums, NULL);
  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
  ReMatOrigDefs.resize(NumValNums, NULL);
  SmallVector<int, 4> ReMatIds;
  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
  BitVector ReMatDelete(NumValNums);
  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;

  // Spilling a split live interval. It cannot be split any further. Also,
  // it's also guaranteed to be a single val# / range interval.
  if (vrm.getPreSplitReg(li.reg)) {
    vrm.setIsSplitFromReg(li.reg, 0);
    // Unset the split kill marker on the last use.
    unsigned KillIdx = vrm.getKillPoint(li.reg);
    if (KillIdx) {
      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
      assert(KillMI && "Last use disappeared?");
      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
      assert(KillOp != -1 && "Last use disappeared?");
      KillMI->getOperand(KillOp).setIsKill(false);
    }
    vrm.removeKillPoint(li.reg);
    bool DefIsReMat = vrm.isReMaterialized(li.reg);
    Slot = vrm.getStackSlot(li.reg);
    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
    MachineInstr *ReMatDefMI = DefIsReMat ?
      vrm.getReMaterializedMI(li.reg) : NULL;
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
    bool IsFirstRange = true;
    for (LiveInterval::Ranges::const_iterator
           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
      // If this is a split live interval with multiple ranges, it means there
      // are two-address instructions that re-defined the value. Only the
      // first def can be rematerialized!
      if (IsFirstRange) {
        // Note ReMatOrigDefMI has already been deleted.
        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      } else {
        rewriteInstructionsForSpills(li, false, I, NULL, 0,
                             Slot, 0, false, false, false,
                             false, vrm, rc, ReMatIds, loopInfo,
                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                             MBBVRegsMap, NewLIs);
      }
      IsFirstRange = false;
    }

    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  // Decide whether to split the interval at basic-block boundaries
  // (subject to the -split-intervals-at-bb and -split-limit options).
  bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
  if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
    TrySplit = false;
  if (TrySplit)
    ++numSplits;
  bool NeedStackSlot = false;
  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
       i != e; ++i) {
    const VNInfo *VNI = *i;
    unsigned VN = VNI->id;
    unsigned DefIdx = VNI->def;
    if (DefIdx == ~1U)
      continue; // Dead val#.
    // Is the def for the val# rematerializable?
    MachineInstr *ReMatDefMI = (DefIdx == ~0u)
      ? 0 : getInstructionFromIndex(DefIdx);
    bool dummy;
    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
      // Remember how to remat the def of this val#.
      // (continues addIntervalsForSpills: remat classification + rewrite)
      ReMatOrigDefs[VN] = ReMatDefMI;
      // Original def may be modified so we have to make a copy here. vrm must
      // delete these!
      ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();

      bool CanDelete = true;
      if (VNI->hasPHIKill) {
        // A kill is a phi node, not all of its uses can be rematerialized.
        // It must not be deleted.
        CanDelete = false;
        // Need a stack slot if there is any live range where uses cannot be
        // rematerialized.
        NeedStackSlot = true;
      }
      if (CanDelete)
        ReMatDelete.set(VN);
    } else {
      // Need a stack slot if there is any live range where uses cannot be
      // rematerialized.
      NeedStackSlot = true;
    }
  }

  // One stack slot per live interval.
  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
    Slot = vrm.assignVirt2StackSlot(li.reg);

  // Create new intervals and rewrite defs and uses.
  for (LiveInterval::Ranges::const_iterator
         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
    bool DefIsReMat = ReMatDefMI != NULL;
    bool CanDelete = ReMatDelete[I->valno->id];
    int LdSlot = 0;
    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
    bool isLoad = isLoadSS ||
      (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
                         CanDelete, vrm, rc, ReMatIds, loopInfo,
                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
                         MBBVRegsMap, NewLIs);
  }

  // Insert spills / restores if we are splitting.
  // (continues addIntervalsForSpills: emit spill points)
  if (!TrySplit) {
    handleSpilledImpDefs(li, vrm, rc, NewLIs);
    return NewLIs;
  }

  SmallPtrSet<LiveInterval*, 4> AddedKill;
  SmallVector<unsigned, 2> Ops;
  if (NeedStackSlot) {
    // Visit every MBB that has a recorded spill point.
    int Id = SpillMBBs.find_first();
    while (Id != -1) {
      std::vector<SRInfo> &spills = SpillIdxes[Id];
      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
        int index = spills[i].index;
        unsigned VReg = spills[i].vreg;
        LiveInterval &nI = getOrCreateInterval(VReg);
        bool isReMat = vrm.isReMaterialized(VReg);
        MachineInstr *MI = getInstructionFromIndex(index);
        bool CanFold = false;
        bool FoundUse = false;
        Ops.clear();
        if (spills[i].canFold) {
          CanFold = true;
          // Gather MI's operands on VReg; any non-first/non-only use makes
          // the store unfoldable.
          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
            MachineOperand &MO = MI->getOperand(j);
            if (!MO.isRegister() || MO.getReg() != VReg)
              continue;

            Ops.push_back(j);
            if (MO.isDef())
              continue;
            if (isReMat ||
                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
                                                RestoreMBBs, RestoreIdxes))) {
              // MI has two-address uses of the same register. If the use
              // isn't the first and only use in the BB, then we can't fold
              // it. FIXME: Move this to rewriteInstructionsForSpills.
              CanFold = false;
              break;
            }
            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
        if (CanFold && !Ops.empty()) {
          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
            Folded = true;
            if (FoundUse > 0) {
              // Also folded uses, do not issue a load.
              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
              nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
            }
            nI.removeRange(getDefIndex(index), getStoreIndex(index));
          }
        }

        // Otherwise tell the spiller to issue a spill.
        // (continues addIntervalsForSpills: unfolded spills, then restores)
        if (!Folded) {
          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
          bool isKill = LR->end == getStoreIndex(index);
          if (!MI->registerDefIsDead(nI.reg))
            // No need to spill a dead def.
            vrm.addSpillPoint(VReg, isKill, MI);
          if (isKill)
            AddedKill.insert(&nI);
        }
      }
      Id = SpillMBBs.find_next(Id);
    }
  }

  // Visit every MBB that has a recorded restore point.
  int Id = RestoreMBBs.find_first();
  while (Id != -1) {
    std::vector<SRInfo> &restores = RestoreIdxes[Id];
    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
      int index = restores[i].index;
      // index == -1 marks a restore invalidated by eraseRestoreInfo.
      if (index == -1)
        continue;
      unsigned VReg = restores[i].vreg;
      LiveInterval &nI = getOrCreateInterval(VReg);
      MachineInstr *MI = getInstructionFromIndex(index);
      bool CanFold = false;
      Ops.clear();
      if (restores[i].canFold) {
        CanFold = true;
        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
          MachineOperand &MO = MI->getOperand(j);
          if (!MO.isRegister() || MO.getReg() != VReg)
            continue;

          if (MO.isDef()) {
            // If this restore were to be folded, it would have been folded
            // already.
            CanFold = false;
            break;
          }
          Ops.push_back(j);
        }
      }

      // Fold the load into the use if possible.
      bool Folded = false;
      if (CanFold && !Ops.empty()) {
        if (!vrm.isReMaterialized(VReg))
          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
        else {
          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
          int LdSlot = 0;
          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
          // If the rematerializable def is a load, also try to fold it.
          if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
                                          Ops, isLoadSS, LdSlot, VReg);
          unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
          if (ImpUse) {
            // Re-matting an instruction with virtual register use. Add the
            // register as an implicit use on the use MI and update the register
            // interval's spill weight to HUGE_VALF to prevent it from being
            // spilled.
            LiveInterval &ImpLi = getInterval(ImpUse);
            ImpLi.weight = HUGE_VALF;
            MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
          }
        }
      }
      // If folding is not possible / failed, then tell the spiller to issue a
      // load / rematerialization for us.
      if (Folded)
        nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
      else
        vrm.addRestorePoint(VReg, MI);
    }
    Id = RestoreMBBs.find_next(Id);
  }

  // Finalize intervals: add kills, finalize spill weights, and filter out
  // dead intervals.
  std::vector<LiveInterval*> RetNewLIs;
  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
    LiveInterval *LI = NewLIs[i];
    if (!LI->empty()) {
      // Normalize the accumulated weight by the interval's size.
      LI->weight /= LI->getSize();
      if (!AddedKill.count(LI)) {
        // Mark the last use of the interval as a kill, unless it is tied to
        // a def (two-address) and not implicit.
        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
        unsigned LastUseIdx = getBaseIndex(LR->end);
        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
        assert(UseIdx != -1);
        if (LastUse->getOperand(UseIdx).isImplicit() ||
            LastUse->getDesc().getOperandConstraint(UseIdx,TOI::TIED_TO) == -1){
          LastUse->getOperand(UseIdx).setIsKill();
          vrm.addKillPoint(LI->reg, LastUseIdx);
        }
      }
      RetNewLIs.push_back(LI);
    }
  }

  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
  return RetNewLIs;
}

/// hasAllocatableSuperReg - Return true if the specified physical register has
/// any super register that's allocatable.
1669bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const { 1670 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) 1671 if (allocatableRegs_[*AS] && hasInterval(*AS)) 1672 return true; 1673 return false; 1674} 1675 1676/// getRepresentativeReg - Find the largest super register of the specified 1677/// physical register. 1678unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const { 1679 // Find the largest super-register that is allocatable. 1680 unsigned BestReg = Reg; 1681 for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) { 1682 unsigned SuperReg = *AS; 1683 if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) { 1684 BestReg = SuperReg; 1685 break; 1686 } 1687 } 1688 return BestReg; 1689} 1690 1691/// getNumConflictsWithPhysReg - Return the number of uses and defs of the 1692/// specified interval that conflicts with the specified physical register. 1693unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li, 1694 unsigned PhysReg) const { 1695 unsigned NumConflicts = 0; 1696 const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg)); 1697 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg), 1698 E = mri_->reg_end(); I != E; ++I) { 1699 MachineOperand &O = I.getOperand(); 1700 MachineInstr *MI = O.getParent(); 1701 unsigned Index = getInstructionIndex(MI); 1702 if (pli.liveAt(Index)) 1703 ++NumConflicts; 1704 } 1705 return NumConflicts; 1706} 1707 1708/// spillPhysRegAroundRegDefsUses - Spill the specified physical register 1709/// around all defs and uses of the specified interval. 
1710void LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li, 1711 unsigned PhysReg, VirtRegMap &vrm) { 1712 unsigned SpillReg = getRepresentativeReg(PhysReg); 1713 1714 for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS) 1715 // If there are registers which alias PhysReg, but which are not a 1716 // sub-register of the chosen representative super register. Assert 1717 // since we can't handle it yet. 1718 assert(*AS == SpillReg || !allocatableRegs_[*AS] || 1719 tri_->isSuperRegister(*AS, SpillReg)); 1720 1721 LiveInterval &pli = getInterval(SpillReg); 1722 SmallPtrSet<MachineInstr*, 8> SeenMIs; 1723 for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg), 1724 E = mri_->reg_end(); I != E; ++I) { 1725 MachineOperand &O = I.getOperand(); 1726 MachineInstr *MI = O.getParent(); 1727 if (SeenMIs.count(MI)) 1728 continue; 1729 SeenMIs.insert(MI); 1730 unsigned Index = getInstructionIndex(MI); 1731 if (pli.liveAt(Index)) { 1732 vrm.addEmergencySpill(SpillReg, MI); 1733 pli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1); 1734 for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS) { 1735 if (!hasInterval(*AS)) 1736 continue; 1737 LiveInterval &spli = getInterval(*AS); 1738 if (spli.liveAt(Index)) 1739 spli.removeRange(getLoadIndex(Index), getStoreIndex(Index)+1); 1740 } 1741 } 1742 } 1743} 1744