// LiveIntervalAnalysis.cpp -- revision e73701df947d23c65e96abc71a3be40ad77058ee
1//===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file was developed by the LLVM research group and is distributed under 6// the University of Illinois Open Source License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the LiveInterval analysis pass which is used 11// by the Linear Scan Register allocator. This pass linearizes the 12// basic blocks of the function in DFS order and uses the 13// LiveVariables pass to conservatively compute live intervals for 14// each virtual and physical register. 15// 16//===----------------------------------------------------------------------===// 17 18#define DEBUG_TYPE "liveintervals" 19#include "llvm/CodeGen/LiveIntervalAnalysis.h" 20#include "VirtRegMap.h" 21#include "llvm/Value.h" 22#include "llvm/Analysis/LoopInfo.h" 23#include "llvm/CodeGen/LiveVariables.h" 24#include "llvm/CodeGen/MachineFrameInfo.h" 25#include "llvm/CodeGen/MachineInstr.h" 26#include "llvm/CodeGen/Passes.h" 27#include "llvm/CodeGen/SSARegMap.h" 28#include "llvm/Target/MRegisterInfo.h" 29#include "llvm/Target/TargetInstrInfo.h" 30#include "llvm/Target/TargetMachine.h" 31#include "llvm/Support/CommandLine.h" 32#include "llvm/Support/Debug.h" 33#include "llvm/ADT/Statistic.h" 34#include "llvm/ADT/STLExtras.h" 35#include <algorithm> 36#include <cmath> 37#include <iostream> 38using namespace llvm; 39 40namespace { 41 RegisterAnalysis<LiveIntervals> X("liveintervals", "Live Interval Analysis"); 42 43 Statistic<> numIntervals 44 ("liveintervals", "Number of original intervals"); 45 46 Statistic<> numIntervalsAfter 47 ("liveintervals", "Number of intervals after coalescing"); 48 49 Statistic<> numJoins 50 ("liveintervals", "Number of interval joins performed"); 51 52 Statistic<> numPeep 53 ("liveintervals", "Number of identity moves eliminated after coalescing"); 54 55 
Statistic<> numFolded 56 ("liveintervals", "Number of loads/stores folded into instructions"); 57 58 cl::opt<bool> 59 EnableJoining("join-liveintervals", 60 cl::desc("Join compatible live intervals"), 61 cl::init(true)); 62}; 63 64void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const 65{ 66 AU.addRequired<LiveVariables>(); 67 AU.addPreservedID(PHIEliminationID); 68 AU.addRequiredID(PHIEliminationID); 69 AU.addRequiredID(TwoAddressInstructionPassID); 70 AU.addRequired<LoopInfo>(); 71 MachineFunctionPass::getAnalysisUsage(AU); 72} 73 74void LiveIntervals::releaseMemory() 75{ 76 mi2iMap_.clear(); 77 i2miMap_.clear(); 78 r2iMap_.clear(); 79 r2rMap_.clear(); 80} 81 82 83/// runOnMachineFunction - Register allocate the whole function 84/// 85bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) { 86 mf_ = &fn; 87 tm_ = &fn.getTarget(); 88 mri_ = tm_->getRegisterInfo(); 89 tii_ = tm_->getInstrInfo(); 90 lv_ = &getAnalysis<LiveVariables>(); 91 allocatableRegs_ = mri_->getAllocatableSet(fn); 92 r2rMap_.grow(mf_->getSSARegMap()->getLastVirtReg()); 93 94 // If this function has any live ins, insert a dummy instruction at the 95 // beginning of the function that we will pretend "defines" the values. This 96 // is to make the interval analysis simpler by providing a number. 97 if (fn.livein_begin() != fn.livein_end()) { 98 unsigned FirstLiveIn = fn.livein_begin()->first; 99 100 // Find a reg class that contains this live in. 
101 const TargetRegisterClass *RC = 0; 102 for (MRegisterInfo::regclass_iterator RCI = mri_->regclass_begin(), 103 E = mri_->regclass_end(); RCI != E; ++RCI) 104 if ((*RCI)->contains(FirstLiveIn)) { 105 RC = *RCI; 106 break; 107 } 108 109 MachineInstr *OldFirstMI = fn.begin()->begin(); 110 mri_->copyRegToReg(*fn.begin(), fn.begin()->begin(), 111 FirstLiveIn, FirstLiveIn, RC); 112 assert(OldFirstMI != fn.begin()->begin() && 113 "copyRetToReg didn't insert anything!"); 114 } 115 116 // number MachineInstrs 117 unsigned miIndex = 0; 118 for (MachineFunction::iterator mbb = mf_->begin(), mbbEnd = mf_->end(); 119 mbb != mbbEnd; ++mbb) 120 for (MachineBasicBlock::iterator mi = mbb->begin(), miEnd = mbb->end(); 121 mi != miEnd; ++mi) { 122 bool inserted = mi2iMap_.insert(std::make_pair(mi, miIndex)).second; 123 assert(inserted && "multiple MachineInstr -> index mappings"); 124 i2miMap_.push_back(mi); 125 miIndex += InstrSlots::NUM; 126 } 127 128 // Note intervals due to live-in values. 129 if (fn.livein_begin() != fn.livein_end()) { 130 MachineBasicBlock *Entry = fn.begin(); 131 for (MachineFunction::livein_iterator I = fn.livein_begin(), 132 E = fn.livein_end(); I != E; ++I) { 133 handlePhysicalRegisterDef(Entry, Entry->begin(), 134 getOrCreateInterval(I->first), 0, 0, true); 135 for (const unsigned* AS = mri_->getAliasSet(I->first); *AS; ++AS) 136 handlePhysicalRegisterDef(Entry, Entry->begin(), 137 getOrCreateInterval(*AS), 0, 0, true); 138 } 139 } 140 141 computeIntervals(); 142 143 numIntervals += getNumIntervals(); 144 145 DEBUG(std::cerr << "********** INTERVALS **********\n"; 146 for (iterator I = begin(), E = end(); I != E; ++I) { 147 I->second.print(std::cerr, mri_); 148 std::cerr << "\n"; 149 }); 150 151 // join intervals if requested 152 if (EnableJoining) joinIntervals(); 153 154 numIntervalsAfter += getNumIntervals(); 155 156 // perform a final pass over the instructions and compute spill 157 // weights, coalesce virtual registers and remove identity moves 
158 const LoopInfo& loopInfo = getAnalysis<LoopInfo>(); 159 160 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end(); 161 mbbi != mbbe; ++mbbi) { 162 MachineBasicBlock* mbb = mbbi; 163 unsigned loopDepth = loopInfo.getLoopDepth(mbb->getBasicBlock()); 164 165 for (MachineBasicBlock::iterator mii = mbb->begin(), mie = mbb->end(); 166 mii != mie; ) { 167 // if the move will be an identity move delete it 168 unsigned srcReg, dstReg, RegRep; 169 if (tii_->isMoveInstr(*mii, srcReg, dstReg) && 170 (RegRep = rep(srcReg)) == rep(dstReg)) { 171 // remove from def list 172 LiveInterval &interval = getOrCreateInterval(RegRep); 173 // remove index -> MachineInstr and 174 // MachineInstr -> index mappings 175 Mi2IndexMap::iterator mi2i = mi2iMap_.find(mii); 176 if (mi2i != mi2iMap_.end()) { 177 i2miMap_[mi2i->second/InstrSlots::NUM] = 0; 178 mi2iMap_.erase(mi2i); 179 } 180 mii = mbbi->erase(mii); 181 ++numPeep; 182 } 183 else { 184 for (unsigned i = 0; i < mii->getNumOperands(); ++i) { 185 const MachineOperand& mop = mii->getOperand(i); 186 if (mop.isRegister() && mop.getReg() && 187 MRegisterInfo::isVirtualRegister(mop.getReg())) { 188 // replace register with representative register 189 unsigned reg = rep(mop.getReg()); 190 mii->getOperand(i).setReg(reg); 191 192 LiveInterval &RegInt = getInterval(reg); 193 RegInt.weight += 194 (mop.isUse() + mop.isDef()) * pow(10.0F, (int)loopDepth); 195 } 196 } 197 ++mii; 198 } 199 } 200 } 201 202 DEBUG(dump()); 203 return true; 204} 205 206/// print - Implement the dump method. 
207void LiveIntervals::print(std::ostream &O, const Module* ) const { 208 O << "********** INTERVALS **********\n"; 209 for (const_iterator I = begin(), E = end(); I != E; ++I) { 210 I->second.print(std::cerr, mri_); 211 std::cerr << "\n"; 212 } 213 214 O << "********** MACHINEINSTRS **********\n"; 215 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end(); 216 mbbi != mbbe; ++mbbi) { 217 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n"; 218 for (MachineBasicBlock::iterator mii = mbbi->begin(), 219 mie = mbbi->end(); mii != mie; ++mii) { 220 O << getInstructionIndex(mii) << '\t' << *mii; 221 } 222 } 223} 224 225std::vector<LiveInterval*> LiveIntervals:: 226addIntervalsForSpills(const LiveInterval &li, VirtRegMap &vrm, int slot) { 227 // since this is called after the analysis is done we don't know if 228 // LiveVariables is available 229 lv_ = getAnalysisToUpdate<LiveVariables>(); 230 231 std::vector<LiveInterval*> added; 232 233 assert(li.weight != HUGE_VAL && 234 "attempt to spill already spilled interval!"); 235 236 DEBUG(std::cerr << "\t\t\t\tadding intervals for spills for interval: " 237 << li << '\n'); 238 239 const TargetRegisterClass* rc = mf_->getSSARegMap()->getRegClass(li.reg); 240 241 for (LiveInterval::Ranges::const_iterator 242 i = li.ranges.begin(), e = li.ranges.end(); i != e; ++i) { 243 unsigned index = getBaseIndex(i->start); 244 unsigned end = getBaseIndex(i->end-1) + InstrSlots::NUM; 245 for (; index != end; index += InstrSlots::NUM) { 246 // skip deleted instructions 247 while (index != end && !getInstructionFromIndex(index)) 248 index += InstrSlots::NUM; 249 if (index == end) break; 250 251 MachineInstr *MI = getInstructionFromIndex(index); 252 253 // NewRegLiveIn - This instruction might have multiple uses of the spilled 254 // register. In this case, for the first use, keep track of the new vreg 255 // that we reload it into. 
If we see a second use, reuse this vreg 256 // instead of creating live ranges for two reloads. 257 unsigned NewRegLiveIn = 0; 258 259 for_operand: 260 for (unsigned i = 0; i != MI->getNumOperands(); ++i) { 261 MachineOperand& mop = MI->getOperand(i); 262 if (mop.isRegister() && mop.getReg() == li.reg) { 263 if (NewRegLiveIn && mop.isUse()) { 264 // We already emitted a reload of this value, reuse it for 265 // subsequent operands. 266 MI->getOperand(i).setReg(NewRegLiveIn); 267 DEBUG(std::cerr << "\t\t\t\treused reload into reg" << NewRegLiveIn 268 << " for operand #" << i << '\n'); 269 } else if (MachineInstr* fmi = mri_->foldMemoryOperand(MI, i, slot)) { 270 // Attempt to fold the memory reference into the instruction. If we 271 // can do this, we don't need to insert spill code. 272 if (lv_) 273 lv_->instructionChanged(MI, fmi); 274 MachineBasicBlock &MBB = *MI->getParent(); 275 vrm.virtFolded(li.reg, MI, i, fmi); 276 mi2iMap_.erase(MI); 277 i2miMap_[index/InstrSlots::NUM] = fmi; 278 mi2iMap_[fmi] = index; 279 MI = MBB.insert(MBB.erase(MI), fmi); 280 ++numFolded; 281 // Folding the load/store can completely change the instruction in 282 // unpredictable ways, rescan it from the beginning. 283 goto for_operand; 284 } else { 285 // This is tricky. We need to add information in the interval about 286 // the spill code so we have to use our extra load/store slots. 287 // 288 // If we have a use we are going to have a load so we start the 289 // interval from the load slot onwards. Otherwise we start from the 290 // def slot. 291 unsigned start = (mop.isUse() ? 292 getLoadIndex(index) : 293 getDefIndex(index)); 294 // If we have a def we are going to have a store right after it so 295 // we end the interval after the use of the next 296 // instruction. Otherwise we end after the use of this instruction. 297 unsigned end = 1 + (mop.isDef() ? 
298 getStoreIndex(index) : 299 getUseIndex(index)); 300 301 // create a new register for this spill 302 NewRegLiveIn = mf_->getSSARegMap()->createVirtualRegister(rc); 303 MI->getOperand(i).setReg(NewRegLiveIn); 304 vrm.grow(); 305 vrm.assignVirt2StackSlot(NewRegLiveIn, slot); 306 LiveInterval& nI = getOrCreateInterval(NewRegLiveIn); 307 assert(nI.empty()); 308 309 // the spill weight is now infinity as it 310 // cannot be spilled again 311 nI.weight = float(HUGE_VAL); 312 LiveRange LR(start, end, nI.getNextValue()); 313 DEBUG(std::cerr << " +" << LR); 314 nI.addRange(LR); 315 added.push_back(&nI); 316 317 // update live variables if it is available 318 if (lv_) 319 lv_->addVirtualRegisterKilled(NewRegLiveIn, MI); 320 321 // If this is a live in, reuse it for subsequent live-ins. If it's 322 // a def, we can't do this. 323 if (!mop.isUse()) NewRegLiveIn = 0; 324 325 DEBUG(std::cerr << "\t\t\t\tadded new interval: " << nI << '\n'); 326 } 327 } 328 } 329 } 330 } 331 332 return added; 333} 334 335void LiveIntervals::printRegName(unsigned reg) const 336{ 337 if (MRegisterInfo::isPhysicalRegister(reg)) 338 std::cerr << mri_->getName(reg); 339 else 340 std::cerr << "%reg" << reg; 341} 342 343void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock* mbb, 344 MachineBasicBlock::iterator mi, 345 LiveInterval& interval) 346{ 347 DEBUG(std::cerr << "\t\tregister: "; printRegName(interval.reg)); 348 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg); 349 350 // Virtual registers may be defined multiple times (due to phi 351 // elimination and 2-addr elimination). Much of what we do only has to be 352 // done once for the vreg. We use an empty interval to detect the first 353 // time we see a vreg. 354 if (interval.empty()) { 355 // Get the Idx of the defining instructions. 
356 unsigned defIndex = getDefIndex(getInstructionIndex(mi)); 357 358 unsigned ValNum = interval.getNextValue(); 359 assert(ValNum == 0 && "First value in interval is not 0?"); 360 ValNum = 0; // Clue in the optimizer. 361 362 // Loop over all of the blocks that the vreg is defined in. There are 363 // two cases we have to handle here. The most common case is a vreg 364 // whose lifetime is contained within a basic block. In this case there 365 // will be a single kill, in MBB, which comes after the definition. 366 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) { 367 // FIXME: what about dead vars? 368 unsigned killIdx; 369 if (vi.Kills[0] != mi) 370 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1; 371 else 372 killIdx = defIndex+1; 373 374 // If the kill happens after the definition, we have an intra-block 375 // live range. 376 if (killIdx > defIndex) { 377 assert(vi.AliveBlocks.empty() && 378 "Shouldn't be alive across any blocks!"); 379 LiveRange LR(defIndex, killIdx, ValNum); 380 interval.addRange(LR); 381 DEBUG(std::cerr << " +" << LR << "\n"); 382 return; 383 } 384 } 385 386 // The other case we handle is when a virtual register lives to the end 387 // of the defining block, potentially live across some blocks, then is 388 // live into some number of blocks, but gets killed. Start by adding a 389 // range that goes from this definition to the end of the defining block. 390 LiveRange NewLR(defIndex, 391 getInstructionIndex(&mbb->back()) + InstrSlots::NUM, 392 ValNum); 393 DEBUG(std::cerr << " +" << NewLR); 394 interval.addRange(NewLR); 395 396 // Iterate over all of the blocks that the variable is completely 397 // live in, adding [insrtIndex(begin), instrIndex(end)+4) to the 398 // live interval. 
399 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) { 400 if (vi.AliveBlocks[i]) { 401 MachineBasicBlock* mbb = mf_->getBlockNumbered(i); 402 if (!mbb->empty()) { 403 LiveRange LR(getInstructionIndex(&mbb->front()), 404 getInstructionIndex(&mbb->back()) + InstrSlots::NUM, 405 ValNum); 406 interval.addRange(LR); 407 DEBUG(std::cerr << " +" << LR); 408 } 409 } 410 } 411 412 // Finally, this virtual register is live from the start of any killing 413 // block to the 'use' slot of the killing instruction. 414 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) { 415 MachineInstr *Kill = vi.Kills[i]; 416 LiveRange LR(getInstructionIndex(Kill->getParent()->begin()), 417 getUseIndex(getInstructionIndex(Kill))+1, 418 ValNum); 419 interval.addRange(LR); 420 DEBUG(std::cerr << " +" << LR); 421 } 422 423 } else { 424 // If this is the second time we see a virtual register definition, it 425 // must be due to phi elimination or two addr elimination. If this is 426 // the result of two address elimination, then the vreg is the first 427 // operand, and is a def-and-use. 428 if (mi->getOperand(0).isRegister() && 429 mi->getOperand(0).getReg() == interval.reg && 430 mi->getOperand(0).isDef() && mi->getOperand(0).isUse()) { 431 // If this is a two-address definition, then we have already processed 432 // the live range. The only problem is that we didn't realize there 433 // are actually two values in the live interval. Because of this we 434 // need to take the LiveRegion that defines this register and split it 435 // into two values. 436 unsigned DefIndex = getDefIndex(getInstructionIndex(vi.DefInst)); 437 unsigned RedefIndex = getDefIndex(getInstructionIndex(mi)); 438 439 // Delete the initial value, which should be short and continuous, 440 // becuase the 2-addr copy must be in the same MBB as the redef. 
441 interval.removeRange(DefIndex, RedefIndex); 442 443 LiveRange LR(DefIndex, RedefIndex, interval.getNextValue()); 444 DEBUG(std::cerr << " replace range with " << LR); 445 interval.addRange(LR); 446 447 // If this redefinition is dead, we need to add a dummy unit live 448 // range covering the def slot. 449 if (lv_->RegisterDefIsDead(mi, interval.reg)) 450 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, 0)); 451 452 DEBUG(std::cerr << "RESULT: " << interval); 453 454 } else { 455 // Otherwise, this must be because of phi elimination. If this is the 456 // first redefinition of the vreg that we have seen, go back and change 457 // the live range in the PHI block to be a different value number. 458 if (interval.containsOneValue()) { 459 assert(vi.Kills.size() == 1 && 460 "PHI elimination vreg should have one kill, the PHI itself!"); 461 462 // Remove the old range that we now know has an incorrect number. 463 MachineInstr *Killer = vi.Kills[0]; 464 unsigned Start = getInstructionIndex(Killer->getParent()->begin()); 465 unsigned End = getUseIndex(getInstructionIndex(Killer))+1; 466 DEBUG(std::cerr << "Removing [" << Start << "," << End << "] from: " 467 << interval << "\n"); 468 interval.removeRange(Start, End); 469 DEBUG(std::cerr << "RESULT: " << interval); 470 471 // Replace the interval with one of a NEW value number. 472 LiveRange LR(Start, End, interval.getNextValue()); 473 DEBUG(std::cerr << " replace range with " << LR); 474 interval.addRange(LR); 475 DEBUG(std::cerr << "RESULT: " << interval); 476 } 477 478 // In the case of PHI elimination, each variable definition is only 479 // live until the end of the block. We've already taken care of the 480 // rest of the live range. 
481 unsigned defIndex = getDefIndex(getInstructionIndex(mi)); 482 LiveRange LR(defIndex, 483 getInstructionIndex(&mbb->back()) + InstrSlots::NUM, 484 interval.getNextValue()); 485 interval.addRange(LR); 486 DEBUG(std::cerr << " +" << LR); 487 } 488 } 489 490 DEBUG(std::cerr << '\n'); 491} 492 493void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB, 494 MachineBasicBlock::iterator mi, 495 LiveInterval& interval, 496 unsigned SrcReg, unsigned DestReg, 497 bool isLiveIn) 498{ 499 // A physical register cannot be live across basic block, so its 500 // lifetime must end somewhere in its defining basic block. 501 DEBUG(std::cerr << "\t\tregister: "; printRegName(interval.reg)); 502 typedef LiveVariables::killed_iterator KillIter; 503 504 unsigned baseIndex = getInstructionIndex(mi); 505 unsigned start = getDefIndex(baseIndex); 506 unsigned end = start; 507 508 // If it is not used after definition, it is considered dead at 509 // the instruction defining it. Hence its interval is: 510 // [defSlot(def), defSlot(def)+1) 511 if (lv_->RegisterDefIsDead(mi, interval.reg)) { 512 DEBUG(std::cerr << " dead"); 513 end = getDefIndex(start) + 1; 514 goto exit; 515 } 516 517 // If it is not dead on definition, it must be killed by a 518 // subsequent instruction. Hence its interval is: 519 // [defSlot(def), useSlot(kill)+1) 520 while (++mi != MBB->end()) { 521 baseIndex += InstrSlots::NUM; 522 if (lv_->KillsRegister(mi, interval.reg)) { 523 DEBUG(std::cerr << " killed"); 524 end = getUseIndex(baseIndex) + 1; 525 goto exit; 526 } 527 } 528 529 // The only case we should have a dead physreg here without a killing or 530 // instruction where we know it's dead is if it is live-in to the function 531 // and never used. 532 assert(isLiveIn && "physreg was not killed in defining block!"); 533 end = getDefIndex(start) + 1; // It's dead. 
534 535exit: 536 assert(start < end && "did not find end of interval?"); 537 538 // Finally, if this is defining a new range for the physical register, and if 539 // that physreg is just a copy from a vreg, and if THAT vreg was a copy from 540 // the physreg, then the new fragment has the same value as the one copied 541 // into the vreg. 542 if (interval.reg == DestReg && !interval.empty() && 543 MRegisterInfo::isVirtualRegister(SrcReg)) { 544 545 // Get the live interval for the vreg, see if it is defined by a copy. 546 LiveInterval &SrcInterval = getOrCreateInterval(SrcReg); 547 548 if (SrcInterval.containsOneValue()) { 549 assert(!SrcInterval.empty() && "Can't contain a value and be empty!"); 550 551 // Get the first index of the first range. Though the interval may have 552 // multiple liveranges in it, we only check the first. 553 unsigned StartIdx = SrcInterval.begin()->start; 554 MachineInstr *SrcDefMI = getInstructionFromIndex(StartIdx); 555 556 // Check to see if the vreg was defined by a copy instruction, and that 557 // the source was this physreg. 558 unsigned VRegSrcSrc, VRegSrcDest; 559 if (tii_->isMoveInstr(*SrcDefMI, VRegSrcSrc, VRegSrcDest) && 560 SrcReg == VRegSrcDest && VRegSrcSrc == DestReg) { 561 // Okay, now we know that the vreg was defined by a copy from this 562 // physreg. Find the value number being copied and use it as the value 563 // for this range. 
564 const LiveRange *DefRange = interval.getLiveRangeContaining(StartIdx-1); 565 if (DefRange) { 566 LiveRange LR(start, end, DefRange->ValId); 567 interval.addRange(LR); 568 DEBUG(std::cerr << " +" << LR << '\n'); 569 return; 570 } 571 } 572 } 573 } 574 575 576 LiveRange LR(start, end, interval.getNextValue()); 577 interval.addRange(LR); 578 DEBUG(std::cerr << " +" << LR << '\n'); 579} 580 581void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB, 582 MachineBasicBlock::iterator MI, 583 unsigned reg) { 584 if (MRegisterInfo::isVirtualRegister(reg)) 585 handleVirtualRegisterDef(MBB, MI, getOrCreateInterval(reg)); 586 else if (allocatableRegs_[reg]) { 587 unsigned SrcReg = 0, DestReg = 0; 588 if (!tii_->isMoveInstr(*MI, SrcReg, DestReg)) 589 SrcReg = DestReg = 0; 590 591 handlePhysicalRegisterDef(MBB, MI, getOrCreateInterval(reg), 592 SrcReg, DestReg); 593 for (const unsigned* AS = mri_->getAliasSet(reg); *AS; ++AS) 594 handlePhysicalRegisterDef(MBB, MI, getOrCreateInterval(*AS), 595 SrcReg, DestReg); 596 } 597} 598 599/// computeIntervals - computes the live intervals for virtual 600/// registers. 
for some ordering of the machine instructions [1,N] a 601/// live interval is an interval [i, j) where 1 <= i <= j < N for 602/// which a variable is live 603void LiveIntervals::computeIntervals() 604{ 605 DEBUG(std::cerr << "********** COMPUTING LIVE INTERVALS **********\n"); 606 DEBUG(std::cerr << "********** Function: " 607 << ((Value*)mf_->getFunction())->getName() << '\n'); 608 bool IgnoreFirstInstr = mf_->livein_begin() != mf_->livein_end(); 609 610 for (MachineFunction::iterator I = mf_->begin(), E = mf_->end(); 611 I != E; ++I) { 612 MachineBasicBlock* mbb = I; 613 DEBUG(std::cerr << ((Value*)mbb->getBasicBlock())->getName() << ":\n"); 614 615 MachineBasicBlock::iterator mi = mbb->begin(), miEnd = mbb->end(); 616 if (IgnoreFirstInstr) { ++mi; IgnoreFirstInstr = false; } 617 for (; mi != miEnd; ++mi) { 618 const TargetInstrDescriptor& tid = 619 tm_->getInstrInfo()->get(mi->getOpcode()); 620 DEBUG(std::cerr << getInstructionIndex(mi) << "\t" << *mi); 621 622 // handle implicit defs 623 for (const unsigned* id = tid.ImplicitDefs; *id; ++id) 624 handleRegisterDef(mbb, mi, *id); 625 626 // handle explicit defs 627 for (int i = mi->getNumOperands() - 1; i >= 0; --i) { 628 MachineOperand& mop = mi->getOperand(i); 629 // handle register defs - build intervals 630 if (mop.isRegister() && mop.getReg() && mop.isDef()) 631 handleRegisterDef(mbb, mi, mop.getReg()); 632 } 633 } 634 } 635} 636 637/// IntA is defined as a copy from IntB and we know it only has one value 638/// number. If all of the places that IntA and IntB overlap are defined by 639/// copies from IntA to IntB, we know that these two ranges can really be 640/// merged if we adjust the value numbers. If it is safe, adjust the value 641/// numbers and return true, allowing coalescing to occur. 
642bool LiveIntervals:: 643AdjustIfAllOverlappingRangesAreCopiesFrom(LiveInterval &IntA, 644 LiveInterval &IntB, 645 unsigned CopyIdx) { 646 std::vector<LiveRange*> Ranges; 647 IntA.getOverlapingRanges(IntB, CopyIdx, Ranges); 648 649 assert(!Ranges.empty() && "Why didn't we do a simple join of this?"); 650 651 unsigned IntBRep = rep(IntB.reg); 652 653 // Check to see if all of the overlaps (entries in Ranges) are defined by a 654 // copy from IntA. If not, exit. 655 for (unsigned i = 0, e = Ranges.size(); i != e; ++i) { 656 unsigned Idx = Ranges[i]->start; 657 MachineInstr *MI = getInstructionFromIndex(Idx); 658 unsigned SrcReg, DestReg; 659 if (!tii_->isMoveInstr(*MI, SrcReg, DestReg)) return false; 660 661 // If this copy isn't actually defining this range, it must be a live 662 // range spanning basic blocks or something. 663 if (rep(DestReg) != rep(IntA.reg)) return false; 664 665 // Check to see if this is coming from IntB. If not, bail out. 666 if (rep(SrcReg) != IntBRep) return false; 667 } 668 669 // Okay, we can change this one. Get the IntB value number that IntA is 670 // copied from. 671 unsigned ActualValNo = IntA.getLiveRangeContaining(CopyIdx-1)->ValId; 672 673 // Change all of the value numbers to the same as what we IntA is copied from. 
674 for (unsigned i = 0, e = Ranges.size(); i != e; ++i) 675 Ranges[i]->ValId = ActualValNo; 676 677 return true; 678} 679 680void LiveIntervals::joinIntervalsInMachineBB(MachineBasicBlock *MBB) { 681 DEBUG(std::cerr << ((Value*)MBB->getBasicBlock())->getName() << ":\n"); 682 683 for (MachineBasicBlock::iterator mi = MBB->begin(), mie = MBB->end(); 684 mi != mie; ++mi) { 685 DEBUG(std::cerr << getInstructionIndex(mi) << '\t' << *mi); 686 687 // we only join virtual registers with allocatable 688 // physical registers since we do not have liveness information 689 // on not allocatable physical registers 690 unsigned SrcReg, DestReg; 691 if (tii_->isMoveInstr(*mi, SrcReg, DestReg) && 692 (MRegisterInfo::isVirtualRegister(SrcReg) || allocatableRegs_[SrcReg])&& 693 (MRegisterInfo::isVirtualRegister(DestReg)||allocatableRegs_[DestReg])){ 694 695 // Get representative registers. 696 SrcReg = rep(SrcReg); 697 DestReg = rep(DestReg); 698 699 // If they are already joined we continue. 700 if (SrcReg == DestReg) 701 continue; 702 703 // If they are both physical registers, we cannot join them. 704 if (MRegisterInfo::isPhysicalRegister(SrcReg) && 705 MRegisterInfo::isPhysicalRegister(DestReg)) 706 continue; 707 708 // If they are not of compatible register classes, we cannot join them. 709 bool Swap = false; 710 if (!compatibleRegisterClasses(SrcReg, DestReg, Swap)) { 711 DEBUG(std::cerr << "Register classes aren't compatible!\n"); 712 continue; 713 } 714 715 LiveInterval &SrcInt = getInterval(SrcReg); 716 LiveInterval &DestInt = getInterval(DestReg); 717 assert(SrcInt.reg == SrcReg && DestInt.reg == DestReg && 718 "Register mapping is horribly broken!"); 719 720 DEBUG(std::cerr << "\t\tInspecting " << SrcInt << " and " << DestInt 721 << ": "); 722 723 // If two intervals contain a single value and are joined by a copy, it 724 // does not matter if the intervals overlap, they can always be joined. 
725 bool Joinable = SrcInt.containsOneValue() && DestInt.containsOneValue(); 726 727 unsigned MIDefIdx = getDefIndex(getInstructionIndex(mi)); 728 729 // If the intervals think that this is joinable, do so now. 730 if (!Joinable && DestInt.joinable(SrcInt, MIDefIdx)) 731 Joinable = true; 732 733 // If DestInt is actually a copy from SrcInt (which we know) that is used 734 // to define another value of SrcInt, we can change the other range of 735 // SrcInt to be the value of the range that defines DestInt, allowing a 736 // coalesce. 737 if (!Joinable && DestInt.containsOneValue() && 738 AdjustIfAllOverlappingRangesAreCopiesFrom(SrcInt, DestInt, MIDefIdx)) 739 Joinable = true; 740 741 if (!Joinable || overlapsAliases(&SrcInt, &DestInt)) { 742 DEBUG(std::cerr << "Interference!\n"); 743 } else { 744 DestInt.join(SrcInt, MIDefIdx); 745 DEBUG(std::cerr << "Joined. Result = " << DestInt << "\n"); 746 747 if (!Swap && !MRegisterInfo::isPhysicalRegister(SrcReg)) { 748 r2iMap_.erase(SrcReg); 749 r2rMap_[SrcReg] = DestReg; 750 } else { 751 // Otherwise merge the data structures the other way so we don't lose 752 // the physreg information. 753 r2rMap_[DestReg] = SrcReg; 754 DestInt.reg = SrcReg; 755 SrcInt.swap(DestInt); 756 r2iMap_.erase(DestReg); 757 } 758 ++numJoins; 759 } 760 } 761 } 762} 763 764namespace { 765 // DepthMBBCompare - Comparison predicate that sort first based on the loop 766 // depth of the basic block (the unsigned), and then on the MBB number. 
767 struct DepthMBBCompare { 768 typedef std::pair<unsigned, MachineBasicBlock*> DepthMBBPair; 769 bool operator()(const DepthMBBPair &LHS, const DepthMBBPair &RHS) const { 770 if (LHS.first > RHS.first) return true; // Deeper loops first 771 return LHS.first == RHS.first && 772 LHS.second->getNumber() < RHS.second->getNumber(); 773 } 774 }; 775} 776 777void LiveIntervals::joinIntervals() { 778 DEBUG(std::cerr << "********** JOINING INTERVALS ***********\n"); 779 780 const LoopInfo &LI = getAnalysis<LoopInfo>(); 781 if (LI.begin() == LI.end()) { 782 // If there are no loops in the function, join intervals in function order. 783 for (MachineFunction::iterator I = mf_->begin(), E = mf_->end(); 784 I != E; ++I) 785 joinIntervalsInMachineBB(I); 786 } else { 787 // Otherwise, join intervals in inner loops before other intervals. 788 // Unfortunately we can't just iterate over loop hierarchy here because 789 // there may be more MBB's than BB's. Collect MBB's for sorting. 790 std::vector<std::pair<unsigned, MachineBasicBlock*> > MBBs; 791 for (MachineFunction::iterator I = mf_->begin(), E = mf_->end(); 792 I != E; ++I) 793 MBBs.push_back(std::make_pair(LI.getLoopDepth(I->getBasicBlock()), I)); 794 795 // Sort by loop depth. 796 std::sort(MBBs.begin(), MBBs.end(), DepthMBBCompare()); 797 798 // Finally, join intervals in loop nest order. 799 for (unsigned i = 0, e = MBBs.size(); i != e; ++i) 800 joinIntervalsInMachineBB(MBBs[i].second); 801 } 802 803 DEBUG(std::cerr << "*** Register mapping ***\n"); 804 DEBUG(for (int i = 0, e = r2rMap_.size(); i != e; ++i) 805 if (r2rMap_[i]) 806 std::cerr << " reg " << i << " -> reg " << r2rMap_[i] << "\n"); 807} 808 809/// Return true if the two specified registers belong to same or compatible 810/// register classes. The registers may be either phys or virt regs. 811bool LiveIntervals::compatibleRegisterClasses(unsigned RegA, unsigned RegB, 812 bool &Swap) const { 813 814 // Get the register classes for the first reg. 
815 if (MRegisterInfo::isPhysicalRegister(RegA)) { 816 assert(MRegisterInfo::isVirtualRegister(RegB) && 817 "Shouldn't consider two physregs!"); 818 return mf_->getSSARegMap()->getRegClass(RegB)->contains(RegA); 819 } 820 821 // Compare against the regclass for the second reg. 822 const TargetRegisterClass *RegClassA = mf_->getSSARegMap()->getRegClass(RegA); 823 if (MRegisterInfo::isVirtualRegister(RegB)) { 824 const TargetRegisterClass *RegClassB=mf_->getSSARegMap()->getRegClass(RegB); 825 if (RegClassA == RegClassB) 826 return true; 827 else { 828 if (RegClassB->hasSubRegClass(RegClassA)) { 829 Swap = true; 830 return true; 831 } 832 return RegClassA->hasSubRegClass(RegClassB); 833 } 834 } else 835 return RegClassA->contains(RegB); 836} 837 838bool LiveIntervals::overlapsAliases(const LiveInterval *LHS, 839 const LiveInterval *RHS) const { 840 if (!MRegisterInfo::isPhysicalRegister(LHS->reg)) { 841 if (!MRegisterInfo::isPhysicalRegister(RHS->reg)) 842 return false; // vreg-vreg merge has no aliases! 843 std::swap(LHS, RHS); 844 } 845 846 assert(MRegisterInfo::isPhysicalRegister(LHS->reg) && 847 MRegisterInfo::isVirtualRegister(RHS->reg) && 848 "first interval must describe a physical register"); 849 850 for (const unsigned *AS = mri_->getAliasSet(LHS->reg); *AS; ++AS) 851 if (RHS->overlaps(getInterval(*AS))) 852 return true; 853 854 return false; 855} 856 857LiveInterval LiveIntervals::createInterval(unsigned reg) { 858 float Weight = MRegisterInfo::isPhysicalRegister(reg) ? 859 (float)HUGE_VAL :0.0F; 860 return LiveInterval(reg, Weight); 861} 862