IceRegAlloc.cpp revision 87ff3a186eb0e5a9ff791964e377910acceed84e
1//===- subzero/src/IceRegAlloc.cpp - Linear-scan implementation -----------===// 2// 3// The Subzero Code Generator 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9// 10// This file implements the LinearScan class, which performs the 11// linear-scan register allocation after liveness analysis has been 12// performed. 13// 14//===----------------------------------------------------------------------===// 15 16#include "IceCfg.h" 17#include "IceCfgNode.h" 18#include "IceInst.h" 19#include "IceOperand.h" 20#include "IceRegAlloc.h" 21#include "IceTargetLowering.h" 22 23namespace Ice { 24 25namespace { 26 27// Returns true if Var has any definitions within Item's live range. 28// TODO(stichnot): Consider trimming the Definitions list similar to 29// how the live ranges are trimmed, since all the overlapsDefs() tests 30// are whether some variable's definitions overlap Cur, and trimming 31// is with respect Cur.start. Initial tests show no measurable 32// performance difference, so we'll keep the code simple for now. 
33bool overlapsDefs(const Cfg *Func, const Variable *Item, const Variable *Var) { 34 const bool UseTrimmed = true; 35 VariablesMetadata *VMetadata = Func->getVMetadata(); 36 if (const Inst *FirstDef = VMetadata->getFirstDefinition(Var)) 37 if (Item->getLiveRange().overlapsInst(FirstDef->getNumber(), UseTrimmed)) 38 return true; 39 const InstDefList &Defs = VMetadata->getLatterDefinitions(Var); 40 for (size_t i = 0; i < Defs.size(); ++i) { 41 if (Item->getLiveRange().overlapsInst(Defs[i]->getNumber(), UseTrimmed)) 42 return true; 43 } 44 return false; 45} 46 47void dumpDisableOverlap(const Cfg *Func, const Variable *Var, 48 const char *Reason) { 49 if (Func->getContext()->isVerbose(IceV_LinearScan)) { 50 VariablesMetadata *VMetadata = Func->getVMetadata(); 51 Ostream &Str = Func->getContext()->getStrDump(); 52 Str << "Disabling Overlap due to " << Reason << " " << *Var 53 << " LIVE=" << Var->getLiveRange() << " Defs="; 54 if (const Inst *FirstDef = VMetadata->getFirstDefinition(Var)) 55 Str << FirstDef->getNumber(); 56 const InstDefList &Defs = VMetadata->getLatterDefinitions(Var); 57 for (size_t i = 0; i < Defs.size(); ++i) { 58 Str << "," << Defs[i]->getNumber(); 59 } 60 Str << "\n"; 61 } 62} 63 64void dumpLiveRange(const Variable *Var, const Cfg *Func) { 65 Ostream &Str = Func->getContext()->getStrDump(); 66 const static size_t BufLen = 30; 67 char buf[BufLen]; 68 snprintf(buf, BufLen, "%2d", Var->getRegNumTmp()); 69 Str << "R=" << buf << " V="; 70 Var->dump(Func); 71 Str << " Range=" << Var->getLiveRange(); 72} 73 74} // end of anonymous namespace 75 76void LinearScan::initForGlobalAlloc() { 77 TimerMarker T(TimerStack::TT_initUnhandled, Func); 78 Unhandled.clear(); 79 UnhandledPrecolored.clear(); 80 Handled.clear(); 81 Inactive.clear(); 82 Active.clear(); 83 // Gather the live ranges of all variables and add them to the 84 // Unhandled set. 
85 const VarList &Vars = Func->getVariables(); 86 Unhandled.reserve(Vars.size()); 87 for (Variable *Var : Vars) { 88 // Explicitly don't consider zero-weight variables, which are 89 // meant to be spill slots. 90 if (Var->getWeight() == RegWeight::Zero) 91 continue; 92 // Don't bother if the variable has a null live range, which means 93 // it was never referenced. 94 if (Var->getLiveRange().isEmpty()) 95 continue; 96 Var->untrimLiveRange(); 97 Unhandled.push_back(Var); 98 if (Var->hasReg()) { 99 Var->setRegNumTmp(Var->getRegNum()); 100 Var->setLiveRangeInfiniteWeight(); 101 UnhandledPrecolored.push_back(Var); 102 } 103 } 104 struct CompareRanges { 105 bool operator()(const Variable *L, const Variable *R) { 106 InstNumberT Lstart = L->getLiveRange().getStart(); 107 InstNumberT Rstart = R->getLiveRange().getStart(); 108 if (Lstart == Rstart) 109 return L->getIndex() < R->getIndex(); 110 return Lstart < Rstart; 111 } 112 }; 113 // Do a reverse sort so that erasing elements (from the end) is fast. 114 std::sort(Unhandled.rbegin(), Unhandled.rend(), CompareRanges()); 115 std::sort(UnhandledPrecolored.rbegin(), UnhandledPrecolored.rend(), 116 CompareRanges()); 117 118 // Build the (ordered) list of FakeKill instruction numbers. 119 Kills.clear(); 120 for (CfgNode *Node : Func->getNodes()) { 121 for (auto I = Node->getInsts().begin(), E = Node->getInsts().end(); I != E; 122 ++I) { 123 if (I->isDeleted()) 124 continue; 125 if (auto Kill = llvm::dyn_cast<InstFakeKill>(I)) { 126 if (!Kill->getLinked()->isDeleted()) 127 Kills.push_back(I->getNumber()); 128 } 129 } 130 } 131} 132 133// Implements the linear-scan algorithm. Based on "Linear Scan 134// Register Allocation in the Context of SSA Form and Register 135// Constraints" by Hanspeter Mössenböck and Michael Pfeiffer, 136// ftp://ftp.ssw.uni-linz.ac.at/pub/Papers/Moe02.PDF . 
// This implementation is modified to take affinity into account and
// allow two interfering variables to share the same register in
// certain cases.
//
// Requires running Cfg::liveness(Liveness_Intervals) in
// preparation.  Results are assigned to Variable::RegNum for each
// Variable.
void LinearScan::scan(const llvm::SmallBitVector &RegMaskFull) {
  TimerMarker T(TimerStack::TT_linearScan, Func);
  assert(RegMaskFull.any()); // Sanity check
  Ostream &Str = Func->getContext()->getStrDump();
  bool Verbose = Func->getContext()->isVerbose(IceV_LinearScan);
  Func->resetCurrentNode();
  VariablesMetadata *VMetadata = Func->getVMetadata();

  // Build a LiveRange representing the Kills list.
  LiveRange KillsRange;
  for (InstNumberT I : Kills)
    KillsRange.addSegment(I, I);
  KillsRange.untrim();

  // RegUses[I] is the number of live ranges (variables) that register
  // I is currently assigned to.  It can be greater than 1 as a result
  // of AllowOverlap inference below.
  std::vector<int> RegUses(RegMaskFull.size());
  // Unhandled is already set to all ranges in increasing order of
  // start points.
  assert(Active.empty());
  assert(Inactive.empty());
  assert(Handled.empty());
  // Next is the erase-safe successor iterator used when splicing
  // items out of the Active/Inactive lists while walking them.
  UnorderedRanges::iterator Next;
  const TargetLowering::RegSetMask RegsInclude =
      TargetLowering::RegSet_CallerSave;
  const TargetLowering::RegSetMask RegsExclude = TargetLowering::RegSet_None;
  // KillsMask holds the scratch (caller-save) registers clobbered at
  // each FakeKill point.
  const llvm::SmallBitVector KillsMask =
      Func->getTarget()->getRegisterSet(RegsInclude, RegsExclude);

  // Main loop: process each live range in increasing start order,
  // maintaining the classic four sets (Unhandled, Active, Inactive,
  // Handled) from the linear-scan literature.
  while (!Unhandled.empty()) {
    Variable *Cur = Unhandled.back();
    Unhandled.pop_back();
    if (Verbose) {
      Str << "\nConsidering ";
      dumpLiveRange(Cur, Func);
      Str << "\n";
    }
    // Restrict candidate registers to those valid for Cur's type.
    const llvm::SmallBitVector RegMask =
        RegMaskFull & Func->getTarget()->getRegisterSetForType(Cur->getType());
    KillsRange.trim(Cur->getLiveRange().getStart());

    // Check for precolored ranges.  If Cur is precolored, it
    // definitely gets that register.  Previously processed live
    // ranges would have avoided that register due to it being
    // precolored.  Future processed live ranges won't evict that
    // register because the live range has infinite weight.
    if (Cur->hasReg()) {
      int32_t RegNum = Cur->getRegNum();
      // RegNumTmp should have already been set above.
      assert(Cur->getRegNumTmp() == RegNum);
      if (Verbose) {
        Str << "Precoloring ";
        dumpLiveRange(Cur, Func);
        Str << "\n";
      }
      Active.push_back(Cur);
      assert(RegUses[RegNum] >= 0);
      ++RegUses[RegNum];
      // Precolored ranges appear in both Unhandled lists; keep
      // UnhandledPrecolored in sync.
      assert(!UnhandledPrecolored.empty());
      assert(UnhandledPrecolored.back() == Cur);
      UnhandledPrecolored.pop_back();
      continue;
    }

    // Check for active ranges that have expired or become inactive.
    for (auto I = Active.begin(), E = Active.end(); I != E; I = Next) {
      Next = I;
      ++Next;
      Variable *Item = *I;
      Item->trimLiveRange(Cur->getLiveRange().getStart());
      bool Moved = false;
      if (Item->rangeEndsBefore(Cur)) {
        // Move Item from Active to Handled list.
        if (Verbose) {
          Str << "Expiring ";
          dumpLiveRange(Item, Func);
          Str << "\n";
        }
        Handled.splice(Handled.end(), Active, I);
        Moved = true;
      } else if (!Item->rangeOverlapsStart(Cur)) {
        // Move Item from Active to Inactive list.
        if (Verbose) {
          Str << "Inactivating ";
          dumpLiveRange(Item, Func);
          Str << "\n";
        }
        Inactive.splice(Inactive.end(), Active, I);
        Moved = true;
      }
      if (Moved) {
        // Decrement Item from RegUses[].  Only Active items are
        // counted in RegUses, so leaving Active releases the count.
        assert(Item->hasRegTmp());
        int32_t RegNum = Item->getRegNumTmp();
        --RegUses[RegNum];
        assert(RegUses[RegNum] >= 0);
      }
    }

    // Check for inactive ranges that have expired or reactivated.
    for (auto I = Inactive.begin(), E = Inactive.end(); I != E; I = Next) {
      Next = I;
      ++Next;
      Variable *Item = *I;
      Item->trimLiveRange(Cur->getLiveRange().getStart());
      if (Item->rangeEndsBefore(Cur)) {
        // Move Item from Inactive to Handled list.
        if (Verbose) {
          Str << "Expiring ";
          dumpLiveRange(Item, Func);
          Str << "\n";
        }
        Handled.splice(Handled.end(), Inactive, I);
      } else if (Item->rangeOverlapsStart(Cur)) {
        // Move Item from Inactive to Active list.
        if (Verbose) {
          Str << "Reactivating ";
          dumpLiveRange(Item, Func);
          Str << "\n";
        }
        Active.splice(Active.end(), Inactive, I);
        // Increment Item in RegUses[].
        assert(Item->hasRegTmp());
        int32_t RegNum = Item->getRegNumTmp();
        assert(RegUses[RegNum] >= 0);
        ++RegUses[RegNum];
      }
    }

    // Calculate available registers into Free[].
    llvm::SmallBitVector Free = RegMask;
    for (SizeT i = 0; i < RegMask.size(); ++i) {
      if (RegUses[i] > 0)
        Free[i] = false;
    }

    // Infer register preference and allowable overlap.  Only form a
    // preference when the current Variable has an unambiguous "first"
    // definition.  The preference is some source Variable of the
    // defining instruction that either is assigned a register that is
    // currently free, or that is assigned a register that is not free
    // but overlap is allowed.  Overlap is allowed when the Variable
    // under consideration is single-definition, and its definition is
    // a simple assignment - i.e., the register gets copied/aliased
    // but is never modified.  Furthermore, overlap is only allowed
    // when preferred Variable definition instructions do not appear
    // within the current Variable's live range.
    Variable *Prefer = NULL;
    int32_t PreferReg = Variable::NoRegister;
    bool AllowOverlap = false;
    if (const Inst *DefInst = VMetadata->getFirstDefinition(Cur)) {
      assert(DefInst->getDest() == Cur);
      bool IsAssign = DefInst->isSimpleAssign();
      bool IsSingleDef = !VMetadata->isMultiDef(Cur);
      for (SizeT i = 0; i < DefInst->getSrcSize(); ++i) {
        // TODO(stichnot): Iterate through the actual Variables of the
        // instruction, not just the source operands.  This could
        // capture Load instructions, including address mode
        // optimization, for Prefer (but not for AllowOverlap).
        if (Variable *SrcVar = llvm::dyn_cast<Variable>(DefInst->getSrc(i))) {
          int32_t SrcReg = SrcVar->getRegNumTmp();
          // Only consider source variables that have (so far) been
          // assigned a register.  That register must be one in the
          // RegMask set, e.g. don't try to prefer the stack pointer
          // as a result of the stacksave intrinsic.
          if (SrcVar->hasRegTmp() && RegMask[SrcReg]) {
            if (!Free[SrcReg]) {
              // Don't bother trying to enable AllowOverlap if the
              // register is already free.
              AllowOverlap =
                  IsSingleDef && IsAssign && !overlapsDefs(Func, Cur, SrcVar);
            }
            if (AllowOverlap || Free[SrcReg]) {
              Prefer = SrcVar;
              PreferReg = SrcReg;
            }
          }
        }
      }
    }
    if (Verbose) {
      if (Prefer) {
        Str << "Initial Prefer=" << *Prefer << " R=" << PreferReg
            << " LIVE=" << Prefer->getLiveRange() << " Overlap=" << AllowOverlap
            << "\n";
      }
    }

    // Remove registers from the Free[] list where an Inactive range
    // overlaps with the current range.
    for (const Variable *Item : Inactive) {
      if (Item->rangeOverlaps(Cur)) {
        int32_t RegNum = Item->getRegNumTmp();
        // Don't assert(Free[RegNum]) because in theory (though
        // probably never in practice) there could be two inactive
        // variables that were marked with AllowOverlap.
        Free[RegNum] = false;
        // Disable AllowOverlap if an Inactive variable, which is not
        // Prefer, shares Prefer's register, and has a definition
        // within Cur's live range.
        if (AllowOverlap && Item != Prefer && RegNum == PreferReg &&
            overlapsDefs(Func, Cur, Item)) {
          AllowOverlap = false;
          dumpDisableOverlap(Func, Item, "Inactive");
        }
      }
    }

    // Disable AllowOverlap if an Active variable, which is not
    // Prefer, shares Prefer's register, and has a definition within
    // Cur's live range.
    for (const Variable *Item : Active) {
      int32_t RegNum = Item->getRegNumTmp();
      if (Item != Prefer && RegNum == PreferReg &&
          overlapsDefs(Func, Cur, Item)) {
        AllowOverlap = false;
        dumpDisableOverlap(Func, Item, "Active");
      }
    }

    // Eviction weights for each register, filled in below.
    std::vector<RegWeight> Weights(RegMask.size());

    // Remove registers from the Free[] list where an Unhandled
    // precolored range overlaps with the current range, and set those
    // registers to infinite weight so that they aren't candidates for
    // eviction.  Cur->rangeEndsBefore(Item) is an early exit check
    // that turns a guaranteed O(N^2) algorithm into expected linear
    // complexity.
    llvm::SmallBitVector PrecoloredUnhandledMask(RegMask.size());
    // Note: PrecoloredUnhandledMask is only used for dumping.
    for (auto I = UnhandledPrecolored.rbegin(), E = UnhandledPrecolored.rend();
         I != E; ++I) {
      Variable *Item = *I;
      assert(Item->hasReg());
      if (Cur->rangeEndsBefore(Item))
        break;
      if (Item->rangeOverlaps(Cur)) {
        int32_t ItemReg = Item->getRegNum(); // Note: not getRegNumTmp()
        Weights[ItemReg].setWeight(RegWeight::Inf);
        Free[ItemReg] = false;
        PrecoloredUnhandledMask[ItemReg] = true;
        // Disable AllowOverlap if the preferred register is one of
        // these precolored unhandled overlapping ranges.
        if (AllowOverlap && ItemReg == PreferReg) {
          AllowOverlap = false;
          dumpDisableOverlap(Func, Item, "PrecoloredUnhandled");
        }
      }
    }

    // Remove scratch registers from the Free[] list, and mark their
    // Weights[] as infinite, if KillsRange overlaps Cur's live range.
    const bool UseTrimmed = true;
    if (Cur->getLiveRange().overlaps(KillsRange, UseTrimmed)) {
      Free.reset(KillsMask);
      for (int i = KillsMask.find_first(); i != -1;
           i = KillsMask.find_next(i)) {
        Weights[i].setWeight(RegWeight::Inf);
        if (PreferReg == i)
          AllowOverlap = false;
      }
    }

    // Print info about physical register availability.
    if (Verbose) {
      for (SizeT i = 0; i < RegMask.size(); ++i) {
        if (RegMask[i]) {
          Str << Func->getTarget()->getRegName(i, IceType_i32)
              << "(U=" << RegUses[i] << ",F=" << Free[i]
              << ",P=" << PrecoloredUnhandledMask[i] << ") ";
        }
      }
      Str << "\n";
    }

    if (Prefer && (AllowOverlap || Free[PreferReg])) {
      // First choice: a preferred register that is either free or is
      // allowed to overlap with its linked variable.
      Cur->setRegNumTmp(PreferReg);
      if (Verbose) {
        Str << "Preferring ";
        dumpLiveRange(Cur, Func);
        Str << "\n";
      }
      assert(RegUses[PreferReg] >= 0);
      ++RegUses[PreferReg];
      Active.push_back(Cur);
    } else if (Free.any()) {
      // Second choice: any free register.  TODO: After explicit
      // affinity is considered, is there a strategy better than just
      // picking the lowest-numbered available register?
      int32_t RegNum = Free.find_first();
      Cur->setRegNumTmp(RegNum);
      if (Verbose) {
        Str << "Allocating ";
        dumpLiveRange(Cur, Func);
        Str << "\n";
      }
      assert(RegUses[RegNum] >= 0);
      ++RegUses[RegNum];
      Active.push_back(Cur);
    } else {
      // Fallback: there are no free registers, so we look for the
      // lowest-weight register and see if Cur has higher weight.
      // Check Active ranges.
      for (const Variable *Item : Active) {
        assert(Item->rangeOverlaps(Cur));
        int32_t RegNum = Item->getRegNumTmp();
        assert(Item->hasRegTmp());
        Weights[RegNum].addWeight(Item->getLiveRange().getWeight());
      }
      // Same as above, but check Inactive ranges instead of Active.
      for (const Variable *Item : Inactive) {
        int32_t RegNum = Item->getRegNumTmp();
        assert(Item->hasRegTmp());
        if (Item->rangeOverlaps(Cur))
          Weights[RegNum].addWeight(Item->getLiveRange().getWeight());
      }

      // All the weights are now calculated.  Find the register with
      // smallest weight.
      int32_t MinWeightIndex = RegMask.find_first();
      // MinWeightIndex must be valid because of the initial
      // RegMask.any() test.
      assert(MinWeightIndex >= 0);
      for (SizeT i = MinWeightIndex + 1; i < Weights.size(); ++i) {
        if (RegMask[i] && Weights[i] < Weights[MinWeightIndex])
          MinWeightIndex = i;
      }

      if (Cur->getLiveRange().getWeight() <= Weights[MinWeightIndex]) {
        // Cur doesn't have priority over any other live ranges, so
        // don't allocate any register to it, and move it to the
        // Handled state.
        Handled.push_back(Cur);
        if (Cur->getLiveRange().getWeight().isInf()) {
          Func->setError("Unable to find a physical register for an "
                         "infinite-weight live range");
        }
      } else {
        // Evict all live ranges in Active that register number
        // MinWeightIndex is assigned to.
        for (auto I = Active.begin(), E = Active.end(); I != E; I = Next) {
          Next = I;
          ++Next;
          Variable *Item = *I;
          if (Item->getRegNumTmp() == MinWeightIndex) {
            if (Verbose) {
              Str << "Evicting ";
              dumpLiveRange(Item, Func);
              Str << "\n";
            }
            --RegUses[MinWeightIndex];
            assert(RegUses[MinWeightIndex] >= 0);
            Item->setRegNumTmp(Variable::NoRegister);
            Handled.splice(Handled.end(), Active, I);
          }
        }
        // Do the same for Inactive.
        for (auto I = Inactive.begin(), E = Inactive.end(); I != E; I = Next) {
          Next = I;
          ++Next;
          Variable *Item = *I;
          // Note: The Item->rangeOverlaps(Cur) clause is not part of the
          // description of AssignMemLoc() in the original paper.  But
          // there doesn't seem to be any need to evict an inactive
          // live range that doesn't overlap with the live range
          // currently being considered.  It's especially bad if we
          // would end up evicting an infinite-weight but
          // currently-inactive live range.  The most common situation
          // for this would be a scratch register kill set for call
          // instructions.
          if (Item->getRegNumTmp() == MinWeightIndex &&
              Item->rangeOverlaps(Cur)) {
            if (Verbose) {
              Str << "Evicting ";
              dumpLiveRange(Item, Func);
              Str << "\n";
            }
            // No RegUses[] decrement here: Inactive items are not
            // counted in RegUses (only Active ones are).
            Item->setRegNumTmp(Variable::NoRegister);
            Handled.splice(Handled.end(), Inactive, I);
          }
        }
        // Assign the register to Cur.
        Cur->setRegNumTmp(MinWeightIndex);
        assert(RegUses[MinWeightIndex] >= 0);
        ++RegUses[MinWeightIndex];
        Active.push_back(Cur);
        if (Verbose) {
          Str << "Allocating ";
          dumpLiveRange(Cur, Func);
          Str << "\n";
        }
      }
    }
    dump(Func);
  }
  // Move anything Active or Inactive to Handled for easier handling.
  for (Variable *I : Active)
    Handled.push_back(I);
  Active.clear();
  for (Variable *I : Inactive)
    Handled.push_back(I);
  Inactive.clear();
  dump(Func);

  // Finish up by assigning RegNumTmp->RegNum for each Variable.
  for (Variable *Item : Handled) {
    int32_t RegNum = Item->getRegNumTmp();
    if (Verbose) {
      if (!Item->hasRegTmp()) {
        Str << "Not assigning ";
        Item->dump(Func);
        Str << "\n";
      } else {
        Str << (RegNum == Item->getRegNum() ? "Reassigning " : "Assigning ")
            << Func->getTarget()->getRegName(RegNum, IceType_i32) << "(r"
            << RegNum << ") to ";
        Item->dump(Func);
        Str << "\n";
      }
    }
    Item->setRegNum(Item->getRegNumTmp());
  }

  // TODO: Consider running register allocation one more time, with
  // infinite registers, for two reasons.  First, evicted live ranges
  // get a second chance for a register.  Second, it allows coalescing
  // of stack slots.  If there is no time budget for the second
  // register allocation run, each unallocated variable just gets its
  // own slot.
  //
  // Another idea for coalescing stack slots is to initialize the
  // Unhandled list with just the unallocated variables, saving time
  // but not offering second-chance opportunities.
}

// ======================== Dump routines ======================== //

// Dumps the full allocator state (Handled, Unhandled, Active,
// Inactive sets).  No-op unless IceV_LinearScan verbosity is enabled.
void LinearScan::dump(Cfg *Func) const {
  Ostream &Str = Func->getContext()->getStrDump();
  if (!Func->getContext()->isVerbose(IceV_LinearScan))
    return;
  Func->resetCurrentNode();
  Str << "**** Current regalloc state:\n";
  Str << "++++++ Handled:\n";
  for (const Variable *Item : Handled) {
    dumpLiveRange(Item, Func);
    Str << "\n";
  }
  Str << "++++++ Unhandled:\n";
  // Unhandled is reverse-sorted, so iterate backwards to print in
  // increasing start order.
  for (auto I = Unhandled.rbegin(), E = Unhandled.rend(); I != E; ++I) {
    dumpLiveRange(*I, Func);
    Str << "\n";
  }
  Str << "++++++ Active:\n";
  for (const Variable *Item : Active) {
    dumpLiveRange(Item, Func);
    Str << "\n";
  }
  Str << "++++++ Inactive:\n";
  for (const Variable *Item : Inactive) {
    dumpLiveRange(Item, Func);
    Str << "\n";
  }
}

} // end of namespace Ice