ralloc_util.cc revision 4b537a851b686402513a7c4a4e60f5457bb8d7c1
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains register allocation support. */

#include "dex/compiler_ir.h"
#include "dex/compiler_internals.h"
#include "mir_to_lir-inl.h"

namespace art {

/*
 * Free all allocated temps in the temp pools.  Note that this does
 * not affect the "liveness" of a temp register, which will stay
 * live until it is either explicitly killed or reallocated.
 */
void Mir2Lir::ResetRegPool() {
  GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
  for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
    info->MarkFree();
  }
  // Reset temp tracking sanity check.
  if (kIsDebugBuild) {
    live_sreg_ = INVALID_SREG;
  }
}

/*
 * Initialize bookkeeping for one physical register: storage_mask_ maps the
 * register's StorageSize() to a bit footprint within its storage unit
 * (size 0 and 128 both cover all bits).  An invalid register starts with
 * all of its storage marked used so it can never be handed out.
 */
Mir2Lir::RegisterInfo::RegisterInfo(RegStorage r, const ResourceMask& mask)
  : reg_(r), is_temp_(false), wide_value_(false), dirty_(false), aliased_(false), partner_(r),
    s_reg_(INVALID_SREG), def_use_mask_(mask), master_(this), def_start_(nullptr),
    def_end_(nullptr), alias_chain_(nullptr) {
  switch (r.StorageSize()) {
    case 0: storage_mask_ = 0xffffffff; break;
    case 4: storage_mask_ = 0x00000001; break;
    case 8: storage_mask_ = 0x00000003; break;
    case 16: storage_mask_ = 0x0000000f; break;
    case 32: storage_mask_ = 0x000000ff; break;
    case 64: storage_mask_ = 0x0000ffff; break;
    case 128: storage_mask_ = 0xffffffff; break;
  }
  // Valid regs start fully free; invalid regs start fully "used".
  used_storage_ = r.Valid() ? ~storage_mask_ : storage_mask_;
  liveness_ = used_storage_;
}

/*
 * Build the target's register pools: create a RegisterInfo for every core,
 * core64, single- and double-precision register, populate the fast
 * reginfo_map_ lookup, pin reserved registers, and mark the temp sets.
 */
Mir2Lir::RegisterPool::RegisterPool(Mir2Lir* m2l, ArenaAllocator* arena,
                                    const ArrayRef<const RegStorage>& core_regs,
                                    const ArrayRef<const RegStorage>& core64_regs,
                                    const ArrayRef<const RegStorage>& sp_regs,
                                    const ArrayRef<const RegStorage>& dp_regs,
                                    const ArrayRef<const RegStorage>& reserved_regs,
                                    const ArrayRef<const RegStorage>& reserved64_regs,
                                    const ArrayRef<const RegStorage>& core_temps,
                                    const ArrayRef<const RegStorage>& core64_temps,
                                    const ArrayRef<const RegStorage>& sp_temps,
                                    const ArrayRef<const RegStorage>& dp_temps) :
    core_regs_(arena, core_regs.size()), next_core_reg_(0),
    core64_regs_(arena, core64_regs.size()), next_core64_reg_(0),
    sp_regs_(arena, sp_regs.size()), next_sp_reg_(0),
    dp_regs_(arena, dp_regs.size()), next_dp_reg_(0), m2l_(m2l) {
  // Initialize the fast lookup map.
  m2l_->reginfo_map_.Reset();
  if (kIsDebugBuild) {
    // Debug builds insert explicit nullptr entries so stale lookups fault loudly.
    m2l_->reginfo_map_.Resize(RegStorage::kMaxRegs);
    for (unsigned i = 0; i < RegStorage::kMaxRegs; i++) {
      m2l_->reginfo_map_.Insert(nullptr);
    }
  } else {
    m2l_->reginfo_map_.SetSize(RegStorage::kMaxRegs);
  }

  // Construct the register pool.
  for (const RegStorage& reg : core_regs) {
    RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
    m2l_->reginfo_map_.Put(reg.GetReg(), info);
    core_regs_.Insert(info);
  }
  for (const RegStorage& reg : core64_regs) {
    RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
    m2l_->reginfo_map_.Put(reg.GetReg(), info);
    core64_regs_.Insert(info);
  }
  for (const RegStorage& reg : sp_regs) {
    RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
    m2l_->reginfo_map_.Put(reg.GetReg(), info);
    sp_regs_.Insert(info);
  }
  for (const RegStorage& reg : dp_regs) {
    RegisterInfo* info = new (arena) RegisterInfo(reg, m2l_->GetRegMaskCommon(reg));
    m2l_->reginfo_map_.Put(reg.GetReg(), info);
    dp_regs_.Insert(info);
  }

  // Keep special registers from being allocated.
  for (RegStorage reg : reserved_regs) {
    m2l_->MarkInUse(reg);
  }
  for (RegStorage reg : reserved64_regs) {
    m2l_->MarkInUse(reg);
  }

  // Mark temp regs - all others not in use can be used for promotion.
  for (RegStorage reg : core_temps) {
    m2l_->MarkTemp(reg);
  }
  for (RegStorage reg : core64_temps) {
    m2l_->MarkTemp(reg);
  }
  for (RegStorage reg : sp_temps) {
    m2l_->MarkTemp(reg);
  }
  for (RegStorage reg : dp_temps) {
    m2l_->MarkTemp(reg);
  }

  // Add an entry for InvalidReg with zero'd mask.
  RegisterInfo* invalid_reg = new (arena) RegisterInfo(RegStorage::InvalidReg(), kEncodeNone);
  m2l_->reginfo_map_.Put(RegStorage::InvalidReg().GetReg(), invalid_reg);

  // Existence of core64 registers implies wide references.
  if (core64_regs_.Size() != 0) {
    ref_regs_ = &core64_regs_;
    next_ref_reg_ = &next_core64_reg_;
  } else {
    ref_regs_ = &core_regs_;
    next_ref_reg_ = &next_core_reg_;
  }
}

// Debug dump of one pool: temp/in-use/wide/partner/live/dirty/sreg/def state per register.
void Mir2Lir::DumpRegPool(GrowableArray<RegisterInfo*>* regs) {
  LOG(INFO) << "================================================";
  GrowableArray<RegisterInfo*>::Iterator it(regs);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    LOG(INFO) << StringPrintf(
        "R[%d:%d:%c]: T:%d, U:%d, W:%d, p:%d, LV:%d, D:%d, SR:%d, DEF:%d",
        info->GetReg().GetReg(), info->GetReg().GetRegNum(), info->GetReg().IsFloat() ? 'f' : 'c',
        info->IsTemp(), info->InUse(), info->IsWide(), info->Partner().GetReg(), info->IsLive(),
        info->IsDirty(), info->SReg(), info->DefStart() != nullptr);
  }
  LOG(INFO) << "================================================";
}

// Dump both integer pools (32-bit and 64-bit views).
void Mir2Lir::DumpCoreRegPool() {
  DumpRegPool(&reg_pool_->core_regs_);
  DumpRegPool(&reg_pool_->core64_regs_);
}

// Dump both floating-point pools (single and double precision).
void Mir2Lir::DumpFpRegPool() {
  DumpRegPool(&reg_pool_->sp_regs_);
  DumpRegPool(&reg_pool_->dp_regs_);
}

// Dump every pool - used from the fatal path when allocation fails.
void Mir2Lir::DumpRegPools() {
  LOG(INFO) << "Core registers";
  DumpCoreRegPool();
  LOG(INFO) << "FP registers";
  DumpFpRegPool();
}

/*
 * Kill the association between a temp register and any value it holds.
 * For a pair, clobber both halves; otherwise clobber the register, its
 * wide partner (if any), and any aliases that overlap its storage.
 */
void Mir2Lir::Clobber(RegStorage reg) {
  if (UNLIKELY(reg.IsPair())) {
    DCHECK(!GetRegInfo(reg.GetLow())->IsAliased());
    Clobber(reg.GetLow());
    DCHECK(!GetRegInfo(reg.GetHigh())->IsAliased());
    Clobber(reg.GetHigh());
  } else {
    RegisterInfo* info = GetRegInfo(reg);
    if (info->IsTemp() && !info->IsDead()) {
      if (info->GetReg() != info->Partner()) {
        // Wide value - also clobber the partner half.
        ClobberBody(GetRegInfo(info->Partner()));
      }
      ClobberBody(info);
      if (info->IsAliased()) {
        ClobberAliases(info, info->StorageMask());
      } else {
        RegisterInfo* master = info->Master();
        if (info != master) {
          // We're an alias of the master - clobber it and its other aliases too.
          ClobberBody(info->Master());
          ClobberAliases(info->Master(), info->StorageMask());
        }
      }
    }
  }
}

// Walk the master's alias chain, clobbering every alias whose storage overlaps clobber_mask.
void Mir2Lir::ClobberAliases(RegisterInfo* info, uint32_t clobber_mask) {
  for (RegisterInfo* alias = info->GetAliasChain(); alias != nullptr;
       alias = alias->GetAliasChain()) {
    DCHECK(!alias->IsAliased());  // Only the master should be marked as aliased.
    // Only clobber if we have overlap.
    if ((alias->StorageMask() & clobber_mask) != 0) {
      ClobberBody(alias);
    }
  }
}

/*
 * Break the association between a Dalvik vreg and a physical temp register of either register
 * class.
 * TODO: Ideally, the public version of this code should not exist.  Besides its local usage
 * in the register utilities, it is also used by code gen routines to work around a deficiency in
 * local register allocation, which fails to distinguish between the "in" and "out" identities
 * of Dalvik vregs.  This can result in useless register copies when the same Dalvik vreg
 * is used both as the source and destination register of an operation in which the type
 * changes (for example: INT_TO_FLOAT v1, v1).  Revisit when improved register allocation is
 * addressed.
 */
void Mir2Lir::ClobberSReg(int s_reg) {
  if (s_reg != INVALID_SREG) {
    if (kIsDebugBuild && s_reg == live_sreg_) {
      live_sreg_ = INVALID_SREG;
    }
    GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
    for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
      if (info->SReg() == s_reg) {
        if (info->GetReg() != info->Partner()) {
          // Dealing with a pair - clobber the other half.
          DCHECK(!info->IsAliased());
          ClobberBody(GetRegInfo(info->Partner()));
        }
        ClobberBody(info);
        if (info->IsAliased()) {
          ClobberAliases(info, info->StorageMask());
        }
      }
    }
  }
}

/*
 * SSA names associated with the initial definitions of Dalvik
 * registers are the same as the Dalvik register number (and
 * thus take the same position in the promotion_map.  However,
 * the special Method* and compiler temp registers use negative
 * v_reg numbers to distinguish them and can have an arbitrary
 * ssa name (above the last original Dalvik register).  This function
 * maps SSA names to positions in the promotion_map array.
 */
int Mir2Lir::SRegToPMap(int s_reg) {
  DCHECK_LT(s_reg, mir_graph_->GetNumSSARegs());
  DCHECK_GE(s_reg, 0);
  int v_reg = mir_graph_->SRegToVReg(s_reg);
  if (v_reg >= 0) {
    DCHECK_LT(v_reg, cu_->num_dalvik_registers);
    return v_reg;
  } else {
    /*
     * It must be the case that the v_reg for temporary is less than or equal to the
     * base reg for temps.  For that reason, "position" must be zero or positive.
     */
    unsigned int position = std::abs(v_reg) - std::abs(static_cast<int>(kVRegTempBaseReg));

    // The temporaries are placed after dalvik registers in the promotion map.
    DCHECK_LT(position, mir_graph_->GetNumUsedCompilerTemps());
    return cu_->num_dalvik_registers + position;
  }
}

// TODO: refactor following Alloc/Record routines - much commonality.
272void Mir2Lir::RecordCorePromotion(RegStorage reg, int s_reg) { 273 int p_map_idx = SRegToPMap(s_reg); 274 int v_reg = mir_graph_->SRegToVReg(s_reg); 275 int reg_num = reg.GetRegNum(); 276 GetRegInfo(reg)->MarkInUse(); 277 core_spill_mask_ |= (1 << reg_num); 278 // Include reg for later sort 279 core_vmap_table_.push_back(reg_num << VREG_NUM_WIDTH | (v_reg & ((1 << VREG_NUM_WIDTH) - 1))); 280 num_core_spills_++; 281 promotion_map_[p_map_idx].core_location = kLocPhysReg; 282 promotion_map_[p_map_idx].core_reg = reg_num; 283} 284 285/* Reserve a callee-save register. Return InvalidReg if none available */ 286RegStorage Mir2Lir::AllocPreservedCoreReg(int s_reg) { 287 // TODO: 64-bit and refreg update 288 RegStorage res; 289 GrowableArray<RegisterInfo*>::Iterator it(®_pool_->core_regs_); 290 for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) { 291 if (!info->IsTemp() && !info->InUse()) { 292 res = info->GetReg(); 293 RecordCorePromotion(res, s_reg); 294 break; 295 } 296 } 297 return res; 298} 299 300void Mir2Lir::RecordSinglePromotion(RegStorage reg, int s_reg) { 301 int p_map_idx = SRegToPMap(s_reg); 302 int v_reg = mir_graph_->SRegToVReg(s_reg); 303 GetRegInfo(reg)->MarkInUse(); 304 MarkPreservedSingle(v_reg, reg); 305 promotion_map_[p_map_idx].fp_location = kLocPhysReg; 306 promotion_map_[p_map_idx].FpReg = reg.GetReg(); 307} 308 309// Reserve a callee-save sp single register. 
RegStorage Mir2Lir::AllocPreservedSingle(int s_reg) {
  RegStorage res;  // Remains InvalidReg() if no preserved sp register is free.
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->sp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    if (!info->IsTemp() && !info->InUse()) {
      res = info->GetReg();
      RecordSinglePromotion(res, s_reg);
      break;
    }
  }
  return res;
}

// Record the promotion of a Dalvik vreg into a callee-save dp register.
void Mir2Lir::RecordDoublePromotion(RegStorage reg, int s_reg) {
  int p_map_idx = SRegToPMap(s_reg);
  int v_reg = mir_graph_->SRegToVReg(s_reg);
  GetRegInfo(reg)->MarkInUse();
  MarkPreservedDouble(v_reg, reg);
  promotion_map_[p_map_idx].fp_location = kLocPhysReg;
  promotion_map_[p_map_idx].FpReg = reg.GetReg();
}

// Reserve a callee-save dp solo register.  Return InvalidReg if none available.
RegStorage Mir2Lir::AllocPreservedDouble(int s_reg) {
  RegStorage res;
  GrowableArray<RegisterInfo*>::Iterator it(&reg_pool_->dp_regs_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    if (!info->IsTemp() && !info->InUse()) {
      res = info->GetReg();
      RecordDoublePromotion(res, s_reg);
      break;
    }
  }
  return res;
}


/*
 * Round-robin allocation from one temp pool.  First pass prefers a dead
 * (value-free) temp; second pass kills any temp that is merely not in use.
 * *next_temp is the rotating start index.  If required, failure is fatal
 * (after dumping state); otherwise InvalidReg is returned.
 */
RegStorage Mir2Lir::AllocTempBody(GrowableArray<RegisterInfo*> &regs, int* next_temp, bool required) {
  int num_regs = regs.Size();
  int next = *next_temp;
  for (int i = 0; i < num_regs; i++) {
    if (next >= num_regs)
      next = 0;
    RegisterInfo* info = regs.Get(next);
    // Try to allocate a register that doesn't hold a live value.
    if (info->IsTemp() && !info->InUse() && info->IsDead()) {
      Clobber(info->GetReg());
      info->MarkInUse();
      /*
       * NOTE: "wideness" is an attribute of how the container is used, not its physical size.
       * The caller will set wideness as appropriate.
       */
      if (info->IsWide()) {
        RegisterInfo* partner = GetRegInfo(info->Partner());
        DCHECK_EQ(info->GetReg().GetRegNum(), partner->Partner().GetRegNum());
        DCHECK(partner->IsWide());
        info->SetIsWide(false);
        partner->SetIsWide(false);
      }
      *next_temp = next + 1;
      return info->GetReg();
    }
    next++;
  }
  next = *next_temp;
  // No free non-live regs.  Anything we can kill?
  for (int i = 0; i < num_regs; i++) {
    if (next >= num_regs)
      next = 0;
    RegisterInfo* info = regs.Get(next);
    if (info->IsTemp() && !info->InUse()) {
      // Got one.  Kill it.
      ClobberSReg(info->SReg());
      Clobber(info->GetReg());
      info->MarkInUse();
      if (info->IsWide()) {
        RegisterInfo* partner = GetRegInfo(info->Partner());
        DCHECK_EQ(info->GetReg().GetRegNum(), partner->Partner().GetRegNum());
        DCHECK(partner->IsWide());
        info->SetIsWide(false);
        partner->SetIsWide(false);
      }
      *next_temp = next + 1;
      return info->GetReg();
    }
    next++;
  }
  if (required) {
    CodegenDump();
    DumpRegPools();
    LOG(FATAL) << "No free temp registers";
  }
  return RegStorage::InvalidReg();  // No register available.
}

/* Return a core temp if one is available, InvalidReg otherwise (never fatal). */
RegStorage Mir2Lir::AllocFreeTemp() {
  return AllocTempBody(reg_pool_->core_regs_, &reg_pool_->next_core_reg_, false);
}

// Allocate a core temp; failure is fatal.
RegStorage Mir2Lir::AllocTemp() {
  return AllocTempBody(reg_pool_->core_regs_, &reg_pool_->next_core_reg_, true);
}

// Allocate a wide core temp: a 64-bit solo if the target has them, else a pair of 32-bit temps.
RegStorage Mir2Lir::AllocTempWide() {
  RegStorage res;
  if (reg_pool_->core64_regs_.Size() != 0) {
    res = AllocTempBody(reg_pool_->core64_regs_, &reg_pool_->next_core64_reg_, true);
  } else {
    RegStorage low_reg = AllocTemp();
    RegStorage high_reg = AllocTemp();
    res = RegStorage::MakeRegPair(low_reg, high_reg);
  }
  CheckRegStorage(res, WidenessCheck::kCheckWide, RefCheck::kIgnoreRef, FPCheck::kCheckNotFP);
  return res;
}

// Allocate a temp suitable for holding a reference (pool chosen in the RegisterPool ctor).
RegStorage Mir2Lir::AllocTempRef() {
  RegStorage res = AllocTempBody(*reg_pool_->ref_regs_, reg_pool_->next_ref_reg_, true);
  DCHECK(!res.IsPair());
  CheckRegStorage(res, WidenessCheck::kCheckNotWide, RefCheck::kCheckRef, FPCheck::kCheckNotFP);
  return res;
}

// Allocate a single-precision FP temp; failure is fatal.
RegStorage Mir2Lir::AllocTempSingle() {
  RegStorage res = AllocTempBody(reg_pool_->sp_regs_, &reg_pool_->next_sp_reg_, true);
  DCHECK(res.IsSingle()) << "Reg: 0x" << std::hex << res.GetRawBits();
  CheckRegStorage(res, WidenessCheck::kCheckNotWide, RefCheck::kCheckNotRef, FPCheck::kIgnoreFP);
  return res;
}

// Allocate a double-precision FP temp; failure is fatal.
RegStorage Mir2Lir::AllocTempDouble() {
  RegStorage res = AllocTempBody(reg_pool_->dp_regs_, &reg_pool_->next_dp_reg_, true);
  DCHECK(res.IsDouble()) << "Reg: 0x" << std::hex << res.GetRawBits();
  CheckRegStorage(res, WidenessCheck::kCheckWide, RefCheck::kCheckNotRef, FPCheck::kIgnoreFP);
  return res;
}

// Allocate a wide temp of the class implied by reg_class/fp_hint.
RegStorage Mir2Lir::AllocTypedTempWide(bool fp_hint, int reg_class) {
  DCHECK_NE(reg_class, kRefReg);  // NOTE: the Dalvik width of a reference is always 32 bits.
  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
    return AllocTempDouble();
  }
  return AllocTempWide();
}

// Allocate a narrow temp of the class implied by reg_class/fp_hint.
RegStorage Mir2Lir::AllocTypedTemp(bool fp_hint, int reg_class) {
  if (((reg_class == kAnyReg) && fp_hint) || (reg_class == kFPReg)) {
    return AllocTempSingle();
  } else if (reg_class == kRefReg) {
    return AllocTempRef();
  }
  return AllocTemp();
}

// Scan one pool for a live register currently holding s_reg; InvalidReg if none.
RegStorage Mir2Lir::FindLiveReg(GrowableArray<RegisterInfo*> &regs, int s_reg) {
  RegStorage res;
  GrowableArray<RegisterInfo*>::Iterator it(&regs);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    if ((info->SReg() == s_reg) && info->IsLive()) {
      res = info->GetReg();
      break;
    }
  }
  return res;
}

/*
 * Try to reuse a register already holding s_reg live, honoring reg_class and
 * wideness.  On success the register (or assembled pair) is marked in use;
 * on any mismatch the stale copies are clobbered and InvalidReg is returned.
 */
RegStorage Mir2Lir::AllocLiveReg(int s_reg, int reg_class, bool wide) {
  RegStorage reg;
  if (reg_class == kRefReg) {
    reg = FindLiveReg(*reg_pool_->ref_regs_, s_reg);
    CheckRegStorage(reg, WidenessCheck::kCheckNotWide, RefCheck::kCheckRef, FPCheck::kCheckNotFP);
  }
  if (!reg.Valid() && ((reg_class == kAnyReg) || (reg_class == kFPReg))) {
    reg = FindLiveReg(wide ? reg_pool_->dp_regs_ : reg_pool_->sp_regs_, s_reg);
  }
  if (!reg.Valid() && (reg_class != kFPReg)) {
    if (cu_->target64) {
      reg = FindLiveReg(wide || reg_class == kRefReg ? reg_pool_->core64_regs_ :
                                                       reg_pool_->core_regs_, s_reg);
    } else {
      reg = FindLiveReg(reg_pool_->core_regs_, s_reg);
    }
  }
  if (reg.Valid()) {
    if (wide && !reg.IsFloat() && !cu_->target64) {
      // Only allow reg pairs for core regs on 32-bit targets.
      RegStorage high_reg = FindLiveReg(reg_pool_->core_regs_, s_reg + 1);
      if (high_reg.Valid()) {
        reg = RegStorage::MakeRegPair(reg, high_reg);
        MarkWide(reg);
      } else {
        // Only half available.
        reg = RegStorage::InvalidReg();
      }
    }
    if (reg.Valid() && (wide != GetRegInfo(reg)->IsWide())) {
      // Width mismatch - don't try to reuse.
      reg = RegStorage::InvalidReg();
    }
  }
  if (reg.Valid()) {
    if (reg.IsPair()) {
      RegisterInfo* info_low = GetRegInfo(reg.GetLow());
      RegisterInfo* info_high = GetRegInfo(reg.GetHigh());
      if (info_low->IsTemp()) {
        info_low->MarkInUse();
      }
      if (info_high->IsTemp()) {
        info_high->MarkInUse();
      }
    } else {
      RegisterInfo* info = GetRegInfo(reg);
      if (info->IsTemp()) {
        info->MarkInUse();
      }
    }
  } else {
    // Either not found, or something didn't match up.  Clobber to prevent any stale instances.
    ClobberSReg(s_reg);
    if (wide) {
      ClobberSReg(s_reg + 1);
    }
  }
  CheckRegStorage(reg, WidenessCheck::kIgnoreWide,
                  reg_class == kRefReg ? RefCheck::kCheckRef : RefCheck::kIgnoreRef,
                  FPCheck::kIgnoreFP);
  return reg;
}

// Release a temp (both halves for a pair), resetting wideness and partner.
void Mir2Lir::FreeTemp(RegStorage reg) {
  if (reg.IsPair()) {
    FreeTemp(reg.GetLow());
    FreeTemp(reg.GetHigh());
  } else {
    RegisterInfo* p = GetRegInfo(reg);
    if (p->IsTemp()) {
      p->MarkFree();
      p->SetIsWide(false);
      p->SetPartner(reg);
    }
  }
}

// Free rl_free's wide pair only when it shares no physical register with rl_keep.
void Mir2Lir::FreeRegLocTemps(RegLocation rl_keep, RegLocation rl_free) {
  DCHECK(rl_keep.wide);
  DCHECK(rl_free.wide);
  int free_low = rl_free.reg.GetLowReg();
  int free_high = rl_free.reg.GetHighReg();
  int keep_low = rl_keep.reg.GetLowReg();
  int keep_high = rl_keep.reg.GetHighReg();
  if ((free_low != keep_low) && (free_low != keep_high) &&
      (free_high != keep_low) && (free_high != keep_high)) {
    // No overlap, free both.
    FreeTemp(rl_free.reg);
  }
}

// Is the register (or either half of a pair) holding a live value?
bool Mir2Lir::IsLive(RegStorage reg) {
  bool res;
  if (reg.IsPair()) {
    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
    // Liveness of the two halves is expected to agree.
    DCHECK_EQ(p_lo->IsLive(), p_hi->IsLive());
    res = p_lo->IsLive() || p_hi->IsLive();
  } else {
    RegisterInfo* p = GetRegInfo(reg);
    res = p->IsLive();
  }
  return res;
}

// Is the register (or either half of a pair) an allocatable temp?
bool Mir2Lir::IsTemp(RegStorage reg) {
  bool res;
  if (reg.IsPair()) {
    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
    res = p_lo->IsTemp() || p_hi->IsTemp();
  } else {
    RegisterInfo* p = GetRegInfo(reg);
    res = p->IsTemp();
  }
  return res;
}

// Is the register (or either half of a pair) a promoted (non-temp) register?
bool Mir2Lir::IsPromoted(RegStorage reg) {
  bool res;
  if (reg.IsPair()) {
    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
    res = !p_lo->IsTemp() || !p_hi->IsTemp();
  } else {
    RegisterInfo* p = GetRegInfo(reg);
    res = !p->IsTemp();
  }
  return res;
}

// Does the register (or either half of a pair) hold an unflushed value?
bool Mir2Lir::IsDirty(RegStorage reg) {
  bool res;
  if (reg.IsPair()) {
    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
    res = p_lo->IsDirty() || p_hi->IsDirty();
  } else {
    RegisterInfo* p = GetRegInfo(reg);
    res = p->IsDirty();
  }
  return res;
}

/*
 * Similar to AllocTemp(), but forces the allocation of a specific
 * register.  No check is made to see if the register was previously
 * allocated.  Use with caution.
 */
void Mir2Lir::LockTemp(RegStorage reg) {
  DCHECK(IsTemp(reg));
  if (reg.IsPair()) {
    RegisterInfo* p_lo = GetRegInfo(reg.GetLow());
    RegisterInfo* p_hi = GetRegInfo(reg.GetHigh());
    p_lo->MarkInUse();
    p_lo->MarkDead();
    p_hi->MarkInUse();
    p_hi->MarkDead();
  } else {
    RegisterInfo* p = GetRegInfo(reg);
    p->MarkInUse();
    p->MarkDead();
  }
}

// Clear def-range tracking for the register (both halves for a pair).
void Mir2Lir::ResetDef(RegStorage reg) {
  if (reg.IsPair()) {
    GetRegInfo(reg.GetLow())->ResetDefBody();
    GetRegInfo(reg.GetHigh())->ResetDefBody();
  } else {
    GetRegInfo(reg)->ResetDefBody();
  }
}

// NOP out the LIR range that defined reg's current value (if tracked).
void Mir2Lir::NullifyRange(RegStorage reg, int s_reg) {
  RegisterInfo* info = nullptr;
  RegStorage rs = reg.IsPair() ? reg.GetLow() : reg;
  if (IsTemp(rs)) {
    // NOTE(review): looks up via the original reg, not rs - presumably
    // GetRegInfo resolves a pair to its low half; confirm in mir_to_lir-inl.h.
    info = GetRegInfo(reg);
  }
  if ((info != nullptr) && (info->DefStart() != nullptr) && (info->DefEnd() != nullptr)) {
    DCHECK_EQ(info->SReg(), s_reg);  // Make sure we're on the same page.
    for (LIR* p = info->DefStart();; p = p->next) {
      NopLIR(p);
      if (p == info->DefEnd()) {
        break;
      }
    }
  }
}

/*
 * Mark the beginning and end LIR of a def sequence.  Note that
 * on entry start points to the LIR prior to the beginning of the
 * sequence.
 */
void Mir2Lir::MarkDef(RegLocation rl, LIR *start, LIR *finish) {
  DCHECK(!rl.wide);
  DCHECK(start && start->next);
  DCHECK(finish);
  RegisterInfo* p = GetRegInfo(rl.reg);
  p->SetDefStart(start->next);
  p->SetDefEnd(finish);
}

/*
 * Mark the beginning and end LIR of a def sequence.  Note that
 * on entry start points to the LIR prior to the beginning of the
 * sequence.
 */
void Mir2Lir::MarkDefWide(RegLocation rl, LIR *start, LIR *finish) {
  DCHECK(rl.wide);
  DCHECK(start && start->next);
  DCHECK(finish);
  RegisterInfo* p;
  if (rl.reg.IsPair()) {
    p = GetRegInfo(rl.reg.GetLow());
    ResetDef(rl.reg.GetHigh());  // Only track low of pair.
  } else {
    p = GetRegInfo(rl.reg);
  }
  p->SetDefStart(start->next);
  p->SetDefEnd(finish);
}

// Drop def tracking for a narrow location, NOPing its def range when load suppression is on.
void Mir2Lir::ResetDefLoc(RegLocation rl) {
  DCHECK(!rl.wide);
  if (IsTemp(rl.reg) && !(cu_->disable_opt & (1 << kSuppressLoads))) {
    NullifyRange(rl.reg, rl.s_reg_low);
  }
  ResetDef(rl.reg);
}

// Wide variant of ResetDefLoc; only the low half of a pair is tracked.
void Mir2Lir::ResetDefLocWide(RegLocation rl) {
  DCHECK(rl.wide);
  // If pair, only track low reg of pair.
  RegStorage rs = rl.reg.IsPair() ? rl.reg.GetLow() : rl.reg;
  if (IsTemp(rs) && !(cu_->disable_opt & (1 << kSuppressLoads))) {
    NullifyRange(rs, rl.s_reg_low);
  }
  ResetDef(rs);
}

// Clear def tracking on every temp.
void Mir2Lir::ResetDefTracking() {
  GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
  for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
    info->ResetDefBody();
  }
}

// Kill the values held by every temp (does not touch in-use flags).
void Mir2Lir::ClobberAllTemps() {
  GrowableArray<RegisterInfo*>::Iterator iter(&tempreg_info_);
  for (RegisterInfo* info = iter.Next(); info != NULL; info = iter.Next()) {
    ClobberBody(info);
  }
}

// Write a live, dirty wide value back to its Dalvik frame home and mark it clean.
void Mir2Lir::FlushRegWide(RegStorage reg) {
  if (reg.IsPair()) {
    RegisterInfo* info1 = GetRegInfo(reg.GetLow());
    RegisterInfo* info2 = GetRegInfo(reg.GetHigh());
    DCHECK(info1 && info2 && info1->IsWide() && info2->IsWide() &&
           (info1->Partner() == info2->GetReg()) && (info2->Partner() == info1->GetReg()));
    if ((info1->IsLive() && info1->IsDirty()) || (info2->IsLive() && info2->IsDirty())) {
      if (!(info1->IsTemp() && info2->IsTemp())) {
        /* Should not happen.  If it does, there's a problem in eval_loc */
        LOG(FATAL) << "Long half-temp, half-promoted";
      }

      info1->SetIsDirty(false);
      info2->SetIsDirty(false);
      // Store against the lower-numbered vreg of the pair.
      if (mir_graph_->SRegToVReg(info2->SReg()) < mir_graph_->SRegToVReg(info1->SReg())) {
        info1 = info2;
      }
      int v_reg = mir_graph_->SRegToVReg(info1->SReg());
      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
      StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64, kNotVolatile);
    }
  } else {
    // 64-bit solo register.
    RegisterInfo* info = GetRegInfo(reg);
    if (info->IsLive() && info->IsDirty()) {
      info->SetIsDirty(false);
      int v_reg = mir_graph_->SRegToVReg(info->SReg());
      ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
      StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, k64, kNotVolatile);
    }
  }
}

// Write a live, dirty narrow value back to its Dalvik frame home and mark it clean.
void Mir2Lir::FlushReg(RegStorage reg) {
  DCHECK(!reg.IsPair());
  RegisterInfo* info = GetRegInfo(reg);
  if (info->IsLive() && info->IsDirty()) {
    info->SetIsDirty(false);
    int v_reg = mir_graph_->SRegToVReg(info->SReg());
    ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
    StoreBaseDisp(TargetReg(kSp), VRegOffset(v_reg), reg, kWord, kNotVolatile);
  }
}

// Dispatch to the wide or narrow flush based on how the register is used.
void Mir2Lir::FlushSpecificReg(RegisterInfo* info) {
  if (info->IsWide()) {
    FlushRegWide(info->GetReg());
  } else {
    FlushReg(info->GetReg());
  }
}

// Flush every dirty live temp to memory, then mark all temps dead and narrow.
void Mir2Lir::FlushAllRegs() {
  GrowableArray<RegisterInfo*>::Iterator it(&tempreg_info_);
  for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) {
    if (info->IsDirty() && info->IsLive()) {
      FlushSpecificReg(info);
    }
    info->MarkDead();
    info->SetIsWide(false);
  }
}


// Does reg satisfy the requested register class (kAnyReg always matches)?
bool Mir2Lir::RegClassMatches(int reg_class, RegStorage reg) {
  if (reg_class == kAnyReg) {
    return true;
  } else if ((reg_class == kCoreReg) || (reg_class == kRefReg)) {
    /*
     * For this purpose, consider Core and Ref to be the same class.  We aren't dealing
     * with width here - that should be checked at a higher level (if needed).
     */
    return !reg.IsFloat();
  } else {
    return reg.IsFloat();
  }
}

/*
 * Associate loc's sreg with its temp register(s), clobbering any other
 * holders of that sreg first.  A location with no sreg is marked dead.
 */
void Mir2Lir::MarkLive(RegLocation loc) {
  RegStorage reg = loc.reg;
  if (!IsTemp(reg)) {
    return;
  }
  int s_reg = loc.s_reg_low;
  if (s_reg == INVALID_SREG) {
    // Can't be live if no associated sreg.
    if (reg.IsPair()) {
      GetRegInfo(reg.GetLow())->MarkDead();
      GetRegInfo(reg.GetHigh())->MarkDead();
    } else {
      GetRegInfo(reg)->MarkDead();
    }
  } else {
    if (reg.IsPair()) {
      RegisterInfo* info_lo = GetRegInfo(reg.GetLow());
      RegisterInfo* info_hi = GetRegInfo(reg.GetHigh());
      if (info_lo->IsLive() && (info_lo->SReg() == s_reg) && info_hi->IsLive() &&
          (info_hi->SReg() == s_reg)) {
        return;  // Already live.
      }
      ClobberSReg(s_reg);
      ClobberSReg(s_reg + 1);
      info_lo->MarkLive(s_reg);
      info_hi->MarkLive(s_reg + 1);
    } else {
      RegisterInfo* info = GetRegInfo(reg);
      if (info->IsLive() && (info->SReg() == s_reg)) {
        return;  // Already live.
      }
      ClobberSReg(s_reg);
      if (loc.wide) {
        ClobberSReg(s_reg + 1);
      }
      info->MarkLive(s_reg);
    }
    if (loc.wide) {
      MarkWide(reg);
    } else {
      MarkNarrow(reg);
    }
  }
}

// Add reg to the temp set.
void Mir2Lir::MarkTemp(RegStorage reg) {
  DCHECK(!reg.IsPair());
  RegisterInfo* info = GetRegInfo(reg);
  tempreg_info_.Insert(info);
  info->SetIsTemp(true);
}

// Remove reg from the temp set.
void Mir2Lir::UnmarkTemp(RegStorage reg) {
  DCHECK(!reg.IsPair());
  RegisterInfo* info = GetRegInfo(reg);
  tempreg_info_.Delete(info);
  info->SetIsTemp(false);
}

// Mark reg as holding a wide value; for a pair, link the halves as partners.
void Mir2Lir::MarkWide(RegStorage reg) {
  if (reg.IsPair()) {
    RegisterInfo* info_lo = GetRegInfo(reg.GetLow());
    RegisterInfo* info_hi = GetRegInfo(reg.GetHigh());
    // Unpair any old partners.
    if (info_lo->IsWide() && info_lo->Partner() != info_hi->GetReg()) {
      GetRegInfo(info_lo->Partner())->SetIsWide(false);
    }
    if (info_hi->IsWide() && info_hi->Partner() != info_lo->GetReg()) {
      GetRegInfo(info_hi->Partner())->SetIsWide(false);
    }
    info_lo->SetIsWide(true);
    info_hi->SetIsWide(true);
    info_lo->SetPartner(reg.GetHigh());
    info_hi->SetPartner(reg.GetLow());
  } else {
    // Solo wide register partners with itself.
    RegisterInfo* info = GetRegInfo(reg);
    info->SetIsWide(true);
    info->SetPartner(reg);
  }
}

// Mark reg as holding a narrow value (self-partnered, not wide).
void Mir2Lir::MarkNarrow(RegStorage reg) {
  DCHECK(!reg.IsPair());
  RegisterInfo* info = GetRegInfo(reg);
  info->SetIsWide(false);
  info->SetPartner(reg);
}

// Clear the dirty flag(s) for loc's register(s).
void Mir2Lir::MarkClean(RegLocation loc) {
  if (loc.reg.IsPair()) {
    RegisterInfo* info = GetRegInfo(loc.reg.GetLow());
    info->SetIsDirty(false);
    info = GetRegInfo(loc.reg.GetHigh());
    info->SetIsDirty(false);
  } else {
    RegisterInfo* info = GetRegInfo(loc.reg);
    info->SetIsDirty(false);
  }
}

// FIXME: need to verify rules/assumptions about how wide values are treated in 64BitSolos.
912void Mir2Lir::MarkDirty(RegLocation loc) { 913 if (loc.home) { 914 // If already home, can't be dirty 915 return; 916 } 917 if (loc.reg.IsPair()) { 918 RegisterInfo* info = GetRegInfo(loc.reg.GetLow()); 919 info->SetIsDirty(true); 920 info = GetRegInfo(loc.reg.GetHigh()); 921 info->SetIsDirty(true); 922 } else { 923 RegisterInfo* info = GetRegInfo(loc.reg); 924 info->SetIsDirty(true); 925 } 926} 927 928void Mir2Lir::MarkInUse(RegStorage reg) { 929 if (reg.IsPair()) { 930 GetRegInfo(reg.GetLow())->MarkInUse(); 931 GetRegInfo(reg.GetHigh())->MarkInUse(); 932 } else { 933 GetRegInfo(reg)->MarkInUse(); 934 } 935} 936 937bool Mir2Lir::CheckCorePoolSanity() { 938 GrowableArray<RegisterInfo*>::Iterator it(&tempreg_info_); 939 for (RegisterInfo* info = it.Next(); info != nullptr; info = it.Next()) { 940 if (info->IsTemp() && info->IsLive() && info->IsWide()) { 941 RegStorage my_reg = info->GetReg(); 942 int my_sreg = info->SReg(); 943 RegStorage partner_reg = info->Partner(); 944 RegisterInfo* partner = GetRegInfo(partner_reg); 945 DCHECK(partner != NULL); 946 DCHECK(partner->IsWide()); 947 DCHECK_EQ(my_reg.GetReg(), partner->Partner().GetReg()); 948 DCHECK(partner->IsLive()); 949 int partner_sreg = partner->SReg(); 950 if (my_sreg == INVALID_SREG) { 951 DCHECK_EQ(partner_sreg, INVALID_SREG); 952 } else { 953 int diff = my_sreg - partner_sreg; 954 DCHECK((diff == 0) || (diff == -1) || (diff == 1)); 955 } 956 } 957 if (info->Master() != info) { 958 // Aliased. 959 if (info->IsLive() && (info->SReg() != INVALID_SREG)) { 960 // If I'm live, master should not be live, but should show liveness in alias set. 961 DCHECK_EQ(info->Master()->SReg(), INVALID_SREG); 962 DCHECK(!info->Master()->IsDead()); 963 } 964// TODO: Add checks in !info->IsDead() case to ensure every live bit is owned by exactly 1 reg. 965 } 966 if (info->IsAliased()) { 967 // Has child aliases. 
      DCHECK_EQ(info->Master(), info);
      if (info->IsLive() && (info->SReg() != INVALID_SREG)) {
        // Master live, no child should be dead - all should show liveness in set.
        for (RegisterInfo* p = info->GetAliasChain(); p != nullptr; p = p->GetAliasChain()) {
          DCHECK(!p->IsDead());
          DCHECK_EQ(p->SReg(), INVALID_SREG);
        }
      } else if (!info->IsDead()) {
        // Master not live, one or more aliases must be.
        bool live_alias = false;
        for (RegisterInfo* p = info->GetAliasChain(); p != nullptr; p = p->GetAliasChain()) {
          live_alias |= p->IsLive();
        }
        DCHECK(live_alias);
      }
    }
    if (info->IsLive() && (info->SReg() == INVALID_SREG)) {
      // If not fully live, should have INVALID_SREG and def's should be null.
      DCHECK(info->DefStart() == nullptr);
      DCHECK(info->DefEnd() == nullptr);
    }
  }
  // Always true; callers wrap this in DCHECK() so the walk is debug-build-only.
  return true;
}

/*
 * Return an updated location record with current in-register status.
 * If the value lives in live temps, reflect that fact. No code
 * is generated. If the live value is part of an older pair,
 * clobber both low and high.
 * TUNING: clobbering both is a bit heavy-handed, but the alternative
 * is a bit complex when dealing with FP regs. Examine code to see
 * if it's worthwhile trying to be more clever here.
 */
RegLocation Mir2Lir::UpdateLoc(RegLocation loc) {
  DCHECK(!loc.wide);
  DCHECK(CheckCorePoolSanity());
  if (loc.location != kLocPhysReg) {
    DCHECK((loc.location == kLocDalvikFrame) ||
           (loc.location == kLocCompilerTemp));
    // Reference values must come from a ref-capable register class.
    RegStorage reg = AllocLiveReg(loc.s_reg_low, loc.ref ?
kRefReg : kAnyReg, false);
    if (reg.Valid()) {
      bool match = true;
      RegisterInfo* info = GetRegInfo(reg);
      // A narrow use can only reuse the live register if it is neither a pair
      // nor currently marked as holding a wide value.
      match &= !reg.IsPair();
      match &= !info->IsWide();
      if (match) {
        loc.location = kLocPhysReg;
        loc.reg = reg;
      } else {
        // Live copy exists but in the wrong form - discard it.
        Clobber(reg);
        FreeTemp(reg);
      }
    }
    CheckRegLocation(loc);
  }
  return loc;
}

RegLocation Mir2Lir::UpdateLocWide(RegLocation loc) {
  DCHECK(loc.wide);
  DCHECK(CheckCorePoolSanity());
  if (loc.location != kLocPhysReg) {
    DCHECK((loc.location == kLocDalvikFrame) ||
           (loc.location == kLocCompilerTemp));
    RegStorage reg = AllocLiveReg(loc.s_reg_low, kAnyReg, true);
    if (reg.Valid()) {
      bool match = true;
      if (reg.IsPair()) {
        // If we've got a register pair, make sure that it was last used as the same pair.
        RegisterInfo* info_lo = GetRegInfo(reg.GetLow());
        RegisterInfo* info_hi = GetRegInfo(reg.GetHigh());
        match &= info_lo->IsWide();
        match &= info_hi->IsWide();
        match &= (info_lo->Partner() == info_hi->GetReg());
        match &= (info_hi->Partner() == info_lo->GetReg());
      } else {
        // Solo register: must be wide and self-partnered.
        RegisterInfo* info = GetRegInfo(reg);
        match &= info->IsWide();
        match &= (info->GetReg() == info->Partner());
      }
      if (match) {
        loc.location = kLocPhysReg;
        loc.reg = reg;
      } else {
        // Live copy exists but in the wrong form - discard it.
        Clobber(reg);
        FreeTemp(reg);
      }
    }
    CheckRegLocation(loc);
  }
  return loc;
}

/* For use in cases we don't know (or care) width */
RegLocation Mir2Lir::UpdateRawLoc(RegLocation loc) {
  if (loc.wide)
    return UpdateLocWide(loc);
  else
    return UpdateLoc(loc);
}

RegLocation Mir2Lir::EvalLocWide(RegLocation loc, int reg_class, bool update) {
  DCHECK(loc.wide);

  loc = UpdateLocWide(loc);

  /* If already in registers, we can assume proper form. Right reg class?
*/
  if (loc.location == kLocPhysReg) {
    if (!RegClassMatches(reg_class, loc.reg)) {
      // Wrong register class. Reallocate and transfer ownership.
      RegStorage new_regs = AllocTypedTempWide(loc.fp, reg_class);
      // Clobber the old regs.
      Clobber(loc.reg);
      // ...and mark the new ones live.
      loc.reg = new_regs;
      MarkWide(loc.reg);
      MarkLive(loc);
    }
    CheckRegLocation(loc);
    return loc;
  }

  // Value is not in a register: both halves must have valid SSA names.
  DCHECK_NE(loc.s_reg_low, INVALID_SREG);
  DCHECK_NE(GetSRegHi(loc.s_reg_low), INVALID_SREG);

  loc.reg = AllocTypedTempWide(loc.fp, reg_class);
  MarkWide(loc.reg);

  if (update) {
    loc.location = kLocPhysReg;
    MarkLive(loc);
  }
  CheckRegLocation(loc);
  return loc;
}

RegLocation Mir2Lir::EvalLoc(RegLocation loc, int reg_class, bool update) {
  // Narrow reg_class if the loc is a ref.
  if (loc.ref && reg_class == kAnyReg) {
    reg_class = kRefReg;
  }

  if (loc.wide) {
    // Wide values are handled by the wide variant.
    return EvalLocWide(loc, reg_class, update);
  }

  loc = UpdateLoc(loc);

  if (loc.location == kLocPhysReg) {
    if (!RegClassMatches(reg_class, loc.reg)) {
      // Wrong register class. Reallocate and transfer ownership.
      RegStorage new_reg = AllocTypedTemp(loc.fp, reg_class);
      // Clobber the old reg.
      Clobber(loc.reg);
      // ...and mark the new one live.
      loc.reg = new_reg;
      MarkLive(loc);
    }
    CheckRegLocation(loc);
    return loc;
  }

  DCHECK_NE(loc.s_reg_low, INVALID_SREG);

  loc.reg = AllocTypedTemp(loc.fp, reg_class);
  CheckRegLocation(loc);

  if (update) {
    loc.location = kLocPhysReg;
    MarkLive(loc);
  }
  CheckRegLocation(loc);
  return loc;
}

/* USE SSA names to count references of base Dalvik v_regs.
*/ 1145void Mir2Lir::CountRefs(RefCounts* core_counts, RefCounts* fp_counts, size_t num_regs) { 1146 for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) { 1147 RegLocation loc = mir_graph_->reg_location_[i]; 1148 RefCounts* counts = loc.fp ? fp_counts : core_counts; 1149 int p_map_idx = SRegToPMap(loc.s_reg_low); 1150 if (loc.fp) { 1151 if (loc.wide) { 1152 // Treat doubles as a unit, using upper half of fp_counts array. 1153 counts[p_map_idx + num_regs].count += mir_graph_->GetUseCount(i); 1154 i++; 1155 } else { 1156 counts[p_map_idx].count += mir_graph_->GetUseCount(i); 1157 } 1158 } else if (!IsInexpensiveConstant(loc)) { 1159 counts[p_map_idx].count += mir_graph_->GetUseCount(i); 1160 } 1161 } 1162} 1163 1164/* qsort callback function, sort descending */ 1165static int SortCounts(const void *val1, const void *val2) { 1166 const Mir2Lir::RefCounts* op1 = reinterpret_cast<const Mir2Lir::RefCounts*>(val1); 1167 const Mir2Lir::RefCounts* op2 = reinterpret_cast<const Mir2Lir::RefCounts*>(val2); 1168 // Note that we fall back to sorting on reg so we get stable output 1169 // on differing qsort implementations (such as on host and target or 1170 // between local host and build servers). 1171 return (op1->count == op2->count) 1172 ? (op1->s_reg - op2->s_reg) 1173 : (op1->count < op2->count ? 1 : -1); 1174} 1175 1176void Mir2Lir::DumpCounts(const RefCounts* arr, int size, const char* msg) { 1177 LOG(INFO) << msg; 1178 for (int i = 0; i < size; i++) { 1179 if ((arr[i].s_reg & STARTING_DOUBLE_SREG) != 0) { 1180 LOG(INFO) << "s_reg[D" << (arr[i].s_reg & ~STARTING_DOUBLE_SREG) << "]: " << arr[i].count; 1181 } else { 1182 LOG(INFO) << "s_reg[" << arr[i].s_reg << "]: " << arr[i].count; 1183 } 1184 } 1185} 1186 1187/* 1188 * Note: some portions of this code required even if the kPromoteRegs 1189 * optimization is disabled. 
 */
void Mir2Lir::DoPromotion() {
  int dalvik_regs = cu_->num_dalvik_registers;
  int num_regs = dalvik_regs + mir_graph_->GetNumUsedCompilerTemps();
  // Minimum use count for a vreg to be considered for promotion.
  const int promotion_threshold = 1;
  // Allocate the promotion map - one entry for each Dalvik vReg or compiler temp
  promotion_map_ = static_cast<PromotionMap*>
      (arena_->Alloc(num_regs * sizeof(promotion_map_[0]), kArenaAllocRegAlloc));

  // Allow target code to add any special registers
  AdjustSpillMask();

  /*
   * Simple register promotion. Just do a static count of the uses
   * of Dalvik registers. Note that we examine the SSA names, but
   * count based on original Dalvik register name. Count refs
   * separately based on type in order to give allocation
   * preference to fp doubles - which must be allocated sequential
   * physical single fp registers starting with an even-numbered
   * reg.
   * TUNING: replace with linear scan once we have the ability
   * to describe register live ranges for GC.
   */
  RefCounts *core_regs =
      static_cast<RefCounts*>(arena_->Alloc(sizeof(RefCounts) * num_regs,
                                            kArenaAllocRegAlloc));
  // FpRegs is twice as large: the upper half tracks double-starting sregs.
  RefCounts *FpRegs =
      static_cast<RefCounts *>(arena_->Alloc(sizeof(RefCounts) * num_regs * 2,
                                             kArenaAllocRegAlloc));
  // Set ssa names for original Dalvik registers
  for (int i = 0; i < dalvik_regs; i++) {
    core_regs[i].s_reg = FpRegs[i].s_reg = i;
  }

  // Set ssa names for compiler temporaries
  for (unsigned int ct_idx = 0; ct_idx < mir_graph_->GetNumUsedCompilerTemps(); ct_idx++) {
    CompilerTemp* ct = mir_graph_->GetCompilerTemp(ct_idx);
    core_regs[dalvik_regs + ct_idx].s_reg = ct->s_reg_low;
    FpRegs[dalvik_regs + ct_idx].s_reg = ct->s_reg_low;
    FpRegs[num_regs + dalvik_regs + ct_idx].s_reg = ct->s_reg_low;
  }

  // Duplicate in upper half to represent possible fp double starting sregs.
  for (int i = 0; i < num_regs; i++) {
    // Tag upper-half entries so they are recognizable as double-start sregs.
    FpRegs[num_regs + i].s_reg = FpRegs[i].s_reg | STARTING_DOUBLE_SREG;
  }

  // Sum use counts of SSA regs by original Dalvik vreg.
  CountRefs(core_regs, FpRegs, num_regs);


  // Sort the count arrays
  qsort(core_regs, num_regs, sizeof(RefCounts), SortCounts);
  qsort(FpRegs, num_regs * 2, sizeof(RefCounts), SortCounts);

  if (cu_->verbose) {
    DumpCounts(core_regs, num_regs, "Core regs after sort");
    DumpCounts(FpRegs, num_regs * 2, "Fp regs after sort");
  }

  if (!(cu_->disable_opt & (1 << kPromoteRegs))) {
    // Promote FpRegs
    for (int i = 0; (i < (num_regs * 2)) && (FpRegs[i].count >= promotion_threshold); i++) {
      int p_map_idx = SRegToPMap(FpRegs[i].s_reg & ~STARTING_DOUBLE_SREG);
      if ((FpRegs[i].s_reg & STARTING_DOUBLE_SREG) != 0) {
        // Double-start entry: promote only if neither half is already promoted.
        if ((promotion_map_[p_map_idx].fp_location != kLocPhysReg) &&
            (promotion_map_[p_map_idx + 1].fp_location != kLocPhysReg)) {
          int low_sreg = FpRegs[i].s_reg & ~STARTING_DOUBLE_SREG;
          // Ignore result - if can't alloc double may still be able to alloc singles.
          AllocPreservedDouble(low_sreg);
        }
      } else if (promotion_map_[p_map_idx].fp_location != kLocPhysReg) {
        RegStorage reg = AllocPreservedSingle(FpRegs[i].s_reg);
        if (!reg.Valid()) {
          break;  // No more left.
        }
      }
    }

    // Promote core regs
    for (int i = 0; (i < num_regs) &&
         (core_regs[i].count >= promotion_threshold); i++) {
      int p_map_idx = SRegToPMap(core_regs[i].s_reg);
      if (promotion_map_[p_map_idx].core_location !=
          kLocPhysReg) {
        RegStorage reg = AllocPreservedCoreReg(core_regs[i].s_reg);
        if (!reg.Valid()) {
          break;  // No more left
        }
      }
    }
  }

  // Now, update SSA names to new home locations
  for (int i = 0; i < mir_graph_->GetNumSSARegs(); i++) {
    RegLocation *curr = &mir_graph_->reg_location_[i];
    int p_map_idx = SRegToPMap(curr->s_reg_low);
    if (!curr->wide) {
      if (curr->fp) {
        if (promotion_map_[p_map_idx].fp_location == kLocPhysReg) {
          curr->location = kLocPhysReg;
          curr->reg = RegStorage::Solo32(promotion_map_[p_map_idx].FpReg);
          curr->home = true;
        }
      } else {
        if (promotion_map_[p_map_idx].core_location == kLocPhysReg) {
          curr->location = kLocPhysReg;
          curr->reg = RegStorage::Solo32(promotion_map_[p_map_idx].core_reg);
          curr->home = true;
        }
      }
    } else {
      if (curr->high_word) {
        // High halves are handled together with their low half.
        continue;
      }
      if (curr->fp) {
        // Wide fp: both halves must have been promoted for the pair to be home.
        if ((promotion_map_[p_map_idx].fp_location == kLocPhysReg) &&
            (promotion_map_[p_map_idx+1].fp_location == kLocPhysReg)) {
          int low_reg = promotion_map_[p_map_idx].FpReg;
          int high_reg = promotion_map_[p_map_idx+1].FpReg;
          // Doubles require pair of singles starting at even reg
          // TODO: move target-specific restrictions out of here.
1312 if (((low_reg & 0x1) == 0) && ((low_reg + 1) == high_reg)) { 1313 curr->location = kLocPhysReg; 1314 if (cu_->instruction_set == kThumb2) { 1315 curr->reg = RegStorage::FloatSolo64(RegStorage::RegNum(low_reg) >> 1); 1316 } else { 1317 curr->reg = RegStorage(RegStorage::k64BitPair, low_reg, high_reg); 1318 } 1319 curr->home = true; 1320 } 1321 } 1322 } else { 1323 if ((promotion_map_[p_map_idx].core_location == kLocPhysReg) 1324 && (promotion_map_[p_map_idx+1].core_location == 1325 kLocPhysReg)) { 1326 curr->location = kLocPhysReg; 1327 curr->reg = RegStorage(RegStorage::k64BitPair, promotion_map_[p_map_idx].core_reg, 1328 promotion_map_[p_map_idx+1].core_reg); 1329 curr->home = true; 1330 } 1331 } 1332 } 1333 } 1334 if (cu_->verbose) { 1335 DumpPromotionMap(); 1336 } 1337} 1338 1339/* Returns sp-relative offset in bytes for a VReg */ 1340int Mir2Lir::VRegOffset(int v_reg) { 1341 return StackVisitor::GetVRegOffset(cu_->code_item, core_spill_mask_, 1342 fp_spill_mask_, frame_size_, v_reg, 1343 cu_->instruction_set); 1344} 1345 1346/* Returns sp-relative offset in bytes for a SReg */ 1347int Mir2Lir::SRegOffset(int s_reg) { 1348 return VRegOffset(mir_graph_->SRegToVReg(s_reg)); 1349} 1350 1351/* Mark register usage state and return long retloc */ 1352RegLocation Mir2Lir::GetReturnWide(RegisterClass reg_class) { 1353 RegLocation res; 1354 switch (reg_class) { 1355 case kRefReg: LOG(FATAL); break; 1356 case kFPReg: res = LocCReturnDouble(); break; 1357 default: res = LocCReturnWide(); break; 1358 } 1359 Clobber(res.reg); 1360 LockTemp(res.reg); 1361 MarkWide(res.reg); 1362 CheckRegLocation(res); 1363 return res; 1364} 1365 1366RegLocation Mir2Lir::GetReturn(RegisterClass reg_class) { 1367 RegLocation res; 1368 switch (reg_class) { 1369 case kRefReg: res = LocCReturnRef(); break; 1370 case kFPReg: res = LocCReturnFloat(); break; 1371 default: res = LocCReturn(); break; 1372 } 1373 Clobber(res.reg); 1374 if (cu_->instruction_set == kMips) { 1375 MarkInUse(res.reg); 
  } else {
    LockTemp(res.reg);
  }
  CheckRegLocation(res);
  return res;
}

// Entry point for the simple promotion-based allocation pass: promote
// frequently-used vregs, optionally dump the result, then set the frame size.
void Mir2Lir::SimpleRegAlloc() {
  DoPromotion();

  if (cu_->verbose && !(cu_->disable_opt & (1 << kPromoteRegs))) {
    LOG(INFO) << "After Promotion";
    mir_graph_->DumpRegLocTable(mir_graph_->reg_location_, mir_graph_->GetNumSSARegs());
  }

  /* Set the frame size */
  frame_size_ = ComputeFrameSize();
}

/*
 * Get the "real" sreg number associated with an s_reg slot. In general,
 * s_reg values passed through codegen are the SSA names created by
 * dataflow analysis and refer to slot numbers in the mir_graph_->reg_location
 * array. However, renaming is accomplished by simply replacing RegLocation
 * entries in the reglocation[] array. Therefore, when location
 * records for operands are first created, we need to ask the locRecord
 * identified by the dataflow pass what it's new name is.
 */
int Mir2Lir::GetSRegHi(int lowSreg) {
  // The high half of a wide value occupies the next SSA slot.
  return (lowSreg == INVALID_SREG) ? INVALID_SREG : lowSreg + 1;
}

// Conservative liveness query: currently assumes every s_reg may be live-out.
bool Mir2Lir::LiveOut(int s_reg) {
  // For now.
  return true;
}

}  // namespace art