stack.cc revision 356412e2b7ba3fde164bc08a44fee0ddc19c54e1
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "stack.h"

#include "arch/context.h"
#include "art_method-inl.h"
#include "base/hex_dump.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc_map.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "linear_alloc.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "quick/quick_method_frame_info.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
#include "verify_object-inl.h"
#include "vmap_table.h"

namespace art {

static constexpr bool kDebugStackWalk = false;

mirror::Object* ShadowFrame::GetThisObject() const {
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else if (m->IsNative()) {
    return GetVRegReference(0);
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    CHECK(code_item != nullptr) << PrettyMethod(m);
    uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
    return GetVRegReference(reg);
  }
}

mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else {
    return GetVRegReference(NumberOfVRegs() - num_ins);
  }
}

size_t ManagedStack::NumJniShadowFrameReferences() const {
  size_t count = 0;
  for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
       current_fragment = current_fragment->GetLink()) {
    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
         current_frame = current_frame->GetLink()) {
      if (current_frame->GetMethod()->IsNative()) {
        // The JNI ShadowFrame only contains references. (For indirect reference.)
        count += current_frame->NumberOfVRegs();
      }
    }
  }
  return count;
}

bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const {
  for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
       current_fragment = current_fragment->GetLink()) {
    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
         current_frame = current_frame->GetLink()) {
      if (current_frame->Contains(shadow_frame_entry)) {
        return true;
      }
    }
  }
  return false;
}

StackVisitor::StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
    : StackVisitor(thread, context, walk_kind, 0) {}

StackVisitor::StackVisitor(Thread* thread,
                           Context* context,
                           StackWalkKind walk_kind,
                           size_t num_frames)
    : thread_(thread),
      walk_kind_(walk_kind),
      cur_shadow_frame_(nullptr),
      cur_quick_frame_(nullptr),
      cur_quick_frame_pc_(0),
      num_frames_(num_frames),
      cur_depth_(0),
      current_inlining_depth_(0),
      context_(context) {
  DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
}

InlineInfo StackVisitor::GetCurrentInlineInfo() const {
  ArtMethod* outer_method = *GetCurrentQuickFrame();
  uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
  CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
  StackMapEncoding encoding = code_info.ExtractEncoding();
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
  DCHECK(stack_map.IsValid());
  return code_info.GetInlineInfoOf(stack_map, encoding);
}

ArtMethod* StackVisitor::GetMethod() const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetMethod();
  } else if (cur_quick_frame_ != nullptr) {
    if (IsInInlinedFrame()) {
      size_t depth_in_stack_map = current_inlining_depth_ - 1;
      InlineInfo inline_info = GetCurrentInlineInfo();
      return GetResolvedMethod(*GetCurrentQuickFrame(), inline_info, depth_in_stack_map);
    } else {
      return *cur_quick_frame_;
    }
  }
  return nullptr;
}

uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetDexPC();
  } else if (cur_quick_frame_ != nullptr) {
    if (IsInInlinedFrame()) {
      size_t depth_in_stack_map = current_inlining_depth_ - 1;
      return GetCurrentInlineInfo().GetDexPcAtDepth(depth_in_stack_map);
    } else {
      return GetMethod()->ToDexPc(cur_quick_frame_pc_, abort_on_failure);
    }
  } else {
    return 0;
  }
}

extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_);

mirror::Object* StackVisitor::GetThisObject() const {
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else if (m->IsNative()) {
    if (cur_quick_frame_ != nullptr) {
      HandleScope* hs = reinterpret_cast<HandleScope*>(
          reinterpret_cast<char*>(cur_quick_frame_) + m->GetHandleScopeOffset().SizeValue());
      return hs->GetReference(0);
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else if (m->IsProxyMethod()) {
    if (cur_quick_frame_ != nullptr) {
      return artQuickGetProxyThisObject(cur_quick_frame_);
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    if (code_item == nullptr) {
      UNIMPLEMENTED(ERROR) << "Failed to determine this object of abstract or proxy method: "
          << PrettyMethod(m);
      return nullptr;
    } else {
      uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
      uint32_t value = 0;
      bool success = GetVReg(m, reg, kReferenceVReg, &value);
      // We currently always guarantee the `this` object is live throughout the method.
      CHECK(success) << "Failed to read the this object in " << PrettyMethod(m);
      return reinterpret_cast<mirror::Object*>(value);
    }
  }
}

size_t StackVisitor::GetNativePcOffset() const {
  DCHECK(!IsShadowFrame());
  return GetMethod()->NativeQuickPcOffset(cur_quick_frame_pc_);
}

bool StackVisitor::IsReferenceVReg(ArtMethod* m, uint16_t vreg) {
  // Process register map (which native and runtime methods don't have)
  if (m->IsNative() || m->IsRuntimeMethod() || m->IsProxyMethod()) {
    return false;
  }
  if (m->IsOptimized(sizeof(void*))) {
    return true;  // TODO: Implement.
  }
  const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
  CHECK(native_gc_map != nullptr) << PrettyMethod(m);
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  // Can't be null or how would we compile its instructions?
  DCHECK(code_item != nullptr) << PrettyMethod(m);
  NativePcOffsetToReferenceMap map(native_gc_map);
  size_t num_regs = std::min(map.RegWidth() * 8, static_cast<size_t>(code_item->registers_size_));
  const uint8_t* reg_bitmap = nullptr;
  if (num_regs > 0) {
    Runtime* runtime = Runtime::Current();
    const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
    uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
    reg_bitmap = map.FindBitMap(native_pc_offset);
    DCHECK(reg_bitmap != nullptr);
  }
  // Does this register hold a reference?
  return vreg < num_regs && TestBitmap(vreg, reg_bitmap);
}

bool StackVisitor::GetVRegFromDebuggerShadowFrame(uint16_t vreg,
                                                  VRegKind kind,
                                                  uint32_t* val) const {
  size_t frame_id = const_cast<StackVisitor*>(this)->GetFrameId();
  ShadowFrame* shadow_frame = thread_->FindDebuggerShadowFrame(frame_id);
  if (shadow_frame != nullptr) {
    bool* updated_vreg_flags = thread_->GetUpdatedVRegFlags(frame_id);
    DCHECK(updated_vreg_flags != nullptr);
    if (updated_vreg_flags[vreg]) {
      // Value is set by the debugger.
      if (kind == kReferenceVReg) {
        *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
            shadow_frame->GetVRegReference(vreg)));
      } else {
        *val = shadow_frame->GetVReg(vreg);
      }
      return true;
    }
  }
  // No value is set by the debugger.
  return false;
}

bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const {
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
    DCHECK(m == GetMethod());
    // Check if there is value set by the debugger.
    if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
      return true;
    }
    if (m->IsOptimized(sizeof(void*))) {
      return GetVRegFromOptimizedCode(m, vreg, kind, val);
    } else {
      return GetVRegFromQuickCode(m, vreg, kind, val);
    }
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    *val = cur_shadow_frame_->GetVReg(vreg);
    return true;
  }
}

bool StackVisitor::GetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                        uint32_t* val) const {
  const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
  DCHECK(code_pointer != nullptr);
  const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
  QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
  uint32_t vmap_offset;
  // TODO: IsInContext stops before spotting floating point registers.
  if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
    bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
    uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
    uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
    return GetRegisterIfAccessible(reg, kind, val);
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                      // its instructions?
    *val = *GetVRegAddrFromQuickCode(cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
                                     frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
    return true;
  }
}

bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                            uint32_t* val) const {
  DCHECK_EQ(m, GetMethod());
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                    // its instructions?
  uint16_t number_of_dex_registers = code_item->registers_size_;
  DCHECK_LT(vreg, code_item->registers_size_);
  ArtMethod* outer_method = *GetCurrentQuickFrame();
  const void* code_pointer = outer_method->GetQuickOatCodePointer(sizeof(void*));
  DCHECK(code_pointer != nullptr);
  CodeInfo code_info = outer_method->GetOptimizedCodeInfo();
  StackMapEncoding encoding = code_info.ExtractEncoding();

  uint32_t native_pc_offset = outer_method->NativeQuickPcOffset(cur_quick_frame_pc_);
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
  DCHECK(stack_map.IsValid());
  size_t depth_in_stack_map = current_inlining_depth_ - 1;

  DexRegisterMap dex_register_map = IsInInlinedFrame()
      ? code_info.GetDexRegisterMapAtDepth(depth_in_stack_map,
                                           code_info.GetInlineInfoOf(stack_map, encoding),
                                           encoding,
                                           number_of_dex_registers)
      : code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);

  DexRegisterLocation::Kind location_kind =
      dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info, encoding);
  switch (location_kind) {
    case DexRegisterLocation::Kind::kInStack: {
      const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg,
                                                                    number_of_dex_registers,
                                                                    code_info,
                                                                    encoding);
      const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
      *val = *reinterpret_cast<const uint32_t*>(addr);
      return true;
    }
    case DexRegisterLocation::Kind::kInRegister:
    case DexRegisterLocation::Kind::kInRegisterHigh:
    case DexRegisterLocation::Kind::kInFpuRegister:
    case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
      uint32_t reg =
          dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info, encoding);
      return GetRegisterIfAccessible(reg, kind, val);
    }
    case DexRegisterLocation::Kind::kConstant:
      *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info, encoding);
      return true;
    case DexRegisterLocation::Kind::kNone:
      return false;
    default:
      LOG(FATAL)
          << "Unexpected location kind"
          << DexRegisterLocation::PrettyDescriptor(
                dex_register_map.GetLocationInternalKind(vreg,
                                                         number_of_dex_registers,
                                                         code_info,
                                                         encoding));
      UNREACHABLE();
  }
}

bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const {
  const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);

  // X86 float registers are 64-bit and the logic below does not apply.
  DCHECK(!is_float || kRuntimeISA != InstructionSet::kX86);

  if (!IsAccessibleRegister(reg, is_float)) {
    return false;
  }
  uintptr_t ptr_val = GetRegister(reg, is_float);
  const bool target64 = Is64BitInstructionSet(kRuntimeISA);
  if (target64) {
    const bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
    const bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
    int64_t value_long = static_cast<int64_t>(ptr_val);
    if (wide_lo) {
      ptr_val = static_cast<uintptr_t>(Low32Bits(value_long));
    } else if (wide_hi) {
      ptr_val = static_cast<uintptr_t>(High32Bits(value_long));
    }
  }
  *val = ptr_val;
  return true;
}

bool StackVisitor::GetVRegPairFromDebuggerShadowFrame(uint16_t vreg,
                                                      VRegKind kind_lo,
                                                      VRegKind kind_hi,
                                                      uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromDebuggerShadowFrame(vreg, kind_lo, &low_32bits);
  success &= GetVRegFromDebuggerShadowFrame(vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                               VRegKind kind_hi, uint64_t* val) const {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  // Check if there is value set by the debugger.
  if (GetVRegPairFromDebuggerShadowFrame(vreg, kind_lo, kind_hi, val)) {
    return true;
  }
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
    DCHECK(m == GetMethod());
    if (m->IsOptimized(sizeof(void*))) {
      return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
    } else {
      return GetVRegPairFromQuickCode(m, vreg, kind_lo, kind_hi, val);
    }
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    *val = cur_shadow_frame_->GetVRegLong(vreg);
    return true;
  }
}

bool StackVisitor::GetVRegPairFromQuickCode(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                                            VRegKind kind_hi, uint64_t* val) const {
  const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
  DCHECK(code_pointer != nullptr);
  const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
  QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
  uint32_t vmap_offset_lo, vmap_offset_hi;
  // TODO: IsInContext stops before spotting floating point registers.
  if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
      vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
    bool is_float = (kind_lo == kDoubleLoVReg);
    uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
    uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
    uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi);
    return GetRegisterPairIfAccessible(reg_lo, reg_hi, kind_lo, val);
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                      // its instructions?
    uint32_t* addr = GetVRegAddrFromQuickCode(
        cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
    *val = *reinterpret_cast<uint64_t*>(addr);
    return true;
  }
}

bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                                VRegKind kind_lo, VRegKind kind_hi,
                                                uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, &low_32bits);
  success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

bool StackVisitor::GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
                                               VRegKind kind_lo, uint64_t* val) const {
  const bool is_float = (kind_lo == kDoubleLoVReg);
  if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
    return false;
  }
  uintptr_t ptr_val_lo = GetRegister(reg_lo, is_float);
  uintptr_t ptr_val_hi = GetRegister(reg_hi, is_float);
  bool target64 = Is64BitInstructionSet(kRuntimeISA);
  if (target64) {
    int64_t value_long_lo = static_cast<int64_t>(ptr_val_lo);
    int64_t value_long_hi = static_cast<int64_t>(ptr_val_hi);
    ptr_val_lo = static_cast<uintptr_t>(Low32Bits(value_long_lo));
    ptr_val_hi = static_cast<uintptr_t>(High32Bits(value_long_hi));
  }
  *val = (static_cast<uint64_t>(ptr_val_hi) << 32) | static_cast<uint32_t>(ptr_val_lo);
  return true;
}

bool StackVisitor::SetVReg(ArtMethod* m, uint16_t vreg, uint32_t new_value,
                           VRegKind kind) {
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably write registers without a context.
    DCHECK(m == GetMethod());
    if (m->IsOptimized(sizeof(void*))) {
      return false;
    } else {
      return SetVRegFromQuickCode(m, vreg, new_value, kind);
    }
  } else {
    cur_shadow_frame_->SetVReg(vreg, new_value);
    return true;
  }
}

bool StackVisitor::SetVRegFromQuickCode(ArtMethod* m, uint16_t vreg, uint32_t new_value,
                                        VRegKind kind) {
  DCHECK(context_ != nullptr);  // You can't reliably write registers without a context.
  DCHECK(m == GetMethod());
  const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
  DCHECK(code_pointer != nullptr);
  const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
  QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
  uint32_t vmap_offset;
  // TODO: IsInContext stops before spotting floating point registers.
  if (vmap_table.IsInContext(vreg, kind, &vmap_offset)) {
    bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
    uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
    uint32_t reg = vmap_table.ComputeRegister(spill_mask, vmap_offset, kind);
    return SetRegisterIfAccessible(reg, new_value, kind);
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                      // its instructions?
    uint32_t* addr = GetVRegAddrFromQuickCode(
        cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
    *addr = new_value;
    return true;
  }
}

bool StackVisitor::SetVRegFromDebugger(ArtMethod* m,
                                       uint16_t vreg,
                                       uint32_t new_value,
                                       VRegKind kind) {
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    return false;
  }
  ShadowFrame* shadow_frame = GetCurrentShadowFrame();
  if (shadow_frame == nullptr) {
    // This is a compiled frame: we must prepare and update a shadow frame that will
    // be executed by the interpreter after deoptimization of the stack.
    const size_t frame_id = GetFrameId();
    const uint16_t num_regs = code_item->registers_size_;
    shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
    CHECK(shadow_frame != nullptr);
    // Remember the vreg has been set for debugging and must not be overwritten by the
    // original value during deoptimization of the stack.
    thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
  }
  if (kind == kReferenceVReg) {
    shadow_frame->SetVRegReference(vreg, reinterpret_cast<mirror::Object*>(new_value));
  } else {
    shadow_frame->SetVReg(vreg, new_value);
  }
  return true;
}

bool StackVisitor::SetRegisterIfAccessible(uint32_t reg, uint32_t new_value, VRegKind kind) {
  const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);
  if (!IsAccessibleRegister(reg, is_float)) {
    return false;
  }
  const bool target64 = Is64BitInstructionSet(kRuntimeISA);

  // Create a new value that can hold both low 32 and high 32 bits, in
  // case we are running 64 bits.
  uintptr_t full_new_value = new_value;
  // Deal with 32 or 64-bit wide registers in a way that builds on all targets.
  if (target64) {
    bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
    bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
    if (wide_lo || wide_hi) {
      uintptr_t old_reg_val = GetRegister(reg, is_float);
      uint64_t new_vreg_portion = static_cast<uint64_t>(new_value);
      uint64_t old_reg_val_as_wide = static_cast<uint64_t>(old_reg_val);
      uint64_t mask = 0xffffffff;
      if (wide_lo) {
        mask = mask << 32;
      } else {
        new_vreg_portion = new_vreg_portion << 32;
      }
      full_new_value = static_cast<uintptr_t>((old_reg_val_as_wide & mask) | new_vreg_portion);
    }
  }
  SetRegister(reg, full_new_value, is_float);
  return true;
}

bool StackVisitor::SetVRegPair(ArtMethod* m, uint16_t vreg, uint64_t new_value,
                               VRegKind kind_lo, VRegKind kind_hi) {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
  }
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably write registers without a context.
    DCHECK(m == GetMethod());
    if (m->IsOptimized(sizeof(void*))) {
      return false;
    } else {
      return SetVRegPairFromQuickCode(m, vreg, new_value, kind_lo, kind_hi);
    }
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    cur_shadow_frame_->SetVRegLong(vreg, new_value);
    return true;
  }
}

bool StackVisitor::SetVRegPairFromQuickCode(
    ArtMethod* m, uint16_t vreg, uint64_t new_value, VRegKind kind_lo, VRegKind kind_hi) {
  const void* code_pointer = m->GetQuickOatCodePointer(sizeof(void*));
  DCHECK(code_pointer != nullptr);
  const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
  QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
  uint32_t vmap_offset_lo, vmap_offset_hi;
  // TODO: IsInContext stops before spotting floating point registers.
  if (vmap_table.IsInContext(vreg, kind_lo, &vmap_offset_lo) &&
      vmap_table.IsInContext(vreg + 1, kind_hi, &vmap_offset_hi)) {
    bool is_float = (kind_lo == kDoubleLoVReg);
    uint32_t spill_mask = is_float ? frame_info.FpSpillMask() : frame_info.CoreSpillMask();
    uint32_t reg_lo = vmap_table.ComputeRegister(spill_mask, vmap_offset_lo, kind_lo);
    uint32_t reg_hi = vmap_table.ComputeRegister(spill_mask, vmap_offset_hi, kind_hi);
    return SetRegisterPairIfAccessible(reg_lo, reg_hi, new_value, is_float);
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                      // its instructions?
    uint32_t* addr = GetVRegAddrFromQuickCode(
        cur_quick_frame_, code_item, frame_info.CoreSpillMask(),
        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), vreg);
    *reinterpret_cast<uint64_t*>(addr) = new_value;
    return true;
  }
}

bool StackVisitor::SetVRegPairFromDebugger(ArtMethod* m,
                                           uint16_t vreg,
                                           uint64_t new_value,
                                           VRegKind kind_lo,
                                           VRegKind kind_hi) {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    return false;
  }
  ShadowFrame* shadow_frame = GetCurrentShadowFrame();
  if (shadow_frame == nullptr) {
    // This is a compiled frame: we must prepare for deoptimization (see SetVRegFromDebugger).
    const size_t frame_id = GetFrameId();
    const uint16_t num_regs = code_item->registers_size_;
    shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
    CHECK(shadow_frame != nullptr);
    // Remember the vreg pair has been set for debugging and must not be overwritten by the
    // original value during deoptimization of the stack.
    thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
    thread_->GetUpdatedVRegFlags(frame_id)[vreg + 1] = true;
  }
  shadow_frame->SetVRegLong(vreg, new_value);
  return true;
}

bool StackVisitor::SetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
                                               uint64_t new_value, bool is_float) {
  if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
    return false;
  }
  uintptr_t new_value_lo = static_cast<uintptr_t>(new_value & 0xFFFFFFFF);
  uintptr_t new_value_hi = static_cast<uintptr_t>(new_value >> 32);
  bool target64 = Is64BitInstructionSet(kRuntimeISA);
  // Deal with 32 or 64-bit wide registers in a way that builds on all targets.
  if (target64) {
    DCHECK_EQ(reg_lo, reg_hi);
    SetRegister(reg_lo, new_value, is_float);
  } else {
    SetRegister(reg_lo, new_value_lo, is_float);
    SetRegister(reg_hi, new_value_hi, is_float);
  }
  return true;
}

bool StackVisitor::IsAccessibleGPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleGPR(reg);
}

uintptr_t* StackVisitor::GetGPRAddress(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPRAddress(reg);
}

uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPR(reg);
}

void StackVisitor::SetGPR(uint32_t reg, uintptr_t value) {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  context_->SetGPR(reg, value);
}

bool StackVisitor::IsAccessibleFPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleFPR(reg);
}

uintptr_t StackVisitor::GetFPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetFPR(reg);
}

void StackVisitor::SetFPR(uint32_t reg, uintptr_t value) {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  context_->SetFPR(reg, value);
}

uintptr_t StackVisitor::GetReturnPc() const {
  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
  DCHECK(sp != nullptr);
  uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
  return *reinterpret_cast<uintptr_t*>(pc_addr);
}

void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
  CHECK(sp != nullptr);
  uint8_t* pc_addr = sp + GetMethod()->GetReturnPcOffset().SizeValue();
  *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}

size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
  struct NumFramesVisitor : public StackVisitor {
    NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
        : StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}

    bool VisitFrame() OVERRIDE {
      frames++;
      return true;
    }

    size_t frames;
  };
  NumFramesVisitor visitor(thread, walk_kind);
  visitor.WalkStack(true);
  return visitor.frames;
}

bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) {
  struct HasMoreFramesVisitor : public StackVisitor {
    HasMoreFramesVisitor(Thread* thread,
                         StackWalkKind walk_kind,
                         size_t num_frames,
                         size_t frame_height)
        : StackVisitor(thread, nullptr, walk_kind, num_frames),
          frame_height_(frame_height),
          found_frame_(false),
          has_more_frames_(false),
          next_method_(nullptr),
          next_dex_pc_(0) {
    }

    bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
      if (found_frame_) {
        ArtMethod* method = GetMethod();
        if (method != nullptr && !method->IsRuntimeMethod()) {
          has_more_frames_ = true;
          next_method_ = method;
          next_dex_pc_ = GetDexPc();
          return false;  // End stack walk once next method is found.
        }
      } else if (GetFrameHeight() == frame_height_) {
        found_frame_ = true;
      }
      return true;
    }

    size_t frame_height_;
    bool found_frame_;
    bool has_more_frames_;
    ArtMethod* next_method_;
    uint32_t next_dex_pc_;
  };
  HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight());
  visitor.WalkStack(true);
  *next_method = visitor.next_method_;
  *next_dex_pc = visitor.next_dex_pc_;
  return visitor.has_more_frames_;
}

void StackVisitor::DescribeStack(Thread* thread) {
  struct DescribeStackVisitor : public StackVisitor {
    explicit DescribeStackVisitor(Thread* thread_in)
        : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}

    bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
      LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
      return true;
    }
  };
  DescribeStackVisitor visitor(thread);
  visitor.WalkStack(true);
}

std::string StackVisitor::DescribeLocation() const {
  std::string result("Visiting method '");
  ArtMethod* m = GetMethod();
  if (m == nullptr) {
    return "upcall";
  }
  result += PrettyMethod(m);
  result += StringPrintf("' at dex PC 0x%04x", GetDexPc());
  if (!IsShadowFrame()) {
    result += StringPrintf(" (native PC %p)", reinterpret_cast<void*>(GetCurrentQuickFramePc()));
  }
  return result;
}

static instrumentation::InstrumentationStackFrame& GetInstrumentationStackFrame(Thread* thread,
                                                                                uint32_t depth) {
  CHECK_LT(depth, thread->GetInstrumentationStack()->size());
  return thread->GetInstrumentationStack()->at(depth);
}

void StackVisitor::SanityCheckFrame() const {
  if (kIsDebugBuild) {
    ArtMethod* method = GetMethod();
    auto* declaring_class = method->GetDeclaringClass();
    // Runtime methods have null declaring class.
    if (!method->IsRuntimeMethod()) {
      CHECK(declaring_class != nullptr);
      CHECK_EQ(declaring_class->GetClass(), declaring_class->GetClass()->GetClass())
          << declaring_class;
    } else {
      CHECK(declaring_class == nullptr);
    }
    Runtime* const runtime = Runtime::Current();
    LinearAlloc* const linear_alloc = runtime->GetLinearAlloc();
    if (!linear_alloc->Contains(method)) {
      // Check class linker linear allocs.
      mirror::Class* klass = method->GetDeclaringClass();
      LinearAlloc* const class_linear_alloc = (klass != nullptr)
          ? ClassLinker::GetAllocatorForClassLoader(klass->GetClassLoader())
          : linear_alloc;
      if (!class_linear_alloc->Contains(method)) {
        // Check image space.
        bool in_image = false;
        for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
          if (space->IsImageSpace()) {
            auto* image_space = space->AsImageSpace();
            const auto& header = image_space->GetImageHeader();
            const auto* methods = &header.GetMethodsSection();
            if (methods->Contains(reinterpret_cast<const uint8_t*>(method) - image_space->Begin())) {
              in_image = true;
              break;
            }
          }
        }
        CHECK(in_image) << PrettyMethod(method) << " not in linear alloc or image";
      }
    }
    if (cur_quick_frame_ != nullptr) {
      method->AssertPcIsWithinQuickCode(cur_quick_frame_pc_);
      // Frame sanity.
      size_t frame_size = method->GetFrameSizeInBytes();
      CHECK_NE(frame_size, 0u);
      // A rough guess at an upper size we expect to see for a frame.
      // 256 registers
      // 2 words HandleScope overhead
      // 3+3 register spills
      // TODO: this seems architecture specific for the case of JNI frames.
      // TODO: 083-compiler-regressions ManyFloatArgs shows this estimate is wrong.
      // const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
      const size_t kMaxExpectedFrameSize = 2 * KB;
      CHECK_LE(frame_size, kMaxExpectedFrameSize);
      size_t return_pc_offset = method->GetReturnPcOffset().SizeValue();
      CHECK_LT(return_pc_offset, frame_size);
    }
  }
}

void StackVisitor::WalkStack(bool include_transitions) {
  DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
  CHECK_EQ(cur_depth_, 0U);
  bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
  uint32_t instrumentation_stack_depth = 0;

  for (const ManagedStack* current_fragment = thread_->GetManagedStack();
       current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
    cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
    cur_quick_frame_ = current_fragment->GetTopQuickFrame();
    cur_quick_frame_pc_ = 0;

    if (cur_quick_frame_ != nullptr) {  // Handle quick stack frames.
      // Can't be both a shadow and a quick fragment.
      DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
      ArtMethod* method = *cur_quick_frame_;
      while (method != nullptr) {
        SanityCheckFrame();

        if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames)
            && method->IsOptimized(sizeof(void*))) {
          CodeInfo code_info = method->GetOptimizedCodeInfo();
          StackMapEncoding encoding = code_info.ExtractEncoding();
          uint32_t native_pc_offset = method->NativeQuickPcOffset(cur_quick_frame_pc_);
          StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
          if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding)) {
            InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
            DCHECK_EQ(current_inlining_depth_, 0u);
            for (current_inlining_depth_ = inline_info.GetDepth();
                 current_inlining_depth_ != 0;
                 --current_inlining_depth_) {
              bool should_continue = VisitFrame();
              if (UNLIKELY(!should_continue)) {
                return;
              }
              cur_depth_++;
            }
          }
        }

        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }

        if (context_ != nullptr) {
          context_->FillCalleeSaves(*this);
        }
        size_t frame_size = method->GetFrameSizeInBytes();
        // Compute PC for next stack frame from return PC.
        size_t return_pc_offset = method->GetReturnPcOffset(frame_size).SizeValue();
        uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
        uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);
        if (UNLIKELY(exit_stubs_installed)) {
          // While profiling, the return pc is restored from the side stack, except when walking
          // the stack for an exception where the side stack will be unwound in VisitFrame.
          if (reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc) {
            const instrumentation::InstrumentationStackFrame& instrumentation_frame =
                GetInstrumentationStackFrame(thread_, instrumentation_stack_depth);
            instrumentation_stack_depth++;
            if (GetMethod() == Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAll)) {
              // Skip runtime save all callee frames which are used to deliver exceptions.
            } else if (instrumentation_frame.interpreter_entry_) {
              ArtMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
              CHECK_EQ(GetMethod(), callee) << "Expected: " << PrettyMethod(callee) << " Found: "
                                            << PrettyMethod(GetMethod());
            } else if (instrumentation_frame.method_ != GetMethod()) {
              LOG(FATAL) << "Expected: " << PrettyMethod(instrumentation_frame.method_)
                         << " Found: " << PrettyMethod(GetMethod());
            }
            if (num_frames_ != 0) {
              // Check agreement of frame Ids only if num_frames_ is computed to avoid infinite
              // recursion.
              CHECK(instrumentation_frame.frame_id_ == GetFrameId())
                  << "Expected: " << instrumentation_frame.frame_id_
                  << " Found: " << GetFrameId();
            }
            return_pc = instrumentation_frame.return_pc_;
          }
        }
        cur_quick_frame_pc_ = return_pc;
        uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
        cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);

        if (kDebugStackWalk) {
          LOG(INFO) << PrettyMethod(method) << "@" << method << " size=" << frame_size
              << " optimized=" << method->IsOptimized(sizeof(void*))
              << " native=" << method->IsNative()
              << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
              << "," << method->GetEntryPointFromJni()
              << " next=" << *cur_quick_frame_;
        }

        cur_depth_++;
        method = *cur_quick_frame_;
      }
    } else if (cur_shadow_frame_ != nullptr) {
      do {
        SanityCheckFrame();
        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }
        cur_depth_++;
        cur_shadow_frame_ = cur_shadow_frame_->GetLink();
      } while (cur_shadow_frame_ != nullptr);
    }
    if (include_transitions) {
      bool should_continue = VisitFrame();
      if (!should_continue) {
        return;
      }
    }
    cur_depth_++;
  }
  if (num_frames_ != 0) {
    CHECK_EQ(cur_depth_, num_frames_);
  }
}

void JavaFrameRootInfo::Describe(std::ostream& os) const {
  const StackVisitor* visitor = stack_visitor_;
  CHECK(visitor != nullptr);
  os << "Type=" << GetType() << " thread_id=" << GetThreadId() << " location=" <<
      visitor->DescribeLocation() << " vreg=" << vreg_;
}

int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
                                             uint32_t core_spills, uint32_t fp_spills,
                                             size_t frame_size, int reg, InstructionSet isa) {
  size_t pointer_size = InstructionSetPointerSize(isa);
  if (kIsDebugBuild) {
    auto* runtime = Runtime::Current();
    if (runtime != nullptr) {
      CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size);
    }
  }
  DCHECK_ALIGNED(frame_size, kStackAlignment);
  DCHECK_NE(reg, -1);
  int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
      + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
      + sizeof(uint32_t);  // Filler.
  int num_regs = code_item->registers_size_ - code_item->ins_size_;
  int temp_threshold = code_item->registers_size_;
  const int max_num_special_temps = 1;
  if (reg == temp_threshold) {
    // The current method pointer corresponds to special location on stack.
    return 0;
  } else if (reg >= temp_threshold + max_num_special_temps) {
    /*
     * Special temporaries may have custom locations and the logic above deals with that.
     * However, non-special temporaries are placed relative to the outs.
     */
    int temps_start = code_item->outs_size_ * sizeof(uint32_t) + pointer_size /* art method */;
    int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
    return temps_start + relative_offset;
  } else if (reg < num_regs) {
    int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
    return locals_start + (reg * sizeof(uint32_t));
  } else {
    // Handle ins.
    return frame_size + ((reg - num_regs) * sizeof(uint32_t)) + pointer_size /* art method */;
  }
}

}  // namespace art