/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "stack.h"

#include "arch/context.h"
#include "art_method-inl.h"
#include "base/hex_dump.h"
#include "entrypoints/entrypoint_utils-inl.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/space/image_space.h"
#include "gc/space/space-inl.h"
#include "jit/jit.h"
#include "jit/jit_code_cache.h"
#include "linear_alloc.h"
#include "mirror/class-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "oat_quick_method_header.h"
#include "quick/quick_method_frame_info.h"
#include "runtime.h"
#include "thread.h"
#include "thread_list.h"
#include "verify_object-inl.h"

namespace art {

static constexpr bool kDebugStackWalk = false;

mirror::Object* ShadowFrame::GetThisObject() const {
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else if (m->IsNative()) {
    return GetVRegReference(0);
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    CHECK(code_item != nullptr) << PrettyMethod(m);
    uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
    return GetVRegReference(reg);
  }
}

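// Faster variant for callers that already know the method's number of ins; avoids loading the
// code item.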
mirror::Object* ShadowFrame::GetThisObject(uint16_t num_ins) const {
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else {
    return GetVRegReference(NumberOfVRegs() - num_ins);
  }
}

size_t ManagedStack::NumJniShadowFrameReferences() const {
  size_t count = 0;
  for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
       current_fragment = current_fragment->GetLink()) {
    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
         current_frame = current_frame->GetLink()) {
      if (current_frame->GetMethod()->IsNative()) {
        // The JNI ShadowFrame only contains references (used for indirect references).
        count += current_frame->NumberOfVRegs();
      }
    }
  }
  return count;
}

bool ManagedStack::ShadowFramesContain(StackReference<mirror::Object>* shadow_frame_entry) const {
  for (const ManagedStack* current_fragment = this; current_fragment != nullptr;
       current_fragment = current_fragment->GetLink()) {
    for (ShadowFrame* current_frame = current_fragment->top_shadow_frame_; current_frame != nullptr;
         current_frame = current_frame->GetLink()) {
      if (current_frame->Contains(shadow_frame_entry)) {
        return true;
      }
    }
  }
  return false;
}

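// StackVisitor is used by subclassing: override VisitFrame() and call WalkStack(), which invokes
// VisitFrame() once per frame (see ComputeNumFrames below for a minimal example).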
StackVisitor::StackVisitor(Thread* thread, Context* context, StackWalkKind walk_kind)
    : StackVisitor(thread, context, walk_kind, 0) {}

StackVisitor::StackVisitor(Thread* thread,
                           Context* context,
                           StackWalkKind walk_kind,
                           size_t num_frames)
    : thread_(thread),
      walk_kind_(walk_kind),
      cur_shadow_frame_(nullptr),
      cur_quick_frame_(nullptr),
      cur_quick_frame_pc_(0),
      cur_oat_quick_method_header_(nullptr),
      num_frames_(num_frames),
      cur_depth_(0),
      current_inlining_depth_(0),
      context_(context) {
  DCHECK(thread == Thread::Current() || thread->IsSuspended()) << *thread;
}

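// Decodes the inline info of the stack map covering the current native PC. Only meaningful for
// optimized (stack-map based) code.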
InlineInfo StackVisitor::GetCurrentInlineInfo() const {
  const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
  uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
  CodeInfo code_info = method_header->GetOptimizedCodeInfo();
  CodeInfoEncoding encoding = code_info.ExtractEncoding();
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
  DCHECK(stack_map.IsValid());
  return code_info.GetInlineInfoOf(stack_map, encoding);
}

ArtMethod* StackVisitor::GetMethod() const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetMethod();
  } else if (cur_quick_frame_ != nullptr) {
    if (IsInInlinedFrame()) {
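      // current_inlining_depth_ counts down as WalkStack visits inlined frames, so convert it
      // to a zero-based index into the inline info of the current stack map.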
      size_t depth_in_stack_map = current_inlining_depth_ - 1;
      InlineInfo inline_info = GetCurrentInlineInfo();
      const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
      CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
      DCHECK(walk_kind_ != StackWalkKind::kSkipInlinedFrames);
      bool allow_resolve = walk_kind_ != StackWalkKind::kIncludeInlinedFramesNoResolve;
      return allow_resolve
          ? GetResolvedMethod<true>(*GetCurrentQuickFrame(),
                                    inline_info,
                                    encoding.inline_info_encoding,
                                    depth_in_stack_map)
          : GetResolvedMethod<false>(*GetCurrentQuickFrame(),
                                     inline_info,
                                     encoding.inline_info_encoding,
                                     depth_in_stack_map);
    } else {
      return *cur_quick_frame_;
    }
  }
  return nullptr;
}

uint32_t StackVisitor::GetDexPc(bool abort_on_failure) const {
  if (cur_shadow_frame_ != nullptr) {
    return cur_shadow_frame_->GetDexPC();
  } else if (cur_quick_frame_ != nullptr) {
    if (IsInInlinedFrame()) {
      size_t depth_in_stack_map = current_inlining_depth_ - 1;
      const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
      CodeInfoEncoding encoding = method_header->GetOptimizedCodeInfo().ExtractEncoding();
      return GetCurrentInlineInfo().GetDexPcAtDepth(encoding.inline_info_encoding,
                                                    depth_in_stack_map);
    } else if (cur_oat_quick_method_header_ == nullptr) {
      return DexFile::kDexNoIndex;
    } else {
      return cur_oat_quick_method_header_->ToDexPc(
          GetMethod(), cur_quick_frame_pc_, abort_on_failure);
    }
  } else {
    return 0;
  }
}

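// Reads the receiver ('this') out of a proxy method's quick frame; defined with the runtime's
// quick entrypoints.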
extern "C" mirror::Object* artQuickGetProxyThisObject(ArtMethod** sp)
    SHARED_REQUIRES(Locks::mutator_lock_);

mirror::Object* StackVisitor::GetThisObject() const {
  DCHECK_EQ(Runtime::Current()->GetClassLinker()->GetImagePointerSize(), sizeof(void*));
  ArtMethod* m = GetMethod();
  if (m->IsStatic()) {
    return nullptr;
  } else if (m->IsNative()) {
    if (cur_quick_frame_ != nullptr) {
      HandleScope* hs = reinterpret_cast<HandleScope*>(
          reinterpret_cast<char*>(cur_quick_frame_) + sizeof(ArtMethod*));
      return hs->GetReference(0);
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else if (m->IsProxyMethod()) {
    if (cur_quick_frame_ != nullptr) {
      return artQuickGetProxyThisObject(cur_quick_frame_);
    } else {
      return cur_shadow_frame_->GetVRegReference(0);
    }
  } else {
    const DexFile::CodeItem* code_item = m->GetCodeItem();
    if (code_item == nullptr) {
      UNIMPLEMENTED(ERROR) << "Failed to determine this object of abstract or proxy method: "
          << PrettyMethod(m);
      return nullptr;
    } else {
      uint16_t reg = code_item->registers_size_ - code_item->ins_size_;
      uint32_t value = 0;
      bool success = GetVReg(m, reg, kReferenceVReg, &value);
      // We currently always guarantee the `this` object is live throughout the method.
      CHECK(success) << "Failed to read the this object in " << PrettyMethod(m);
      return reinterpret_cast<mirror::Object*>(value);
    }
  }
}

size_t StackVisitor::GetNativePcOffset() const {
  DCHECK(!IsShadowFrame());
  return GetCurrentOatQuickMethodHeader()->NativeQuickPcOffset(cur_quick_frame_pc_);
}

bool StackVisitor::GetVRegFromDebuggerShadowFrame(uint16_t vreg,
                                                  VRegKind kind,
                                                  uint32_t* val) const {
  size_t frame_id = const_cast<StackVisitor*>(this)->GetFrameId();
  ShadowFrame* shadow_frame = thread_->FindDebuggerShadowFrame(frame_id);
  if (shadow_frame != nullptr) {
    bool* updated_vreg_flags = thread_->GetUpdatedVRegFlags(frame_id);
    DCHECK(updated_vreg_flags != nullptr);
    if (updated_vreg_flags[vreg]) {
      // Value is set by the debugger.
      if (kind == kReferenceVReg) {
        *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
            shadow_frame->GetVRegReference(vreg)));
      } else {
        *val = shadow_frame->GetVReg(vreg);
      }
      return true;
    }
  }
  // No value is set by the debugger.
  return false;
}

bool StackVisitor::GetVReg(ArtMethod* m, uint16_t vreg, VRegKind kind, uint32_t* val) const {
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
    DCHECK(m == GetMethod());
    // Check if there is a value set by the debugger.
    if (GetVRegFromDebuggerShadowFrame(vreg, kind, val)) {
      return true;
    }
    DCHECK(cur_oat_quick_method_header_->IsOptimized());
    return GetVRegFromOptimizedCode(m, vreg, kind, val);
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    if (kind == kReferenceVReg) {
      *val = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(
          cur_shadow_frame_->GetVRegReference(vreg)));
    } else {
      *val = cur_shadow_frame_->GetVReg(vreg);
    }
    return true;
  }
}

bool StackVisitor::GetVRegFromOptimizedCode(ArtMethod* m, uint16_t vreg, VRegKind kind,
                                            uint32_t* val) const {
  DCHECK_EQ(m, GetMethod());
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be null or how would we compile
                                                    // its instructions?
  uint16_t number_of_dex_registers = code_item->registers_size_;
  DCHECK_LT(vreg, code_item->registers_size_);
  const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
  CodeInfo code_info = method_header->GetOptimizedCodeInfo();
  CodeInfoEncoding encoding = code_info.ExtractEncoding();

  uint32_t native_pc_offset = method_header->NativeQuickPcOffset(cur_quick_frame_pc_);
  StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
  DCHECK(stack_map.IsValid());
  size_t depth_in_stack_map = current_inlining_depth_ - 1;

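  // For an inlined frame the dex register map is stored per inline depth; otherwise use the map
  // of the enclosing stack map.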
  DexRegisterMap dex_register_map = IsInInlinedFrame()
      ? code_info.GetDexRegisterMapAtDepth(depth_in_stack_map,
                                           code_info.GetInlineInfoOf(stack_map, encoding),
                                           encoding,
                                           number_of_dex_registers)
      : code_info.GetDexRegisterMapOf(stack_map, encoding, number_of_dex_registers);

  if (!dex_register_map.IsValid()) {
    return false;
  }
  DexRegisterLocation::Kind location_kind =
      dex_register_map.GetLocationKind(vreg, number_of_dex_registers, code_info, encoding);
  switch (location_kind) {
    case DexRegisterLocation::Kind::kInStack: {
      const int32_t offset = dex_register_map.GetStackOffsetInBytes(vreg,
                                                                    number_of_dex_registers,
                                                                    code_info,
                                                                    encoding);
      const uint8_t* addr = reinterpret_cast<const uint8_t*>(cur_quick_frame_) + offset;
      *val = *reinterpret_cast<const uint32_t*>(addr);
      return true;
    }
    case DexRegisterLocation::Kind::kInRegister:
    case DexRegisterLocation::Kind::kInRegisterHigh:
    case DexRegisterLocation::Kind::kInFpuRegister:
    case DexRegisterLocation::Kind::kInFpuRegisterHigh: {
      uint32_t reg =
          dex_register_map.GetMachineRegister(vreg, number_of_dex_registers, code_info, encoding);
      return GetRegisterIfAccessible(reg, kind, val);
    }
    case DexRegisterLocation::Kind::kConstant:
      *val = dex_register_map.GetConstant(vreg, number_of_dex_registers, code_info, encoding);
      return true;
    case DexRegisterLocation::Kind::kNone:
      return false;
    default:
      LOG(FATAL)
          << "Unexpected location kind "
          << dex_register_map.GetLocationInternalKind(vreg,
                                                      number_of_dex_registers,
                                                      code_info,
                                                      encoding);
      UNREACHABLE();
  }
}

bool StackVisitor::GetRegisterIfAccessible(uint32_t reg, VRegKind kind, uint32_t* val) const {
  const bool is_float = (kind == kFloatVReg) || (kind == kDoubleLoVReg) || (kind == kDoubleHiVReg);

  // X86 float registers are 64-bit and the logic below does not apply.
  DCHECK(!is_float || kRuntimeISA != InstructionSet::kX86);

  if (!IsAccessibleRegister(reg, is_float)) {
    return false;
  }
  uintptr_t ptr_val = GetRegister(reg, is_float);
  const bool target64 = Is64BitInstructionSet(kRuntimeISA);
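  // On 64-bit targets a wide (long/double) value occupies a single register; pick out the
  // requested 32-bit half.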
  if (target64) {
    const bool wide_lo = (kind == kLongLoVReg) || (kind == kDoubleLoVReg);
    const bool wide_hi = (kind == kLongHiVReg) || (kind == kDoubleHiVReg);
    int64_t value_long = static_cast<int64_t>(ptr_val);
    if (wide_lo) {
      ptr_val = static_cast<uintptr_t>(Low32Bits(value_long));
    } else if (wide_hi) {
      ptr_val = static_cast<uintptr_t>(High32Bits(value_long));
    }
  }
  *val = ptr_val;
  return true;
}

bool StackVisitor::GetVRegPairFromDebuggerShadowFrame(uint16_t vreg,
                                                      VRegKind kind_lo,
                                                      VRegKind kind_hi,
                                                      uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromDebuggerShadowFrame(vreg, kind_lo, &low_32bits);
  success &= GetVRegFromDebuggerShadowFrame(vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

bool StackVisitor::GetVRegPair(ArtMethod* m, uint16_t vreg, VRegKind kind_lo,
                               VRegKind kind_hi, uint64_t* val) const {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  // Check if there is a value set by the debugger.
  if (GetVRegPairFromDebuggerShadowFrame(vreg, kind_lo, kind_hi, val)) {
    return true;
  }
  if (cur_quick_frame_ != nullptr) {
    DCHECK(context_ != nullptr);  // You can't reliably read registers without a context.
    DCHECK(m == GetMethod());
    DCHECK(cur_oat_quick_method_header_->IsOptimized());
    return GetVRegPairFromOptimizedCode(m, vreg, kind_lo, kind_hi, val);
  } else {
    DCHECK(cur_shadow_frame_ != nullptr);
    *val = cur_shadow_frame_->GetVRegLong(vreg);
    return true;
  }
}

bool StackVisitor::GetVRegPairFromOptimizedCode(ArtMethod* m, uint16_t vreg,
                                                VRegKind kind_lo, VRegKind kind_hi,
                                                uint64_t* val) const {
  uint32_t low_32bits;
  uint32_t high_32bits;
  bool success = GetVRegFromOptimizedCode(m, vreg, kind_lo, &low_32bits);
  success &= GetVRegFromOptimizedCode(m, vreg + 1, kind_hi, &high_32bits);
  if (success) {
    *val = (static_cast<uint64_t>(high_32bits) << 32) | static_cast<uint64_t>(low_32bits);
  }
  return success;
}

bool StackVisitor::GetRegisterPairIfAccessible(uint32_t reg_lo, uint32_t reg_hi,
                                               VRegKind kind_lo, uint64_t* val) const {
  const bool is_float = (kind_lo == kDoubleLoVReg);
  if (!IsAccessibleRegister(reg_lo, is_float) || !IsAccessibleRegister(reg_hi, is_float)) {
    return false;
  }
  uintptr_t ptr_val_lo = GetRegister(reg_lo, is_float);
  uintptr_t ptr_val_hi = GetRegister(reg_hi, is_float);
  bool target64 = Is64BitInstructionSet(kRuntimeISA);
  if (target64) {
    int64_t value_long_lo = static_cast<int64_t>(ptr_val_lo);
    int64_t value_long_hi = static_cast<int64_t>(ptr_val_hi);
    ptr_val_lo = static_cast<uintptr_t>(Low32Bits(value_long_lo));
    ptr_val_hi = static_cast<uintptr_t>(High32Bits(value_long_hi));
  }
  *val = (static_cast<uint64_t>(ptr_val_hi) << 32) | static_cast<uint32_t>(ptr_val_lo);
  return true;
}

bool StackVisitor::SetVReg(ArtMethod* m,
                           uint16_t vreg,
                           uint32_t new_value,
                           VRegKind kind) {
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    return false;
  }
  ShadowFrame* shadow_frame = GetCurrentShadowFrame();
  if (shadow_frame == nullptr) {
    // This is a compiled frame: we must prepare and update a shadow frame that will
    // be executed by the interpreter after deoptimization of the stack.
    const size_t frame_id = GetFrameId();
    const uint16_t num_regs = code_item->registers_size_;
    shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
    CHECK(shadow_frame != nullptr);
    // Remember the vreg has been set for debugging and must not be overwritten by the
    // original value during deoptimization of the stack.
    thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
  }
  if (kind == kReferenceVReg) {
    shadow_frame->SetVRegReference(vreg, reinterpret_cast<mirror::Object*>(new_value));
  } else {
    shadow_frame->SetVReg(vreg, new_value);
  }
  return true;
}

bool StackVisitor::SetVRegPair(ArtMethod* m,
                               uint16_t vreg,
                               uint64_t new_value,
                               VRegKind kind_lo,
                               VRegKind kind_hi) {
  if (kind_lo == kLongLoVReg) {
    DCHECK_EQ(kind_hi, kLongHiVReg);
  } else if (kind_lo == kDoubleLoVReg) {
    DCHECK_EQ(kind_hi, kDoubleHiVReg);
  } else {
    LOG(FATAL) << "Expected long or double: kind_lo=" << kind_lo << ", kind_hi=" << kind_hi;
    UNREACHABLE();
  }
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    return false;
  }
  ShadowFrame* shadow_frame = GetCurrentShadowFrame();
  if (shadow_frame == nullptr) {
    // This is a compiled frame: we must prepare for deoptimization (see SetVRegFromDebugger).
    const size_t frame_id = GetFrameId();
    const uint16_t num_regs = code_item->registers_size_;
    shadow_frame = thread_->FindOrCreateDebuggerShadowFrame(frame_id, num_regs, m, GetDexPc());
    CHECK(shadow_frame != nullptr);
    // Remember the vreg pair has been set for debugging and must not be overwritten by the
    // original value during deoptimization of the stack.
    thread_->GetUpdatedVRegFlags(frame_id)[vreg] = true;
    thread_->GetUpdatedVRegFlags(frame_id)[vreg + 1] = true;
  }
  shadow_frame->SetVRegLong(vreg, new_value);
  return true;
}

bool StackVisitor::IsAccessibleGPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleGPR(reg);
}

uintptr_t* StackVisitor::GetGPRAddress(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPRAddress(reg);
}

uintptr_t StackVisitor::GetGPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetGPR(reg);
}

bool StackVisitor::IsAccessibleFPR(uint32_t reg) const {
  DCHECK(context_ != nullptr);
  return context_->IsAccessibleFPR(reg);
}

uintptr_t StackVisitor::GetFPR(uint32_t reg) const {
  DCHECK(cur_quick_frame_ != nullptr) << "This is a quick frame routine";
  DCHECK(context_ != nullptr);
  return context_->GetFPR(reg);
}

uintptr_t StackVisitor::GetReturnPc() const {
  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
  DCHECK(sp != nullptr);
  uint8_t* pc_addr = sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
  return *reinterpret_cast<uintptr_t*>(pc_addr);
}

void StackVisitor::SetReturnPc(uintptr_t new_ret_pc) {
  uint8_t* sp = reinterpret_cast<uint8_t*>(GetCurrentQuickFrame());
  CHECK(sp != nullptr);
  uint8_t* pc_addr = sp + GetCurrentQuickFrameInfo().GetReturnPcOffset();
  *reinterpret_cast<uintptr_t*>(pc_addr) = new_ret_pc;
}

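// Counts frames by walking the entire stack with a trivial visitor.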
size_t StackVisitor::ComputeNumFrames(Thread* thread, StackWalkKind walk_kind) {
  struct NumFramesVisitor : public StackVisitor {
    NumFramesVisitor(Thread* thread_in, StackWalkKind walk_kind_in)
        : StackVisitor(thread_in, nullptr, walk_kind_in), frames(0) {}

    bool VisitFrame() OVERRIDE {
      frames++;
      return true;
    }

    size_t frames;
  };
  NumFramesVisitor visitor(thread, walk_kind);
  visitor.WalkStack(true);
  return visitor.frames;
}

bool StackVisitor::GetNextMethodAndDexPc(ArtMethod** next_method, uint32_t* next_dex_pc) {
  struct HasMoreFramesVisitor : public StackVisitor {
    HasMoreFramesVisitor(Thread* thread,
                         StackWalkKind walk_kind,
                         size_t num_frames,
                         size_t frame_height)
        : StackVisitor(thread, nullptr, walk_kind, num_frames),
          frame_height_(frame_height),
          found_frame_(false),
          has_more_frames_(false),
          next_method_(nullptr),
          next_dex_pc_(0) {
    }

    bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
      if (found_frame_) {
        ArtMethod* method = GetMethod();
        if (method != nullptr && !method->IsRuntimeMethod()) {
          has_more_frames_ = true;
          next_method_ = method;
          next_dex_pc_ = GetDexPc();
          return false;  // End stack walk once next method is found.
        }
      } else if (GetFrameHeight() == frame_height_) {
        found_frame_ = true;
      }
      return true;
    }

    size_t frame_height_;
    bool found_frame_;
    bool has_more_frames_;
    ArtMethod* next_method_;
    uint32_t next_dex_pc_;
  };
  HasMoreFramesVisitor visitor(thread_, walk_kind_, GetNumFrames(), GetFrameHeight());
  visitor.WalkStack(true);
  *next_method = visitor.next_method_;
  *next_dex_pc = visitor.next_dex_pc_;
  return visitor.has_more_frames_;
}

void StackVisitor::DescribeStack(Thread* thread) {
  struct DescribeStackVisitor : public StackVisitor {
    explicit DescribeStackVisitor(Thread* thread_in)
        : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames) {}

    bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
      LOG(INFO) << "Frame Id=" << GetFrameId() << " " << DescribeLocation();
      return true;
    }
  };
  DescribeStackVisitor visitor(thread);
  visitor.WalkStack(true);
}

std::string StackVisitor::DescribeLocation() const {
  std::string result("Visiting method '");
  ArtMethod* m = GetMethod();
  if (m == nullptr) {
    return "upcall";
  }
  result += PrettyMethod(m);
  result += StringPrintf("' at dex PC 0x%04x", GetDexPc());
  if (!IsShadowFrame()) {
    result += StringPrintf(" (native PC %p)", reinterpret_cast<void*>(GetCurrentQuickFramePc()));
  }
  return result;
}

static instrumentation::InstrumentationStackFrame& GetInstrumentationStackFrame(Thread* thread,
                                                                                uint32_t depth) {
  CHECK_LT(depth, thread->GetInstrumentationStack()->size());
  return thread->GetInstrumentationStack()->at(depth);
}

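// Checks that the given PC falls within the method's compiled code, unless the method is
// currently running through a stub, bridge, or freshly JIT-compiled code.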
static void AssertPcIsWithinQuickCode(ArtMethod* method, uintptr_t pc)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  if (method->IsNative() || method->IsRuntimeMethod() || method->IsProxyMethod()) {
    return;
  }

  if (pc == reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc())) {
    return;
  }

  const void* code = method->GetEntryPointFromQuickCompiledCode();
  if (code == GetQuickInstrumentationEntryPoint()) {
    return;
  }

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  if (class_linker->IsQuickToInterpreterBridge(code) ||
      class_linker->IsQuickResolutionStub(code)) {
    return;
  }

  // If we are the JIT then we may have just compiled the method after the
  // IsQuickToInterpreterBridge check.
  Runtime* runtime = Runtime::Current();
  if (runtime->UseJitCompilation() && runtime->GetJit()->GetCodeCache()->ContainsPc(code)) {
    return;
  }

  uint32_t code_size = OatQuickMethodHeader::FromEntryPoint(code)->code_size_;
  uintptr_t code_start = reinterpret_cast<uintptr_t>(code);
  CHECK(code_start <= pc && pc <= (code_start + code_size))
      << PrettyMethod(method)
      << " pc=" << std::hex << pc
      << " code_start=" << code_start
      << " code_size=" << code_size;
}

void StackVisitor::SanityCheckFrame() const {
  if (kIsDebugBuild) {
    ArtMethod* method = GetMethod();
    auto* declaring_class = method->GetDeclaringClass();
    // Runtime methods have null declaring class.
    if (!method->IsRuntimeMethod()) {
      CHECK(declaring_class != nullptr);
      CHECK_EQ(declaring_class->GetClass(), declaring_class->GetClass()->GetClass())
          << declaring_class;
    } else {
      CHECK(declaring_class == nullptr);
    }
    Runtime* const runtime = Runtime::Current();
    LinearAlloc* const linear_alloc = runtime->GetLinearAlloc();
    if (!linear_alloc->Contains(method)) {
      // Check class linker linear allocs.
      mirror::Class* klass = method->GetDeclaringClass();
      LinearAlloc* const class_linear_alloc = (klass != nullptr)
          ? runtime->GetClassLinker()->GetAllocatorForClassLoader(klass->GetClassLoader())
          : linear_alloc;
      if (!class_linear_alloc->Contains(method)) {
        // Check image space.
        bool in_image = false;
        for (auto& space : runtime->GetHeap()->GetContinuousSpaces()) {
          if (space->IsImageSpace()) {
            auto* image_space = space->AsImageSpace();
            const auto& header = image_space->GetImageHeader();
            const ImageSection& methods = header.GetMethodsSection();
            const ImageSection& runtime_methods = header.GetRuntimeMethodsSection();
            const size_t offset = reinterpret_cast<const uint8_t*>(method) - image_space->Begin();
            if (methods.Contains(offset) || runtime_methods.Contains(offset)) {
              in_image = true;
              break;
            }
          }
        }
        CHECK(in_image) << PrettyMethod(method) << " not in linear alloc or image";
      }
    }
    if (cur_quick_frame_ != nullptr) {
      AssertPcIsWithinQuickCode(method, cur_quick_frame_pc_);
      // Frame sanity.
      size_t frame_size = GetCurrentQuickFrameInfo().FrameSizeInBytes();
      CHECK_NE(frame_size, 0u);
      // A rough guess at an upper size we expect to see for a frame.
      // 256 registers
      // 2 words HandleScope overhead
      // 3+3 register spills
      // TODO: this seems architecture specific for the case of JNI frames.
      // TODO: 083-compiler-regressions ManyFloatArgs shows this estimate is wrong.
      // const size_t kMaxExpectedFrameSize = (256 + 2 + 3 + 3) * sizeof(word);
      const size_t kMaxExpectedFrameSize = 2 * KB;
      CHECK_LE(frame_size, kMaxExpectedFrameSize) << PrettyMethod(method);
      size_t return_pc_offset = GetCurrentQuickFrameInfo().GetReturnPcOffset();
      CHECK_LT(return_pc_offset, frame_size);
    }
  }
}

// Counts the number of references in the parameter list of the corresponding method.
// Note: This does _not_ include "this" for non-static methods.
static uint32_t GetNumberOfReferenceArgsWithoutReceiver(ArtMethod* method)
    SHARED_REQUIRES(Locks::mutator_lock_) {
  uint32_t shorty_len;
  const char* shorty = method->GetShorty(&shorty_len);
  uint32_t refs = 0;
  for (uint32_t i = 1; i < shorty_len; ++i) {
    if (shorty[i] == 'L') {
      refs++;
    }
  }
  return refs;
}

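// Returns the frame info for the current frame. When there is no OatQuickMethodHeader (e.g. for
// stubs and the generic JNI trampoline), the info is reconstructed from the kind of method.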
QuickMethodFrameInfo StackVisitor::GetCurrentQuickFrameInfo() const {
  if (cur_oat_quick_method_header_ != nullptr) {
    return cur_oat_quick_method_header_->GetFrameInfo();
  }

  ArtMethod* method = GetMethod();
  Runtime* runtime = Runtime::Current();

  if (method->IsAbstract()) {
    return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
  }

  // This goes before IsProxyMethod since runtime methods have a null declaring class.
  if (method->IsRuntimeMethod()) {
    return runtime->GetRuntimeMethodFrameInfo(method);
  }

  if (method->IsProxyMethod()) {
    // There is only one direct method of a proxy class: the constructor. A direct method is
    // cloned from the original java.lang.reflect.Proxy class and is executed as a usual quick
    // compiled method without any stubs, so the method must have an OatQuickMethodHeader.
    DCHECK(!method->IsDirect() && !method->IsConstructor())
        << "Constructors of proxy classes must have an OatQuickMethodHeader";
    return runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);
  }

  // The only remaining case is if the method is native and uses the generic JNI stub.
  DCHECK(method->IsNative());
  ClassLinker* class_linker = runtime->GetClassLinker();
  const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(method, sizeof(void*));
  DCHECK(class_linker->IsQuickGenericJniStub(entry_point)) << PrettyMethod(method);
  // Generic JNI frame.
  uint32_t handle_refs = GetNumberOfReferenceArgsWithoutReceiver(method) + 1;
  size_t scope_size = HandleScope::SizeOf(handle_refs);
  QuickMethodFrameInfo callee_info = runtime->GetCalleeSaveMethodFrameInfo(Runtime::kRefsAndArgs);

  // Callee saves + handle scope + method ref + alignment
  // Note: -sizeof(void*) since callee-save frame stores a whole method pointer.
  size_t frame_size = RoundUp(
      callee_info.FrameSizeInBytes() - sizeof(void*) + sizeof(ArtMethod*) + scope_size,
      kStackAlignment);
  return QuickMethodFrameInfo(frame_size, callee_info.CoreSpillMask(), callee_info.FpSpillMask());
}

void StackVisitor::WalkStack(bool include_transitions) {
  DCHECK(thread_ == Thread::Current() || thread_->IsSuspended());
  CHECK_EQ(cur_depth_, 0U);
  bool exit_stubs_installed = Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled();
  uint32_t instrumentation_stack_depth = 0;
  size_t inlined_frames_count = 0;

  for (const ManagedStack* current_fragment = thread_->GetManagedStack();
       current_fragment != nullptr; current_fragment = current_fragment->GetLink()) {
    cur_shadow_frame_ = current_fragment->GetTopShadowFrame();
    cur_quick_frame_ = current_fragment->GetTopQuickFrame();
    cur_quick_frame_pc_ = 0;
    cur_oat_quick_method_header_ = nullptr;

    if (cur_quick_frame_ != nullptr) {  // Handle quick stack frames.
      // Can't be both a shadow and a quick fragment.
      DCHECK(current_fragment->GetTopShadowFrame() == nullptr);
      ArtMethod* method = *cur_quick_frame_;
      while (method != nullptr) {
        cur_oat_quick_method_header_ = method->GetOatQuickMethodHeader(cur_quick_frame_pc_);
        SanityCheckFrame();

        if ((walk_kind_ == StackWalkKind::kIncludeInlinedFrames ||
             walk_kind_ == StackWalkKind::kIncludeInlinedFramesNoResolve)
            && (cur_oat_quick_method_header_ != nullptr)
            && cur_oat_quick_method_header_->IsOptimized()) {
          CodeInfo code_info = cur_oat_quick_method_header_->GetOptimizedCodeInfo();
          CodeInfoEncoding encoding = code_info.ExtractEncoding();
          uint32_t native_pc_offset =
              cur_oat_quick_method_header_->NativeQuickPcOffset(cur_quick_frame_pc_);
          StackMap stack_map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
          if (stack_map.IsValid() && stack_map.HasInlineInfo(encoding.stack_map_encoding)) {
            InlineInfo inline_info = code_info.GetInlineInfoOf(stack_map, encoding);
            DCHECK_EQ(current_inlining_depth_, 0u);
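            // Visit inlined frames from the most-nested one outwards; current_inlining_depth_
            // doubles as the cursor that GetMethod() and GetDexPc() use to decode the right
            // depth.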
            for (current_inlining_depth_ = inline_info.GetDepth(encoding.inline_info_encoding);
                 current_inlining_depth_ != 0;
                 --current_inlining_depth_) {
              bool should_continue = VisitFrame();
              if (UNLIKELY(!should_continue)) {
                return;
              }
              cur_depth_++;
              inlined_frames_count++;
            }
          }
        }

        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }

        QuickMethodFrameInfo frame_info = GetCurrentQuickFrameInfo();
        if (context_ != nullptr) {
          context_->FillCalleeSaves(reinterpret_cast<uint8_t*>(cur_quick_frame_), frame_info);
        }
        // Compute PC for next stack frame from return PC.
        size_t frame_size = frame_info.FrameSizeInBytes();
        size_t return_pc_offset = frame_size - sizeof(void*);
        uint8_t* return_pc_addr = reinterpret_cast<uint8_t*>(cur_quick_frame_) + return_pc_offset;
        uintptr_t return_pc = *reinterpret_cast<uintptr_t*>(return_pc_addr);

        if (UNLIKELY(exit_stubs_installed)) {
          // While profiling, the return pc is restored from the side stack, except when walking
          // the stack for an exception where the side stack will be unwound in VisitFrame.
          if (reinterpret_cast<uintptr_t>(GetQuickInstrumentationExitPc()) == return_pc) {
            const instrumentation::InstrumentationStackFrame& instrumentation_frame =
                GetInstrumentationStackFrame(thread_, instrumentation_stack_depth);
            instrumentation_stack_depth++;
            if (GetMethod() == Runtime::Current()->GetCalleeSaveMethod(Runtime::kSaveAll)) {
              // Skip runtime save all callee frames which are used to deliver exceptions.
            } else if (instrumentation_frame.interpreter_entry_) {
              ArtMethod* callee = Runtime::Current()->GetCalleeSaveMethod(Runtime::kRefsAndArgs);
              CHECK_EQ(GetMethod(), callee) << "Expected: " << PrettyMethod(callee) << " Found: "
                                            << PrettyMethod(GetMethod());
            } else {
              CHECK_EQ(instrumentation_frame.method_, GetMethod())
                  << "Expected: " << PrettyMethod(instrumentation_frame.method_)
                  << " Found: " << PrettyMethod(GetMethod());
            }
            if (num_frames_ != 0) {
              // Check agreement of frame Ids only if num_frames_ is computed to avoid infinite
              // recursion.
              size_t frame_id = instrumentation::Instrumentation::ComputeFrameId(
                  thread_,
                  cur_depth_,
                  inlined_frames_count);
              CHECK_EQ(instrumentation_frame.frame_id_, frame_id);
            }
            return_pc = instrumentation_frame.return_pc_;
          }
        }

        cur_quick_frame_pc_ = return_pc;
        uint8_t* next_frame = reinterpret_cast<uint8_t*>(cur_quick_frame_) + frame_size;
        cur_quick_frame_ = reinterpret_cast<ArtMethod**>(next_frame);

        if (kDebugStackWalk) {
          LOG(INFO) << PrettyMethod(method) << "@" << method << " size=" << frame_size
              << std::boolalpha
              << " optimized=" << (cur_oat_quick_method_header_ != nullptr &&
                                   cur_oat_quick_method_header_->IsOptimized())
              << " native=" << method->IsNative()
              << std::noboolalpha
              << " entrypoints=" << method->GetEntryPointFromQuickCompiledCode()
              << "," << method->GetEntryPointFromJni()
              << " next=" << *cur_quick_frame_;
        }

        cur_depth_++;
        method = *cur_quick_frame_;
      }
    } else if (cur_shadow_frame_ != nullptr) {
      do {
        SanityCheckFrame();
        bool should_continue = VisitFrame();
        if (UNLIKELY(!should_continue)) {
          return;
        }
        cur_depth_++;
        cur_shadow_frame_ = cur_shadow_frame_->GetLink();
      } while (cur_shadow_frame_ != nullptr);
    }
    if (include_transitions) {
      bool should_continue = VisitFrame();
      if (!should_continue) {
        return;
      }
    }
    cur_depth_++;
  }
  if (num_frames_ != 0) {
    CHECK_EQ(cur_depth_, num_frames_);
  }
}

void JavaFrameRootInfo::Describe(std::ostream& os) const {
  const StackVisitor* visitor = stack_visitor_;
  CHECK(visitor != nullptr);
  os << "Type=" << GetType() << " thread_id=" << GetThreadId() << " location=" <<
      visitor->DescribeLocation() << " vreg=" << vreg_;
}

int StackVisitor::GetVRegOffsetFromQuickCode(const DexFile::CodeItem* code_item,
                                             uint32_t core_spills, uint32_t fp_spills,
                                             size_t frame_size, int reg, InstructionSet isa) {
  size_t pointer_size = InstructionSetPointerSize(isa);
  if (kIsDebugBuild) {
    auto* runtime = Runtime::Current();
    if (runtime != nullptr) {
      CHECK_EQ(runtime->GetClassLinker()->GetImagePointerSize(), pointer_size);
    }
  }
  DCHECK_ALIGNED(frame_size, kStackAlignment);
  DCHECK_NE(reg, -1);
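  // Quick frame layout, from low to high addresses: ArtMethod*, outs, non-special temps, ...,
  // locals, spill area; the ins live in the caller's out area, just above the caller's method
  // pointer.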
  int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
      + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
      + sizeof(uint32_t);  // Filler.
  int num_regs = code_item->registers_size_ - code_item->ins_size_;
  int temp_threshold = code_item->registers_size_;
  const int max_num_special_temps = 1;
  if (reg == temp_threshold) {
    // The current method pointer corresponds to a special location on the stack.
    return 0;
  } else if (reg >= temp_threshold + max_num_special_temps) {
    /*
     * Special temporaries may have custom locations and the logic above deals with that.
     * However, non-special temporaries are placed relative to the outs.
     */
    int temps_start = code_item->outs_size_ * sizeof(uint32_t) + pointer_size /* art method */;
    int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
    return temps_start + relative_offset;
  } else if (reg < num_regs) {
    int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
    return locals_start + (reg * sizeof(uint32_t));
  } else {
    // Handle ins.
    return frame_size + ((reg - num_regs) * sizeof(uint32_t)) + pointer_size /* art method */;
  }
}

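// Bookkeeping for structured locking: each monitor entered by the method is recorded so that
// unbalanced locking can be detected and reported when the frame is popped.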
void LockCountData::AddMonitor(Thread* self, mirror::Object* obj) {
  if (obj == nullptr) {
    return;
  }

  // If there's an error during enter, we won't have locked the monitor. So check there's no
  // exception.
  if (self->IsExceptionPending()) {
    return;
  }

  if (monitors_ == nullptr) {
    monitors_.reset(new std::vector<mirror::Object*>());
  }
  monitors_->push_back(obj);
}

void LockCountData::RemoveMonitorOrThrow(Thread* self, const mirror::Object* obj) {
  if (obj == nullptr) {
    return;
  }
  bool found_object = false;
  if (monitors_ != nullptr) {
    // We need to remove one entry for obj, as duplicates are used to count recursive locks.
    // We arbitrarily choose the first one.
    auto it = std::find(monitors_->begin(), monitors_->end(), obj);
    if (it != monitors_->end()) {
      monitors_->erase(it);
      found_object = true;
    }
  }
  if (!found_object) {
    // The object wasn't found. Time for an IllegalMonitorStateException.
    // The order here isn't fully clear. Assume that any other pending exception is swallowed.
    // TODO: Maybe make already pending exception a suppressed exception.
    self->ClearException();
    self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;",
                             "did not lock monitor on object of type '%s' before unlocking",
                             PrettyTypeOf(const_cast<mirror::Object*>(obj)).c_str());
  }
}

// Helper to unlock a monitor. Must be NO_THREAD_SAFETY_ANALYSIS, as we can't statically show
// that the object was locked.
void MonitorExitHelper(Thread* self, mirror::Object* obj) NO_THREAD_SAFETY_ANALYSIS {
  DCHECK(self != nullptr);
  DCHECK(obj != nullptr);
  obj->MonitorExit(self);
}

bool LockCountData::CheckAllMonitorsReleasedOrThrow(Thread* self) {
  DCHECK(self != nullptr);
  if (monitors_ != nullptr) {
    if (!monitors_->empty()) {
      // There may be an exception pending, if the method is terminating abruptly. Clear it.
      // TODO: Should we add this as a suppressed exception?
      self->ClearException();

      // OK, there are monitors that are still locked. To enforce structured locking (and avoid
      // deadlocks) we unlock all of them before we raise the IllegalMonitorState exception.
      for (mirror::Object* obj : *monitors_) {
        MonitorExitHelper(self, obj);
        // If this raised an exception, ignore. TODO: Should we add this as suppressed
        // exceptions?
        if (self->IsExceptionPending()) {
          self->ClearException();
        }
      }
      // Raise an exception, just give the first object as the sample.
      mirror::Object* first = (*monitors_)[0];
      self->ThrowNewExceptionF("Ljava/lang/IllegalMonitorStateException;",
                               "did not unlock monitor on object of type '%s'",
                               PrettyTypeOf(first).c_str());

      // To make sure this path is not triggered again, clean out the monitors.
      monitors_->clear();

      return false;
    }
  }
  return true;
}

}  // namespace art