debugger.cc revision 40c8141b48275afd1680b99878782848ab3a6761
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "debugger.h"
18
19#include <sys/uio.h>
20
21#include <set>
22
23#include "arch/context.h"
24#include "art_field-inl.h"
25#include "art_method-inl.h"
26#include "base/time_utils.h"
27#include "class_linker.h"
28#include "class_linker-inl.h"
29#include "dex_file-inl.h"
30#include "dex_instruction.h"
31#include "gc/accounting/card_table-inl.h"
32#include "gc/allocation_record.h"
33#include "gc/space/large_object_space.h"
34#include "gc/space/space-inl.h"
35#include "handle_scope.h"
36#include "jdwp/jdwp_priv.h"
37#include "jdwp/object_registry.h"
38#include "mirror/class.h"
39#include "mirror/class-inl.h"
40#include "mirror/class_loader.h"
41#include "mirror/object-inl.h"
42#include "mirror/object_array-inl.h"
43#include "mirror/string-inl.h"
44#include "mirror/throwable.h"
45#include "quick/inline_method_analyser.h"
46#include "reflection.h"
47#include "safe_map.h"
48#include "scoped_thread_state_change.h"
49#include "ScopedLocalRef.h"
50#include "ScopedPrimitiveArray.h"
51#include "handle_scope-inl.h"
52#include "thread_list.h"
53#include "utf.h"
54#include "verifier/method_verifier-inl.h"
55#include "well_known_classes.h"
56
57namespace art {
58
59// The key identifying the debugger to update instrumentation.
60static constexpr const char* kDbgInstrumentationKey = "Debugger";
61
// Limit alloc_record_count to the two-byte big-endian value (64k-1) that is the limit of the
// current protocol.
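// For example, a requested count of 100,000 is reported as 65,535 (0xffff).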
63static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
64  const size_t cap = 0xffff;
65  if (alloc_record_count > cap) {
66    return cap;
67  }
68  return alloc_record_count;
69}
70
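// A breakpoint location (method and dex pc) together with the kind of deoptimization that was
// required to install it.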
71class Breakpoint {
72 public:
73  Breakpoint(ArtMethod* method, uint32_t dex_pc,
74             DeoptimizationRequest::Kind deoptimization_kind)
75    SHARED_REQUIRES(Locks::mutator_lock_)
76    : method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) {
77    CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
78          deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
79          deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
80    ScopedObjectAccessUnchecked soa(Thread::Current());
81    method_ = soa.EncodeMethod(method);
82  }
83
84  Breakpoint(const Breakpoint& other) SHARED_REQUIRES(Locks::mutator_lock_)
85    : method_(nullptr), dex_pc_(other.dex_pc_),
86      deoptimization_kind_(other.deoptimization_kind_) {
87    ScopedObjectAccessUnchecked soa(Thread::Current());
88    method_ = soa.EncodeMethod(other.Method());
89  }
90
91  ArtMethod* Method() const SHARED_REQUIRES(Locks::mutator_lock_) {
92    ScopedObjectAccessUnchecked soa(Thread::Current());
93    return soa.DecodeMethod(method_);
94  }
95
96  uint32_t DexPc() const {
97    return dex_pc_;
98  }
99
100  DeoptimizationRequest::Kind GetDeoptimizationKind() const {
101    return deoptimization_kind_;
102  }
103
104 private:
105  // The location of this breakpoint.
106  jmethodID method_;
107  uint32_t dex_pc_;
108
  // Indicates whether the breakpoint needs full deoptimization or selective deoptimization.
110  DeoptimizationRequest::Kind deoptimization_kind_;
111};
112
113static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
114    SHARED_REQUIRES(Locks::mutator_lock_) {
115  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
116  return os;
117}
118
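// Instrumentation listener installed while the debugger is active. It translates instrumentation
// events (method entry/exit, dex pc changes, field accesses, caught exceptions) into JDWP events
// reported through Dbg.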
119class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
120 public:
121  DebugInstrumentationListener() {}
122  virtual ~DebugInstrumentationListener() {}
123
124  void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method,
125                     uint32_t dex_pc)
126      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
127    if (method->IsNative()) {
      // TODO: posting location events is a suspension point and native method entry stubs aren't.
129      return;
130    }
131    if (IsListeningToDexPcMoved()) {
132      // We also listen to kDexPcMoved instrumentation event so we know the DexPcMoved method is
133      // going to be called right after us. To avoid sending JDWP events twice for this location,
134      // we report the event in DexPcMoved. However, we must remind this is method entry so we
135      // send the METHOD_ENTRY event. And we can also group it with other events for this location
136      // like BREAKPOINT or SINGLE_STEP (or even METHOD_EXIT if this is a RETURN instruction).
137      thread->SetDebugMethodEntry();
138    } else if (IsListeningToMethodExit() && IsReturn(method, dex_pc)) {
139      // We also listen to kMethodExited instrumentation event and the current instruction is a
140      // RETURN so we know the MethodExited method is going to be called right after us. To avoid
141      // sending JDWP events twice for this location, we report the event(s) in MethodExited.
      // However, we must remember this is a method entry so we send the METHOD_ENTRY event. And we
143      // also group it with other events for this location like BREAKPOINT or SINGLE_STEP.
144      thread->SetDebugMethodEntry();
145    } else {
146      Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
147    }
148  }
149
150  void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method,
151                    uint32_t dex_pc, const JValue& return_value)
152      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
153    if (method->IsNative()) {
      // TODO: posting location events is a suspension point and native method entry stubs aren't.
155      return;
156    }
157    uint32_t events = Dbg::kMethodExit;
158    if (thread->IsDebugMethodEntry()) {
159      // It is also the method entry.
160      DCHECK(IsReturn(method, dex_pc));
161      events |= Dbg::kMethodEntry;
162      thread->ClearDebugMethodEntry();
163    }
164    Dbg::UpdateDebugger(thread, this_object, method, dex_pc, events, &return_value);
165  }
166
167  void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED,
168                    ArtMethod* method, uint32_t dex_pc)
169      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
    // We're not registered to listen to this kind of event, so complain.
171    LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
172               << " " << dex_pc;
173  }
174
175  void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method,
176                  uint32_t new_dex_pc)
177      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
178    if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
179      // We also listen to kMethodExited instrumentation event and the current instruction is a
180      // RETURN so we know the MethodExited method is going to be called right after us. Like in
181      // MethodEntered, we delegate event reporting to MethodExited.
182      // Besides, if this RETURN instruction is the only one in the method, we can send multiple
183      // JDWP events in the same packet: METHOD_ENTRY, METHOD_EXIT, BREAKPOINT and/or SINGLE_STEP.
184      // Therefore, we must not clear the debug method entry flag here.
185    } else {
186      uint32_t events = 0;
187      if (thread->IsDebugMethodEntry()) {
188        // It is also the method entry.
189        events = Dbg::kMethodEntry;
190        thread->ClearDebugMethodEntry();
191      }
192      Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, events, nullptr);
193    }
194  }
195
196  void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
197                 ArtMethod* method, uint32_t dex_pc, ArtField* field)
198      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
199    Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
200  }
201
202  void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
203                    ArtMethod* method, uint32_t dex_pc, ArtField* field,
204                    const JValue& field_value)
205      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
206    Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
207  }
208
209  void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object)
210      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
211    Dbg::PostException(exception_object);
212  }
213
  // Backward branch events are only used by the JIT, so the debugger does not listen to them.
215  void BackwardBranch(Thread* /*thread*/, ArtMethod* method, int32_t dex_pc_offset)
216      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
217    LOG(ERROR) << "Unexpected backward branch event in debugger " << PrettyMethod(method)
218               << " " << dex_pc_offset;
219  }
220
221 private:
222  static bool IsReturn(ArtMethod* method, uint32_t dex_pc)
223      SHARED_REQUIRES(Locks::mutator_lock_) {
224    const DexFile::CodeItem* code_item = method->GetCodeItem();
225    const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]);
226    return instruction->IsReturn();
227  }
228
229  static bool IsListeningToDexPcMoved() SHARED_REQUIRES(Locks::mutator_lock_) {
230    return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved);
231  }
232
233  static bool IsListeningToMethodExit() SHARED_REQUIRES(Locks::mutator_lock_) {
234    return IsListeningTo(instrumentation::Instrumentation::kMethodExited);
235  }
236
237  static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event)
238      SHARED_REQUIRES(Locks::mutator_lock_) {
239    return (Dbg::GetInstrumentationEvents() & event) != 0;
240  }
241
242  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
243} gDebugInstrumentationListener;
244
245// JDWP is allowed unless the Zygote forbids it.
246static bool gJdwpAllowed = true;
247
248// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
249static bool gJdwpConfigured = false;
250
251// JDWP options for debugging. Only valid if IsJdwpConfigured() is true.
252static JDWP::JdwpOptions gJdwpOptions;
253
254// Runtime JDWP state.
255static JDWP::JdwpState* gJdwpState = nullptr;
256static bool gDebuggerConnected;  // debugger or DDMS is connected.
257
258static bool gDdmThreadNotification = false;
259
260// DDMS GC-related settings.
261static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
262static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
263static Dbg::HpsgWhat gDdmHpsgWhat;
264static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
265static Dbg::HpsgWhat gDdmNhsgWhat;
266
267bool Dbg::gDebuggerActive = false;
268bool Dbg::gDisposed = false;
269ObjectRegistry* Dbg::gRegistry = nullptr;
270
271// Deoptimization support.
272std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
273size_t Dbg::full_deoptimization_event_count_ = 0;
274
275// Instrumentation event reference counters.
276size_t Dbg::dex_pc_change_event_ref_count_ = 0;
277size_t Dbg::method_enter_event_ref_count_ = 0;
278size_t Dbg::method_exit_event_ref_count_ = 0;
279size_t Dbg::field_read_event_ref_count_ = 0;
280size_t Dbg::field_write_event_ref_count_ = 0;
281size_t Dbg::exception_catch_event_ref_count_ = 0;
282uint32_t Dbg::instrumentation_events_ = 0;
283
284// Breakpoints.
285static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
286
287void DebugInvokeReq::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
288  receiver.VisitRootIfNonNull(visitor, root_info);  // null for static method call.
289  klass.VisitRoot(visitor, root_info);
290}
291
292void SingleStepControl::AddDexPc(uint32_t dex_pc) {
293  dex_pcs_.insert(dex_pc);
294}
295
296bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
  return dex_pcs_.find(dex_pc) != dex_pcs_.end();
298}
299
300static bool IsBreakpoint(const ArtMethod* m, uint32_t dex_pc)
301    REQUIRES(!Locks::breakpoint_lock_)
302    SHARED_REQUIRES(Locks::mutator_lock_) {
303  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
304  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
305    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
306      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
307      return true;
308    }
309  }
310  return false;
311}
312
313static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
314    REQUIRES(!Locks::thread_suspend_count_lock_) {
315  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
316  // A thread may be suspended for GC; in this code, we really want to know whether
317  // there's a debugger suspension active.
318  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
319}
320
321static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
322    SHARED_REQUIRES(Locks::mutator_lock_) {
323  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
324  if (o == nullptr) {
325    *error = JDWP::ERR_INVALID_OBJECT;
326    return nullptr;
327  }
328  if (!o->IsArrayInstance()) {
329    *error = JDWP::ERR_INVALID_ARRAY;
330    return nullptr;
331  }
332  *error = JDWP::ERR_NONE;
333  return o->AsArray();
334}
335
336static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
337    SHARED_REQUIRES(Locks::mutator_lock_) {
338  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
339  if (o == nullptr) {
340    *error = JDWP::ERR_INVALID_OBJECT;
341    return nullptr;
342  }
343  if (!o->IsClass()) {
344    *error = JDWP::ERR_INVALID_CLASS;
345    return nullptr;
346  }
347  *error = JDWP::ERR_NONE;
348  return o->AsClass();
349}
350
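// Decodes a JDWP thread id into the corresponding Thread*. Returns nullptr and sets *error if the
// id does not denote a live java.lang.Thread.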
351static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
352                            JDWP::JdwpError* error)
353    SHARED_REQUIRES(Locks::mutator_lock_)
354    REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) {
355  mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
356  if (thread_peer == nullptr) {
357    // This isn't even an object.
358    *error = JDWP::ERR_INVALID_OBJECT;
359    return nullptr;
360  }
361
362  mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
363  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
364    // This isn't a thread.
365    *error = JDWP::ERR_INVALID_THREAD;
366    return nullptr;
367  }
368
369  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
370  Thread* thread = Thread::FromManagedThread(soa, thread_peer);
  // If thread is null then this is a java.lang.Thread without a Thread*. It must be an unstarted
  // thread or a zombie.
373  *error = (thread == nullptr) ? JDWP::ERR_THREAD_NOT_ALIVE : JDWP::ERR_NONE;
374  return thread;
375}
376
377static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
378  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
379  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
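  // For example, "I" yields JT_INT ('I') while "Ljava/lang/String;" yields JT_OBJECT ('L').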
380  return static_cast<JDWP::JdwpTag>(descriptor[0]);
381}
382
383static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
384    SHARED_REQUIRES(Locks::mutator_lock_) {
385  std::string temp;
386  const char* descriptor = klass->GetDescriptor(&temp);
387  return BasicTagFromDescriptor(descriptor);
388}
389
390static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
391    SHARED_REQUIRES(Locks::mutator_lock_) {
392  CHECK(c != nullptr);
393  if (c->IsArrayClass()) {
394    return JDWP::JT_ARRAY;
395  }
396  if (c->IsStringClass()) {
397    return JDWP::JT_STRING;
398  }
399  if (c->IsClassClass()) {
400    return JDWP::JT_CLASS_OBJECT;
401  }
402  {
403    mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
404    if (thread_class->IsAssignableFrom(c)) {
405      return JDWP::JT_THREAD;
406    }
407  }
408  {
409    mirror::Class* thread_group_class =
410        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
411    if (thread_group_class->IsAssignableFrom(c)) {
412      return JDWP::JT_THREAD_GROUP;
413    }
414  }
415  {
416    mirror::Class* class_loader_class =
417        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader);
418    if (class_loader_class->IsAssignableFrom(c)) {
419      return JDWP::JT_CLASS_LOADER;
420    }
421  }
422  return JDWP::JT_OBJECT;
423}
424
425/*
426 * Objects declared to hold Object might actually hold a more specific
427 * type.  The debugger may take a special interest in these (e.g. it
428 * wants to display the contents of Strings), so we want to return an
429 * appropriate tag.
430 *
431 * Null objects are tagged JT_OBJECT.
432 */
433JDWP::JdwpTag Dbg::TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) {
434  return (o == nullptr) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
435}
436
437static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
438  switch (tag) {
439  case JDWP::JT_BOOLEAN:
440  case JDWP::JT_BYTE:
441  case JDWP::JT_CHAR:
442  case JDWP::JT_FLOAT:
443  case JDWP::JT_DOUBLE:
444  case JDWP::JT_INT:
445  case JDWP::JT_LONG:
446  case JDWP::JT_SHORT:
447  case JDWP::JT_VOID:
448    return true;
449  default:
450    return false;
451  }
452}
453
454void Dbg::StartJdwp() {
455  if (!gJdwpAllowed || !IsJdwpConfigured()) {
456    // No JDWP for you!
457    return;
458  }
459
460  CHECK(gRegistry == nullptr);
461  gRegistry = new ObjectRegistry;
462
463  // Init JDWP if the debugger is enabled. This may connect out to a
464  // debugger, passively listen for a debugger, or block waiting for a
465  // debugger.
466  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
467  if (gJdwpState == nullptr) {
468    // We probably failed because some other process has the port already, which means that
469    // if we don't abort the user is likely to think they're talking to us when they're actually
470    // talking to that other process.
471    LOG(FATAL) << "Debugger thread failed to initialize";
472  }
473
474  // If a debugger has already attached, send the "welcome" message.
475  // This may cause us to suspend all threads.
476  if (gJdwpState->IsActive()) {
477    ScopedObjectAccess soa(Thread::Current());
478    gJdwpState->PostVMStart();
479  }
480}
481
482void Dbg::StopJdwp() {
483  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
484  // destruction of gJdwpState).
485  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
486    gJdwpState->PostVMDeath();
487  }
488  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
489  Dispose();
490  delete gJdwpState;
491  gJdwpState = nullptr;
492  delete gRegistry;
493  gRegistry = nullptr;
494}
495
496void Dbg::GcDidFinish() {
497  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
498    ScopedObjectAccess soa(Thread::Current());
499    VLOG(jdwp) << "Sending heap info to DDM";
500    DdmSendHeapInfo(gDdmHpifWhen);
501  }
502  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
503    ScopedObjectAccess soa(Thread::Current());
504    VLOG(jdwp) << "Dumping heap to DDM";
505    DdmSendHeapSegments(false);
506  }
507  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
508    ScopedObjectAccess soa(Thread::Current());
509    VLOG(jdwp) << "Dumping native heap to DDM";
510    DdmSendHeapSegments(true);
511  }
512}
513
514void Dbg::SetJdwpAllowed(bool allowed) {
515  gJdwpAllowed = allowed;
516}
517
518DebugInvokeReq* Dbg::GetInvokeReq() {
519  return Thread::Current()->GetInvokeReq();
520}
521
522Thread* Dbg::GetDebugThread() {
523  return (gJdwpState != nullptr) ? gJdwpState->GetDebugThread() : nullptr;
524}
525
526void Dbg::ClearWaitForEventThread() {
527  gJdwpState->ReleaseJdwpTokenForEvent();
528}
529
530void Dbg::Connected() {
531  CHECK(!gDebuggerConnected);
532  VLOG(jdwp) << "JDWP has attached";
533  gDebuggerConnected = true;
534  gDisposed = false;
535}
536
537bool Dbg::RequiresDeoptimization() {
  // We don't need deoptimization if everything runs with the interpreter after
  // enabling -Xint mode.
540  return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
541}
542
543void Dbg::GoActive() {
544  // Enable all debugging features, including scans for breakpoints.
545  // This is a no-op if we're already active.
546  // Only called from the JDWP handler thread.
547  if (IsDebuggerActive()) {
548    return;
549  }
550
551  {
552    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
553    ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
554    CHECK_EQ(gBreakpoints.size(), 0U);
555  }
556
557  {
558    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
559    CHECK_EQ(deoptimization_requests_.size(), 0U);
560    CHECK_EQ(full_deoptimization_event_count_, 0U);
561    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
562    CHECK_EQ(method_enter_event_ref_count_, 0U);
563    CHECK_EQ(method_exit_event_ref_count_, 0U);
564    CHECK_EQ(field_read_event_ref_count_, 0U);
565    CHECK_EQ(field_write_event_ref_count_, 0U);
566    CHECK_EQ(exception_catch_event_ref_count_, 0U);
567  }
568
569  Runtime* runtime = Runtime::Current();
570  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
571  Thread* self = Thread::Current();
572  ThreadState old_state = self->SetStateUnsafe(kRunnable);
573  CHECK_NE(old_state, kRunnable);
574  if (RequiresDeoptimization()) {
575    runtime->GetInstrumentation()->EnableDeoptimization();
576  }
577  instrumentation_events_ = 0;
578  gDebuggerActive = true;
579  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
580  runtime->GetThreadList()->ResumeAll();
581
582  LOG(INFO) << "Debugger is active";
583}
584
585void Dbg::Disconnected() {
586  CHECK(gDebuggerConnected);
587
588  LOG(INFO) << "Debugger is no longer active";
589
590  // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
591  // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
592  // and clear the object registry.
593  Runtime* runtime = Runtime::Current();
594  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
595  Thread* self = Thread::Current();
596  ThreadState old_state = self->SetStateUnsafe(kRunnable);
597
598  // Debugger may not be active at this point.
599  if (IsDebuggerActive()) {
600    {
601      // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
602      // This prevents us from having any pending deoptimization request when the debugger attaches
603      // to us again while no event has been requested yet.
604      MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
605      deoptimization_requests_.clear();
606      full_deoptimization_event_count_ = 0U;
607    }
608    if (instrumentation_events_ != 0) {
609      runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
610                                                    instrumentation_events_);
611      instrumentation_events_ = 0;
612    }
613    if (RequiresDeoptimization()) {
614      runtime->GetInstrumentation()->DisableDeoptimization(kDbgInstrumentationKey);
615    }
616    gDebuggerActive = false;
617  }
618  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
619  runtime->GetThreadList()->ResumeAll();
620
621  {
622    ScopedObjectAccess soa(self);
623    gRegistry->Clear();
624  }
625
626  gDebuggerConnected = false;
627}
628
629void Dbg::ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options) {
630  CHECK_NE(jdwp_options.transport, JDWP::kJdwpTransportUnknown);
631  gJdwpOptions = jdwp_options;
632  gJdwpConfigured = true;
633}
634
635bool Dbg::IsJdwpConfigured() {
636  return gJdwpConfigured;
637}
638
639int64_t Dbg::LastDebuggerActivity() {
640  return gJdwpState->LastDebuggerActivity();
641}
642
643void Dbg::UndoDebuggerSuspensions() {
644  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
645}
646
647std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
648  JDWP::JdwpError error;
649  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id, &error);
650  if (o == nullptr) {
651    if (error == JDWP::ERR_NONE) {
652      return "null";
653    } else {
654      return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
655    }
656  }
657  if (!o->IsClass()) {
658    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
659  }
660  return GetClassName(o->AsClass());
661}
662
663std::string Dbg::GetClassName(mirror::Class* klass) {
664  if (klass == nullptr) {
665    return "null";
666  }
667  std::string temp;
668  return DescriptorToName(klass->GetDescriptor(&temp));
669}
670
671JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id) {
672  JDWP::JdwpError status;
673  mirror::Class* c = DecodeClass(id, &status);
674  if (c == nullptr) {
675    *class_object_id = 0;
676    return status;
677  }
678  *class_object_id = gRegistry->Add(c);
679  return JDWP::ERR_NONE;
680}
681
682JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id) {
683  JDWP::JdwpError status;
684  mirror::Class* c = DecodeClass(id, &status);
685  if (c == nullptr) {
686    *superclass_id = 0;
687    return status;
688  }
689  if (c->IsInterface()) {
690    // http://code.google.com/p/android/issues/detail?id=20856
691    *superclass_id = 0;
692  } else {
693    *superclass_id = gRegistry->Add(c->GetSuperClass());
694  }
695  return JDWP::ERR_NONE;
696}
697
698JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
699  JDWP::JdwpError error;
700  mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
701  if (o == nullptr) {
702    return JDWP::ERR_INVALID_OBJECT;
703  }
704  expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader()));
705  return JDWP::ERR_NONE;
706}
707
708JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
709  JDWP::JdwpError error;
710  mirror::Class* c = DecodeClass(id, &error);
711  if (c == nullptr) {
712    return error;
713  }
714
715  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;
716
717  // Set ACC_SUPER. Dex files don't contain this flag but only classes are supposed to have it set,
718  // not interfaces.
719  // Class.getModifiers doesn't return it, but JDWP does, so we set it here.
720  if ((access_flags & kAccInterface) == 0) {
721    access_flags |= kAccSuper;
722  }
723
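  // For example, a plain public class is reported with flags 0x0021 (ACC_PUBLIC | ACC_SUPER).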
724  expandBufAdd4BE(pReply, access_flags);
725
726  return JDWP::ERR_NONE;
727}
728
729JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply) {
730  JDWP::JdwpError error;
731  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
732  if (o == nullptr) {
733    return JDWP::ERR_INVALID_OBJECT;
734  }
735
736  // Ensure all threads are suspended while we read objects' lock words.
737  Thread* self = Thread::Current();
738  CHECK_EQ(self->GetState(), kRunnable);
739  self->TransitionFromRunnableToSuspended(kSuspended);
740  Runtime::Current()->GetThreadList()->SuspendAll(__FUNCTION__);
741
742  MonitorInfo monitor_info(o);
743
744  Runtime::Current()->GetThreadList()->ResumeAll();
745  self->TransitionFromSuspendedToRunnable();
746
747  if (monitor_info.owner_ != nullptr) {
748    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
749  } else {
750    expandBufAddObjectId(reply, gRegistry->Add(nullptr));
751  }
752  expandBufAdd4BE(reply, monitor_info.entry_count_);
753  expandBufAdd4BE(reply, monitor_info.waiters_.size());
754  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
755    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
756  }
757  return JDWP::ERR_NONE;
758}
759
760JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
761                                      std::vector<JDWP::ObjectId>* monitors,
762                                      std::vector<uint32_t>* stack_depths) {
763  struct OwnedMonitorVisitor : public StackVisitor {
764    OwnedMonitorVisitor(Thread* thread, Context* context,
765                        std::vector<JDWP::ObjectId>* monitor_vector,
766                        std::vector<uint32_t>* stack_depth_vector)
767        SHARED_REQUIRES(Locks::mutator_lock_)
768      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
769        current_stack_depth(0),
770        monitors(monitor_vector),
771        stack_depths(stack_depth_vector) {}
772
773    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
774    // annotalysis.
775    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
776      if (!GetMethod()->IsRuntimeMethod()) {
777        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
778        ++current_stack_depth;
779      }
780      return true;
781    }
782
783    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
784        SHARED_REQUIRES(Locks::mutator_lock_) {
785      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
786      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
787      visitor->stack_depths->push_back(visitor->current_stack_depth);
788    }
789
790    size_t current_stack_depth;
791    std::vector<JDWP::ObjectId>* const monitors;
792    std::vector<uint32_t>* const stack_depths;
793  };
794
795  ScopedObjectAccessUnchecked soa(Thread::Current());
796  JDWP::JdwpError error;
797  Thread* thread = DecodeThread(soa, thread_id, &error);
798  if (thread == nullptr) {
799    return error;
800  }
801  if (!IsSuspendedForDebugger(soa, thread)) {
802    return JDWP::ERR_THREAD_NOT_SUSPENDED;
803  }
804  std::unique_ptr<Context> context(Context::Create());
805  OwnedMonitorVisitor visitor(thread, context.get(), monitors, stack_depths);
806  visitor.WalkStack();
807  return JDWP::ERR_NONE;
808}
809
810JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
811                                         JDWP::ObjectId* contended_monitor) {
812  ScopedObjectAccessUnchecked soa(Thread::Current());
813  *contended_monitor = 0;
814  JDWP::JdwpError error;
815  Thread* thread = DecodeThread(soa, thread_id, &error);
816  if (thread == nullptr) {
817    return error;
818  }
819  if (!IsSuspendedForDebugger(soa, thread)) {
820    return JDWP::ERR_THREAD_NOT_SUSPENDED;
821  }
822  mirror::Object* contended_monitor_obj = Monitor::GetContendedMonitor(thread);
  // Add() requires that the thread_list_lock_ is not held, to avoid a lock
  // level violation.
825  *contended_monitor = gRegistry->Add(contended_monitor_obj);
826  return JDWP::ERR_NONE;
827}
828
829JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
830                                       std::vector<uint64_t>* counts) {
831  gc::Heap* heap = Runtime::Current()->GetHeap();
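  // We only want reachable instances, so do a GC first.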
832  heap->CollectGarbage(false);
833  std::vector<mirror::Class*> classes;
834  counts->clear();
835  for (size_t i = 0; i < class_ids.size(); ++i) {
836    JDWP::JdwpError error;
837    mirror::Class* c = DecodeClass(class_ids[i], &error);
838    if (c == nullptr) {
839      return error;
840    }
841    classes.push_back(c);
842    counts->push_back(0);
843  }
844  heap->CountInstances(classes, false, &(*counts)[0]);
845  return JDWP::ERR_NONE;
846}
847
848JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
849                                  std::vector<JDWP::ObjectId>* instances) {
850  gc::Heap* heap = Runtime::Current()->GetHeap();
851  // We only want reachable instances, so do a GC.
852  heap->CollectGarbage(false);
853  JDWP::JdwpError error;
854  mirror::Class* c = DecodeClass(class_id, &error);
855  if (c == nullptr) {
856    return error;
857  }
858  std::vector<mirror::Object*> raw_instances;
859  Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
860  for (size_t i = 0; i < raw_instances.size(); ++i) {
861    instances->push_back(gRegistry->Add(raw_instances[i]));
862  }
863  return JDWP::ERR_NONE;
864}
865
866JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
867                                         std::vector<JDWP::ObjectId>* referring_objects) {
868  gc::Heap* heap = Runtime::Current()->GetHeap();
869  heap->CollectGarbage(false);
870  JDWP::JdwpError error;
871  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
872  if (o == nullptr) {
873    return JDWP::ERR_INVALID_OBJECT;
874  }
875  std::vector<mirror::Object*> raw_instances;
876  heap->GetReferringObjects(o, max_count, raw_instances);
877  for (size_t i = 0; i < raw_instances.size(); ++i) {
878    referring_objects->push_back(gRegistry->Add(raw_instances[i]));
879  }
880  return JDWP::ERR_NONE;
881}
882
883JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id) {
884  JDWP::JdwpError error;
885  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
886  if (o == nullptr) {
887    return JDWP::ERR_INVALID_OBJECT;
888  }
889  gRegistry->DisableCollection(object_id);
890  return JDWP::ERR_NONE;
891}
892
893JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id) {
894  JDWP::JdwpError error;
895  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  // Unlike DisableCollection, the JDWP spec does not state that an invalid object causes an
  // error. The RI also ignores these cases and never returns an error. However, it's not obvious
  // why this command should behave differently from the DisableCollection and IsCollected
  // commands. So let's be more strict and return an error if this happens.
900  if (o == nullptr) {
901    return JDWP::ERR_INVALID_OBJECT;
902  }
903  gRegistry->EnableCollection(object_id);
904  return JDWP::ERR_NONE;
905}
906
907JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool* is_collected) {
908  *is_collected = true;
909  if (object_id == 0) {
910    // Null object id is invalid.
911    return JDWP::ERR_INVALID_OBJECT;
912  }
  // The JDWP spec states that an INVALID_OBJECT error is returned if the object ID is not valid.
  // However, the RI seems to ignore this and assumes the object has been collected.
915  JDWP::JdwpError error;
916  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
917  if (o != nullptr) {
918    *is_collected = gRegistry->IsCollected(object_id);
919  }
920  return JDWP::ERR_NONE;
921}
922
923void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count) {
924  gRegistry->DisposeObject(object_id, reference_count);
925}
926
927JDWP::JdwpTypeTag Dbg::GetTypeTag(mirror::Class* klass) {
928  DCHECK(klass != nullptr);
929  if (klass->IsArrayClass()) {
930    return JDWP::TT_ARRAY;
931  } else if (klass->IsInterface()) {
932    return JDWP::TT_INTERFACE;
933  } else {
934    return JDWP::TT_CLASS;
935  }
936}
937
938JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
939  JDWP::JdwpError error;
940  mirror::Class* c = DecodeClass(class_id, &error);
941  if (c == nullptr) {
942    return error;
943  }
944
945  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
946  expandBufAdd1(pReply, type_tag);
947  expandBufAddRefTypeId(pReply, class_id);
948  return JDWP::ERR_NONE;
949}
950
951// Get the complete list of reference classes (i.e. all classes except
952// the primitive types).
// Fills the provided vector with the corresponding RefTypeId values.
954class ClassListCreator : public ClassVisitor {
955 public:
956  explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}
957
958  bool Visit(mirror::Class* c) OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
959    if (!c->IsPrimitive()) {
960      classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
961    }
962    return true;
963  }
964
965 private:
966  std::vector<JDWP::RefTypeId>* const classes_;
967};
968
969void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) {
970  ClassListCreator clc(classes);
971  Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&clc);
972}
973
974JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
975                                  uint32_t* pStatus, std::string* pDescriptor) {
976  JDWP::JdwpError error;
977  mirror::Class* c = DecodeClass(class_id, &error);
978  if (c == nullptr) {
979    return error;
980  }
981
982  if (c->IsArrayClass()) {
983    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
984    *pTypeTag = JDWP::TT_ARRAY;
985  } else {
986    if (c->IsErroneous()) {
987      *pStatus = JDWP::CS_ERROR;
988    } else {
989      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
990    }
991    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
992  }
993
994  if (pDescriptor != nullptr) {
995    std::string temp;
996    *pDescriptor = c->GetDescriptor(&temp);
997  }
998  return JDWP::ERR_NONE;
999}
1000
1001void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids) {
1002  std::vector<mirror::Class*> classes;
1003  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
1004  ids->clear();
1005  for (size_t i = 0; i < classes.size(); ++i) {
1006    ids->push_back(gRegistry->Add(classes[i]));
1007  }
1008}
1009
1010JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply) {
1011  JDWP::JdwpError error;
1012  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
1013  if (o == nullptr) {
1014    return JDWP::ERR_INVALID_OBJECT;
1015  }
1016
1017  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
1018  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());
1019
1020  expandBufAdd1(pReply, type_tag);
1021  expandBufAddRefTypeId(pReply, type_id);
1022
1023  return JDWP::ERR_NONE;
1024}
1025
1026JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
1027  JDWP::JdwpError error;
1028  mirror::Class* c = DecodeClass(class_id, &error);
1029  if (c == nullptr) {
1030    return error;
1031  }
1032  std::string temp;
1033  *signature = c->GetDescriptor(&temp);
1034  return JDWP::ERR_NONE;
1035}
1036
1037JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string* result) {
1038  JDWP::JdwpError error;
1039  mirror::Class* c = DecodeClass(class_id, &error);
1040  if (c == nullptr) {
1041    return error;
1042  }
1043  const char* source_file = c->GetSourceFile();
1044  if (source_file == nullptr) {
1045    return JDWP::ERR_ABSENT_INFORMATION;
1046  }
1047  *result = source_file;
1048  return JDWP::ERR_NONE;
1049}
1050
1051JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag) {
1052  ScopedObjectAccessUnchecked soa(Thread::Current());
1053  JDWP::JdwpError error;
1054  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
1055  if (error != JDWP::ERR_NONE) {
1056    *tag = JDWP::JT_VOID;
1057    return error;
1058  }
1059  *tag = TagFromObject(soa, o);
1060  return JDWP::ERR_NONE;
1061}
1062
1063size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
1064  switch (tag) {
1065  case JDWP::JT_VOID:
1066    return 0;
1067  case JDWP::JT_BYTE:
1068  case JDWP::JT_BOOLEAN:
1069    return 1;
1070  case JDWP::JT_CHAR:
1071  case JDWP::JT_SHORT:
1072    return 2;
1073  case JDWP::JT_FLOAT:
1074  case JDWP::JT_INT:
1075    return 4;
1076  case JDWP::JT_ARRAY:
1077  case JDWP::JT_OBJECT:
1078  case JDWP::JT_STRING:
1079  case JDWP::JT_THREAD:
1080  case JDWP::JT_THREAD_GROUP:
1081  case JDWP::JT_CLASS_LOADER:
1082  case JDWP::JT_CLASS_OBJECT:
1083    return sizeof(JDWP::ObjectId);
1084  case JDWP::JT_DOUBLE:
1085  case JDWP::JT_LONG:
1086    return 8;
1087  default:
1088    LOG(FATAL) << "Unknown tag " << tag;
1089    return -1;
1090  }
1091}
1092
1093JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int32_t* length) {
1094  JDWP::JdwpError error;
1095  mirror::Array* a = DecodeNonNullArray(array_id, &error);
1096  if (a == nullptr) {
1097    return error;
1098  }
1099  *length = a->GetLength();
1100  return JDWP::ERR_NONE;
1101}
1102
1103JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
1104  JDWP::JdwpError error;
1105  mirror::Array* a = DecodeNonNullArray(array_id, &error);
1106  if (a == nullptr) {
1107    return error;
1108  }
1109
1110  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
1111    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
1112    return JDWP::ERR_INVALID_LENGTH;
1113  }
1114  JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
1115  expandBufAdd1(pReply, element_tag);
1116  expandBufAdd4BE(pReply, count);
1117
1118  if (IsPrimitiveTag(element_tag)) {
1119    size_t width = GetTagWidth(element_tag);
1120    uint8_t* dst = expandBufAddSpace(pReply, count * width);
1121    if (width == 8) {
1122      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
1123      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
1124    } else if (width == 4) {
1125      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
1126      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
1127    } else if (width == 2) {
1128      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
1129      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
1130    } else {
1131      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
1132      memcpy(dst, &src[offset * width], count * width);
1133    }
1134  } else {
1135    ScopedObjectAccessUnchecked soa(Thread::Current());
1136    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
1137    for (int i = 0; i < count; ++i) {
1138      mirror::Object* element = oa->Get(offset + i);
1139      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
1140                                                        : element_tag;
1141      expandBufAdd1(pReply, specific_tag);
1142      expandBufAddObjectId(pReply, gRegistry->Add(element));
1143    }
1144  }
1145
1146  return JDWP::ERR_NONE;
1147}
1148
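// Copies 'count' primitive values read from the JDWP request into the array, starting at element
// 'offset'.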
1149template <typename T>
1150static void CopyArrayData(mirror::Array* a, JDWP::Request* src, int offset, int count)
1151    NO_THREAD_SAFETY_ANALYSIS {
1152  // TODO: fix when annotalysis correctly handles non-member functions.
1153  DCHECK(a->GetClass()->IsPrimitiveArray());
1154
1155  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
1156  for (int i = 0; i < count; ++i) {
1157    *dst++ = src->ReadValue(sizeof(T));
1158  }
1159}
1160
1161JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
1162                                      JDWP::Request* request) {
1163  JDWP::JdwpError error;
1164  mirror::Array* dst = DecodeNonNullArray(array_id, &error);
1165  if (dst == nullptr) {
1166    return error;
1167  }
1168
1169  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
1170    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
1171    return JDWP::ERR_INVALID_LENGTH;
1172  }
1173  JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());
1174
1175  if (IsPrimitiveTag(element_tag)) {
1176    size_t width = GetTagWidth(element_tag);
1177    if (width == 8) {
1178      CopyArrayData<uint64_t>(dst, request, offset, count);
1179    } else if (width == 4) {
1180      CopyArrayData<uint32_t>(dst, request, offset, count);
1181    } else if (width == 2) {
1182      CopyArrayData<uint16_t>(dst, request, offset, count);
1183    } else {
1184      CopyArrayData<uint8_t>(dst, request, offset, count);
1185    }
1186  } else {
1187    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
1188    for (int i = 0; i < count; ++i) {
1189      JDWP::ObjectId id = request->ReadObjectId();
1190      mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
1191      if (error != JDWP::ERR_NONE) {
1192        return error;
1193      }
1194      oa->Set<false>(offset + i, o);
1195    }
1196  }
1197
1198  return JDWP::ERR_NONE;
1199}
1200
1201JDWP::JdwpError Dbg::CreateString(const std::string& str, JDWP::ObjectId* new_string_id) {
1202  Thread* self = Thread::Current();
1203  mirror::String* new_string = mirror::String::AllocFromModifiedUtf8(self, str.c_str());
1204  if (new_string == nullptr) {
1205    DCHECK(self->IsExceptionPending());
1206    self->ClearException();
1207    LOG(ERROR) << "Could not allocate string";
1208    *new_string_id = 0;
1209    return JDWP::ERR_OUT_OF_MEMORY;
1210  }
1211  *new_string_id = gRegistry->Add(new_string);
1212  return JDWP::ERR_NONE;
1213}
1214
1215JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id) {
1216  JDWP::JdwpError error;
1217  mirror::Class* c = DecodeClass(class_id, &error);
1218  if (c == nullptr) {
1219    *new_object_id = 0;
1220    return error;
1221  }
1222  Thread* self = Thread::Current();
1223  mirror::Object* new_object = c->AllocObject(self);
1224  if (new_object == nullptr) {
1225    DCHECK(self->IsExceptionPending());
1226    self->ClearException();
1227    LOG(ERROR) << "Could not allocate object of type " << PrettyDescriptor(c);
1228    *new_object_id = 0;
1229    return JDWP::ERR_OUT_OF_MEMORY;
1230  }
1231  *new_object_id = gRegistry->Add(new_object);
1232  return JDWP::ERR_NONE;
1233}
1234
1235/*
1236 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
1237 */
1238JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
1239                                       JDWP::ObjectId* new_array_id) {
1240  JDWP::JdwpError error;
1241  mirror::Class* c = DecodeClass(array_class_id, &error);
1242  if (c == nullptr) {
1243    *new_array_id = 0;
1244    return error;
1245  }
1246  Thread* self = Thread::Current();
1247  gc::Heap* heap = Runtime::Current()->GetHeap();
1248  mirror::Array* new_array = mirror::Array::Alloc<true>(self, c, length,
1249                                                        c->GetComponentSizeShift(),
1250                                                        heap->GetCurrentAllocator());
1251  if (new_array == nullptr) {
1252    DCHECK(self->IsExceptionPending());
1253    self->ClearException();
1254    LOG(ERROR) << "Could not allocate array of type " << PrettyDescriptor(c);
1255    *new_array_id = 0;
1256    return JDWP::ERR_OUT_OF_MEMORY;
1257  }
1258  *new_array_id = gRegistry->Add(new_array);
1259  return JDWP::ERR_NONE;
1260}
1261
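// Field and method ids exchanged with the debugger are simply the ArtField* / ArtMethod* pointer
// values, so the conversions below are plain casts.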
1262JDWP::FieldId Dbg::ToFieldId(const ArtField* f) {
1263  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
1264}
1265
1266static JDWP::MethodId ToMethodId(const ArtMethod* m)
1267    SHARED_REQUIRES(Locks::mutator_lock_) {
1268  return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
1269}
1270
1271static ArtField* FromFieldId(JDWP::FieldId fid)
1272    SHARED_REQUIRES(Locks::mutator_lock_) {
1273  return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid));
1274}
1275
1276static ArtMethod* FromMethodId(JDWP::MethodId mid)
1277    SHARED_REQUIRES(Locks::mutator_lock_) {
1278  return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid));
1279}
1280
1281bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) {
1282  CHECK(event_thread != nullptr);
1283  JDWP::JdwpError error;
1284  mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(
1285      expected_thread_id, &error);
1286  return expected_thread_peer == event_thread->GetPeer();
1287}
1288
1289bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location,
1290                        const JDWP::EventLocation& event_location) {
1291  if (expected_location.dex_pc != event_location.dex_pc) {
1292    return false;
1293  }
1294  ArtMethod* m = FromMethodId(expected_location.method_id);
1295  return m == event_location.method;
1296}
1297
1298bool Dbg::MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id) {
1299  if (event_class == nullptr) {
1300    return false;
1301  }
1302  JDWP::JdwpError error;
1303  mirror::Class* expected_class = DecodeClass(class_id, &error);
1304  CHECK(expected_class != nullptr);
1305  return expected_class->IsAssignableFrom(event_class);
1306}
1307
1308bool Dbg::MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
1309                     ArtField* event_field) {
1310  ArtField* expected_field = FromFieldId(expected_field_id);
1311  if (expected_field != event_field) {
1312    return false;
1313  }
1314  return Dbg::MatchType(event_field->GetDeclaringClass(), expected_type_id);
1315}
1316
1317bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) {
1318  JDWP::JdwpError error;
1319  mirror::Object* modifier_instance = gRegistry->Get<mirror::Object*>(expected_instance_id, &error);
1320  return modifier_instance == event_instance;
1321}
1322
1323void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) {
1324  if (m == nullptr) {
1325    memset(location, 0, sizeof(*location));
1326  } else {
1327    mirror::Class* c = m->GetDeclaringClass();
1328    location->type_tag = GetTypeTag(c);
1329    location->class_id = gRegistry->AddRefType(c);
1330    location->method_id = ToMethodId(m);
1331    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
1332  }
1333}
1334
1335std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
1336  ArtMethod* m = FromMethodId(method_id);
1337  if (m == nullptr) {
1338    return "null";
1339  }
1340  return m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
1341}
1342
1343std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
1344  ArtField* f = FromFieldId(field_id);
1345  if (f == nullptr) {
1346    return "null";
1347  }
1348  return f->GetName();
1349}
1350
1351/*
1352 * Augment the access flags for synthetic methods and fields by setting
1353 * the (as described by the spec) "0xf0000000 bit".  Also, strip out any
1354 * flags not specified by the Java programming language.
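 * For example, a public synthetic method whose dex access flags are 0x1001
 * (ACC_PUBLIC | ACC_SYNTHETIC) is reported as 0xf0001001.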
1355 */
1356static uint32_t MangleAccessFlags(uint32_t accessFlags) {
1357  accessFlags &= kAccJavaFlagsMask;
1358  if ((accessFlags & kAccSynthetic) != 0) {
1359    accessFlags |= 0xf0000000;
1360  }
1361  return accessFlags;
1362}
1363
1364/*
1365 * Circularly shifts registers so that arguments come first. Debuggers
1366 * expect slots to begin with arguments, but dex code places them at
1367 * the end.
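 * For example, with registers_size_ == 5 and ins_size_ == 2 (locals in v0..v2, arguments in
 * v3..v4), slot 3 is reported as 0 and slot 0 as 2.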
1368 */
1369static uint16_t MangleSlot(uint16_t slot, ArtMethod* m)
1370    SHARED_REQUIRES(Locks::mutator_lock_) {
1371  const DexFile::CodeItem* code_item = m->GetCodeItem();
1372  if (code_item == nullptr) {
1373    // We should not get here for a method without code (native, proxy or abstract). Log it and
1374    // return the slot as is since all registers are arguments.
1375    LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m);
1376    return slot;
1377  }
1378  uint16_t ins_size = code_item->ins_size_;
1379  uint16_t locals_size = code_item->registers_size_ - ins_size;
1380  if (slot >= locals_size) {
1381    return slot - locals_size;
1382  } else {
1383    return slot + ins_size;
1384  }
1385}
1386
1387/*
1388 * Circularly shifts registers so that arguments come last. Reverts
1389 * slots to dex style argument placement.
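 * For example, with registers_size_ == 5 and ins_size_ == 2, slot 0 maps back to dex register 3
 * and slot 2 maps back to dex register 0.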
1390 */
1391static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error)
1392    SHARED_REQUIRES(Locks::mutator_lock_) {
1393  const DexFile::CodeItem* code_item = m->GetCodeItem();
1394  if (code_item == nullptr) {
1395    // We should not get here for a method without code (native, proxy or abstract). Log it and
1396    // return the slot as is since all registers are arguments.
1397    LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
1398    uint16_t vreg_count = ArtMethod::NumArgRegisters(m->GetShorty());
1399    if (slot < vreg_count) {
1400      *error = JDWP::ERR_NONE;
1401      return slot;
1402    }
1403  } else {
1404    if (slot < code_item->registers_size_) {
1405      uint16_t ins_size = code_item->ins_size_;
1406      uint16_t locals_size = code_item->registers_size_ - ins_size;
1407      *error = JDWP::ERR_NONE;
1408      return (slot < ins_size) ? slot + locals_size : slot - ins_size;
1409    }
1410  }
1411
1412  // Slot is invalid in the method.
1413  LOG(ERROR) << "Invalid local slot " << slot << " for method " << PrettyMethod(m);
1414  *error = JDWP::ERR_INVALID_SLOT;
1415  return DexFile::kDexNoIndex16;
1416}
1417
1418JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic,
1419                                          JDWP::ExpandBuf* pReply) {
1420  JDWP::JdwpError error;
1421  mirror::Class* c = DecodeClass(class_id, &error);
1422  if (c == nullptr) {
1423    return error;
1424  }
1425
1426  size_t instance_field_count = c->NumInstanceFields();
1427  size_t static_field_count = c->NumStaticFields();
1428
1429  expandBufAdd4BE(pReply, instance_field_count + static_field_count);
1430
1431  for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
1432    ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) :
1433        c->GetStaticField(i - instance_field_count);
1434    expandBufAddFieldId(pReply, ToFieldId(f));
1435    expandBufAddUtf8String(pReply, f->GetName());
1436    expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
1437    if (with_generic) {
1438      static const char genericSignature[1] = "";
1439      expandBufAddUtf8String(pReply, genericSignature);
1440    }
1441    expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
1442  }
1443  return JDWP::ERR_NONE;
1444}
1445
1446JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
1447                                           JDWP::ExpandBuf* pReply) {
1448  JDWP::JdwpError error;
1449  mirror::Class* c = DecodeClass(class_id, &error);
1450  if (c == nullptr) {
1451    return error;
1452  }
1453
1454  size_t direct_method_count = c->NumDirectMethods();
1455  size_t virtual_method_count = c->NumVirtualMethods();
1456
1457  expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);
1458
1459  auto* cl = Runtime::Current()->GetClassLinker();
1460  auto ptr_size = cl->GetImagePointerSize();
1461  for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
1462    ArtMethod* m = i < direct_method_count ?
1463        c->GetDirectMethod(i, ptr_size) : c->GetVirtualMethod(i - direct_method_count, ptr_size);
1464    expandBufAddMethodId(pReply, ToMethodId(m));
1465    expandBufAddUtf8String(pReply, m->GetInterfaceMethodIfProxy(sizeof(void*))->GetName());
1466    expandBufAddUtf8String(pReply,
1467                           m->GetInterfaceMethodIfProxy(sizeof(void*))->GetSignature().ToString());
1468    if (with_generic) {
1469      const char* generic_signature = "";
1470      expandBufAddUtf8String(pReply, generic_signature);
1471    }
1472    expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
1473  }
1474  return JDWP::ERR_NONE;
1475}
1476
1477JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1478  JDWP::JdwpError error;
1479  Thread* self = Thread::Current();
1480  StackHandleScope<1> hs(self);
1481  Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, &error)));
1482  if (c.Get() == nullptr) {
1483    return error;
1484  }
1485  size_t interface_count = c->NumDirectInterfaces();
1486  expandBufAdd4BE(pReply, interface_count);
1487  for (size_t i = 0; i < interface_count; ++i) {
1488    expandBufAddRefTypeId(pReply,
1489                          gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i)));
1490  }
1491  return JDWP::ERR_NONE;
1492}
1493
1494void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply) {
1495  struct DebugCallbackContext {
1496    int numItems;
1497    JDWP::ExpandBuf* pReply;
1498
1499    static bool Callback(void* context, uint32_t address, uint32_t line_number) {
1500      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1501      expandBufAdd8BE(pContext->pReply, address);
1502      expandBufAdd4BE(pContext->pReply, line_number);
1503      pContext->numItems++;
1504      return false;
1505    }
1506  };
1507  ArtMethod* m = FromMethodId(method_id);
1508  const DexFile::CodeItem* code_item = m->GetCodeItem();
1509  uint64_t start, end;
1510  if (code_item == nullptr) {
1511    DCHECK(m->IsNative() || m->IsProxyMethod());
1512    start = -1;  // JDWP uses -1 here when the method has no line info (e.g. native methods).
1513    end = -1;
1514  } else {
1515    start = 0;
1516    // Use the index of the last code unit as the end of the valid range.
1517    end = code_item->insns_size_in_code_units_ - 1;
1518  }
1519
1520  expandBufAdd8BE(pReply, start);
1521  expandBufAdd8BE(pReply, end);
1522
1523  // Add numLines later
1524  size_t numLinesOffset = expandBufGetLength(pReply);
1525  expandBufAdd4BE(pReply, 0);
1526
1527  DebugCallbackContext context;
1528  context.numItems = 0;
1529  context.pReply = pReply;
1530
1531  if (code_item != nullptr) {
1532    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
1533                                     DebugCallbackContext::Callback, nullptr, &context);
1534  }
1535
1536  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
1537}
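// The number of (code index, line number) pairs is only known after DecodeDebugInfo has run, so
// a zero placeholder is written first and patched in place once the callback has counted the
// entries. OutputVariableTable below uses the same reserve-then-patch pattern for its count.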
1538
1539void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
1540                              JDWP::ExpandBuf* pReply) {
1541  struct DebugCallbackContext {
1542    ArtMethod* method;
1543    JDWP::ExpandBuf* pReply;
1544    size_t variable_count;
1545    bool with_generic;
1546
1547    static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
1548                         const char* name, const char* descriptor, const char* signature)
1549        SHARED_REQUIRES(Locks::mutator_lock_) {
1550      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1551
1552      VLOG(jdwp) << StringPrintf("    %2zd: %u(%u) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
1553                                 pContext->variable_count, startAddress, endAddress - startAddress,
1554                                 name, descriptor, signature, slot,
1555                                 MangleSlot(slot, pContext->method));
1556
1557      slot = MangleSlot(slot, pContext->method);
1558
1559      expandBufAdd8BE(pContext->pReply, startAddress);
1560      expandBufAddUtf8String(pContext->pReply, name);
1561      expandBufAddUtf8String(pContext->pReply, descriptor);
1562      if (pContext->with_generic) {
1563        expandBufAddUtf8String(pContext->pReply, signature);
1564      }
1565      expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
1566      expandBufAdd4BE(pContext->pReply, slot);
1567
1568      ++pContext->variable_count;
1569    }
1570  };
1571  ArtMethod* m = FromMethodId(method_id);
1572
1573  // arg_count considers doubles and longs to take 2 units.
1574  // variable_count considers everything to take 1 unit.
1575  std::string shorty(m->GetShorty());
1576  expandBufAdd4BE(pReply, ArtMethod::NumArgRegisters(shorty));
1577
1578  // We don't know the total number of variables yet, so leave a blank and update it later.
1579  size_t variable_count_offset = expandBufGetLength(pReply);
1580  expandBufAdd4BE(pReply, 0);
1581
1582  DebugCallbackContext context;
1583  context.method = m;
1584  context.pReply = pReply;
1585  context.variable_count = 0;
1586  context.with_generic = with_generic;
1587
1588  const DexFile::CodeItem* code_item = m->GetCodeItem();
1589  if (code_item != nullptr) {
1590    m->GetDexFile()->DecodeDebugInfo(
1591        code_item, m->IsStatic(), m->GetDexMethodIndex(), nullptr, DebugCallbackContext::Callback,
1592        &context);
1593  }
1594
1595  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
1596}
1597
1598void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
1599                                  JDWP::ExpandBuf* pReply) {
1600  ArtMethod* m = FromMethodId(method_id);
1601  JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
1602  OutputJValue(tag, return_value, pReply);
1603}
1604
1605void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
1606                           JDWP::ExpandBuf* pReply) {
1607  ArtField* f = FromFieldId(field_id);
1608  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1609  OutputJValue(tag, field_value, pReply);
1610}
1611
1612JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
1613                                  std::vector<uint8_t>* bytecodes) {
1614  ArtMethod* m = FromMethodId(method_id);
1615  if (m == nullptr) {
1616    return JDWP::ERR_INVALID_METHODID;
1617  }
1618  const DexFile::CodeItem* code_item = m->GetCodeItem();
1619  size_t byte_count = code_item->insns_size_in_code_units_ * 2;
1620  const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
1621  const uint8_t* end = begin + byte_count;
1622  for (const uint8_t* p = begin; p != end; ++p) {
1623    bytecodes->push_back(*p);
1624  }
1625  return JDWP::ERR_NONE;
1626}
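// Dex instructions are encoded as 16-bit code units, hence byte_count is
// insns_size_in_code_units_ * 2. Note that this assumes the method has a code item; native and
// proxy methods (which have none) are not handled here.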
1627
1628JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
1629  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1630}
1631
1632JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
1633  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1634}
1635
1636static JValue GetArtFieldValue(ArtField* f, mirror::Object* o)
1637    SHARED_REQUIRES(Locks::mutator_lock_) {
1638  Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
1639  JValue field_value;
1640  switch (fieldType) {
1641    case Primitive::kPrimBoolean:
1642      field_value.SetZ(f->GetBoolean(o));
1643      return field_value;
1644
1645    case Primitive::kPrimByte:
1646      field_value.SetB(f->GetByte(o));
1647      return field_value;
1648
1649    case Primitive::kPrimChar:
1650      field_value.SetC(f->GetChar(o));
1651      return field_value;
1652
1653    case Primitive::kPrimShort:
1654      field_value.SetS(f->GetShort(o));
1655      return field_value;
1656
1657    case Primitive::kPrimInt:
1658    case Primitive::kPrimFloat:
1659      // Int and Float must be treated as 32-bit values in JDWP.
1660      field_value.SetI(f->GetInt(o));
1661      return field_value;
1662
1663    case Primitive::kPrimLong:
1664    case Primitive::kPrimDouble:
1665      // Long and Double must be treated as 64-bit values in JDWP.
1666      field_value.SetJ(f->GetLong(o));
1667      return field_value;
1668
1669    case Primitive::kPrimNot:
1670      field_value.SetL(f->GetObject(o));
1671      return field_value;
1672
1673    case Primitive::kPrimVoid:
1674      LOG(FATAL) << "Attempt to read from field of type 'void'";
1675      UNREACHABLE();
1676  }
1677  LOG(FATAL) << "Attempt to read from field of unknown type";
1678  UNREACHABLE();
1679}
1680
1681static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
1682                                         JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
1683                                         bool is_static)
1684    SHARED_REQUIRES(Locks::mutator_lock_) {
1685  JDWP::JdwpError error;
1686  mirror::Class* c = DecodeClass(ref_type_id, &error);
1687  if (ref_type_id != 0 && c == nullptr) {
1688    return error;
1689  }
1690
1691  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
1692  if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
1693    return JDWP::ERR_INVALID_OBJECT;
1694  }
1695  ArtField* f = FromFieldId(field_id);
1696
1697  mirror::Class* receiver_class = c;
1698  if (receiver_class == nullptr && o != nullptr) {
1699    receiver_class = o->GetClass();
1700  }
1701  // TODO: should we give up now if receiver_class is null?
1702  if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
1703    LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
1704    return JDWP::ERR_INVALID_FIELDID;
1705  }
1706
1707  // The RI only enforces the static/non-static mismatch in one direction.
1708  // TODO: should we change the tests and check both?
1709  if (is_static) {
1710    if (!f->IsStatic()) {
1711      return JDWP::ERR_INVALID_FIELDID;
1712    }
1713  } else {
1714    if (f->IsStatic()) {
1715      LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.GetValues"
1716                   << " on static field " << PrettyField(f);
1717    }
1718  }
1719  if (f->IsStatic()) {
1720    o = f->GetDeclaringClass();
1721  }
1722
1723  JValue field_value(GetArtFieldValue(f, o));
1724  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1725  Dbg::OutputJValue(tag, &field_value, pReply);
1726  return JDWP::ERR_NONE;
1727}
1728
1729JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1730                                   JDWP::ExpandBuf* pReply) {
1731  return GetFieldValueImpl(0, object_id, field_id, pReply, false);
1732}
1733
1734JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
1735                                         JDWP::ExpandBuf* pReply) {
1736  return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
1737}
1738
1739static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width)
1740    SHARED_REQUIRES(Locks::mutator_lock_) {
1741  Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
1742  // Debugging only happens at runtime so we know we are not running in a transaction.
1743  static constexpr bool kNoTransactionMode = false;
1744  switch (fieldType) {
1745    case Primitive::kPrimBoolean:
1746      CHECK_EQ(width, 1);
1747      f->SetBoolean<kNoTransactionMode>(o, static_cast<uint8_t>(value));
1748      return JDWP::ERR_NONE;
1749
1750    case Primitive::kPrimByte:
1751      CHECK_EQ(width, 1);
1752      f->SetByte<kNoTransactionMode>(o, static_cast<uint8_t>(value));
1753      return JDWP::ERR_NONE;
1754
1755    case Primitive::kPrimChar:
1756      CHECK_EQ(width, 2);
1757      f->SetChar<kNoTransactionMode>(o, static_cast<uint16_t>(value));
1758      return JDWP::ERR_NONE;
1759
1760    case Primitive::kPrimShort:
1761      CHECK_EQ(width, 2);
1762      f->SetShort<kNoTransactionMode>(o, static_cast<int16_t>(value));
1763      return JDWP::ERR_NONE;
1764
1765    case Primitive::kPrimInt:
1766    case Primitive::kPrimFloat:
1767      CHECK_EQ(width, 4);
1768      // Int and Float must be treated as 32-bit values in JDWP.
1769      f->SetInt<kNoTransactionMode>(o, static_cast<int32_t>(value));
1770      return JDWP::ERR_NONE;
1771
1772    case Primitive::kPrimLong:
1773    case Primitive::kPrimDouble:
1774      CHECK_EQ(width, 8);
1775      // Long and Double must be treated as 64-bit values in JDWP.
1776      f->SetLong<kNoTransactionMode>(o, value);
1777      return JDWP::ERR_NONE;
1778
1779    case Primitive::kPrimNot: {
1780      JDWP::JdwpError error;
1781      mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value, &error);
1782      if (error != JDWP::ERR_NONE) {
1783        return JDWP::ERR_INVALID_OBJECT;
1784      }
1785      if (v != nullptr) {
1786        mirror::Class* field_type;
1787        {
1788          StackHandleScope<2> hs(Thread::Current());
1789          HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
1790          HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
1791          field_type = f->GetType<true>();
1792        }
1793        if (!field_type->IsAssignableFrom(v->GetClass())) {
1794          return JDWP::ERR_INVALID_OBJECT;
1795        }
1796      }
1797      f->SetObject<kNoTransactionMode>(o, v);
1798      return JDWP::ERR_NONE;
1799    }
1800
1801    case Primitive::kPrimVoid:
1802      LOG(FATAL) << "Attempt to write to field of type 'void'";
1803      UNREACHABLE();
1804  }
1805  LOG(FATAL) << "Attempt to write to field of unknown type";
1806  UNREACHABLE();
1807}
1808
1809static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1810                                         uint64_t value, int width, bool is_static)
1811    SHARED_REQUIRES(Locks::mutator_lock_) {
1812  JDWP::JdwpError error;
1813  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
1814  if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
1815    return JDWP::ERR_INVALID_OBJECT;
1816  }
1817  ArtField* f = FromFieldId(field_id);
1818
1819  // The RI only enforces the static/non-static mismatch in one direction.
1820  // TODO: should we change the tests and check both?
1821  if (is_static) {
1822    if (!f->IsStatic()) {
1823      return JDWP::ERR_INVALID_FIELDID;
1824    }
1825  } else {
1826    if (f->IsStatic()) {
1827      LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues"
1828                   << " on static field " << PrettyField(f);
1829    }
1830  }
1831  if (f->IsStatic()) {
1832    o = f->GetDeclaringClass();
1833  }
1834  return SetArtFieldValue(f, o, value, width);
1835}
1836
1837JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
1838                                   int width) {
1839  return SetFieldValueImpl(object_id, field_id, value, width, false);
1840}
1841
1842JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
1843  return SetFieldValueImpl(0, field_id, value, width, true);
1844}
1845
1846JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) {
1847  JDWP::JdwpError error;
1848  mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id, &error);
1849  if (error != JDWP::ERR_NONE) {
1850    return error;
1851  }
1852  if (obj == nullptr) {
1853    return JDWP::ERR_INVALID_OBJECT;
1854  }
1855  {
1856    ScopedObjectAccessUnchecked soa(Thread::Current());
1857    mirror::Class* java_lang_String = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_String);
1858    if (!java_lang_String->IsAssignableFrom(obj->GetClass())) {
1859      // This isn't a string.
1860      return JDWP::ERR_INVALID_STRING;
1861    }
1862  }
1863  *str = obj->AsString()->ToModifiedUtf8();
1864  return JDWP::ERR_NONE;
1865}
1866
1867void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
1868  if (IsPrimitiveTag(tag)) {
1869    expandBufAdd1(pReply, tag);
1870    if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
1871      expandBufAdd1(pReply, return_value->GetI());
1872    } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
1873      expandBufAdd2BE(pReply, return_value->GetI());
1874    } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
1875      expandBufAdd4BE(pReply, return_value->GetI());
1876    } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1877      expandBufAdd8BE(pReply, return_value->GetJ());
1878    } else {
1879      CHECK_EQ(tag, JDWP::JT_VOID);
1880    }
1881  } else {
1882    ScopedObjectAccessUnchecked soa(Thread::Current());
1883    mirror::Object* value = return_value->GetL();
1884    expandBufAdd1(pReply, TagFromObject(soa, value));
1885    expandBufAddObjectId(pReply, gRegistry->Add(value));
1886  }
1887}
1888
1889JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) {
1890  ScopedObjectAccessUnchecked soa(Thread::Current());
1891  JDWP::JdwpError error;
1892  Thread* thread = DecodeThread(soa, thread_id, &error);
1893  UNUSED(thread);
1894  if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
1895    return error;
1896  }
1897
1898  // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
1899  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
1900  CHECK(thread_object != nullptr) << error;
1901  ArtField* java_lang_Thread_name_field =
1902      soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
1903  mirror::String* s =
1904      reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
1905  if (s != nullptr) {
1906    *name = s->ToModifiedUtf8();
1907  }
1908  return JDWP::ERR_NONE;
1909}
1910
1911JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
1912  ScopedObjectAccessUnchecked soa(Thread::Current());
1913  JDWP::JdwpError error;
1914  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
1915  if (error != JDWP::ERR_NONE) {
1916    return JDWP::ERR_INVALID_OBJECT;
1917  }
1918  ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroup");
1919  // Okay, so it's an object, but is it actually a thread?
1920  Thread* thread = DecodeThread(soa, thread_id, &error);
1921  UNUSED(thread);
1922  if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
1923    // Zombie threads are in the null group.
1924    expandBufAddObjectId(pReply, JDWP::ObjectId(0));
1925    error = JDWP::ERR_NONE;
1926  } else if (error == JDWP::ERR_NONE) {
1927    mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
1928    CHECK(c != nullptr);
1929    ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
1930    CHECK(f != nullptr);
1931    mirror::Object* group = f->GetObject(thread_object);
1932    CHECK(group != nullptr);
1933    JDWP::ObjectId thread_group_id = gRegistry->Add(group);
1934    expandBufAddObjectId(pReply, thread_group_id);
1935  }
1936  return error;
1937}
1938
1939static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
1940                                         JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
1941    SHARED_REQUIRES(Locks::mutator_lock_) {
1942  mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id,
1943                                                                                error);
1944  if (*error != JDWP::ERR_NONE) {
1945    return nullptr;
1946  }
1947  if (thread_group == nullptr) {
1948    *error = JDWP::ERR_INVALID_OBJECT;
1949    return nullptr;
1950  }
1951  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
1952  CHECK(c != nullptr);
1953  if (!c->IsAssignableFrom(thread_group->GetClass())) {
1954    // This is not a java.lang.ThreadGroup.
1955    *error = JDWP::ERR_INVALID_THREAD_GROUP;
1956    return nullptr;
1957  }
1958  *error = JDWP::ERR_NONE;
1959  return thread_group;
1960}
1961
1962JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
1963  ScopedObjectAccessUnchecked soa(Thread::Current());
1964  JDWP::JdwpError error;
1965  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
1966  if (error != JDWP::ERR_NONE) {
1967    return error;
1968  }
1969  ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupName");
1970  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
1971  CHECK(f != nullptr);
1972  mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
1973
1974  std::string thread_group_name(s->ToModifiedUtf8());
1975  expandBufAddUtf8String(pReply, thread_group_name);
1976  return JDWP::ERR_NONE;
1977}
1978
1979JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
1980  ScopedObjectAccessUnchecked soa(Thread::Current());
1981  JDWP::JdwpError error;
1982  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
1983  if (error != JDWP::ERR_NONE) {
1984    return error;
1985  }
1986  mirror::Object* parent;
1987  {
1988    ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupParent");
1989    ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_parent);
1990    CHECK(f != nullptr);
1991    parent = f->GetObject(thread_group);
1992  }
1993  JDWP::ObjectId parent_group_id = gRegistry->Add(parent);
1994  expandBufAddObjectId(pReply, parent_group_id);
1995  return JDWP::ERR_NONE;
1996}
1997
1998static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
1999                                 std::vector<JDWP::ObjectId>* child_thread_group_ids)
2000    SHARED_REQUIRES(Locks::mutator_lock_) {
2001  CHECK(thread_group != nullptr);
2002
2003  // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
2004  ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups);
2005  mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
2006  {
2007    // The "groups" field is declared as a java.util.List: check it really is
2008    // an instance of java.util.ArrayList.
2009    CHECK(groups_array_list != nullptr);
2010    mirror::Class* java_util_ArrayList_class =
2011        soa.Decode<mirror::Class*>(WellKnownClasses::java_util_ArrayList);
2012    CHECK(groups_array_list->InstanceOf(java_util_ArrayList_class));
2013  }
2014
2015  // Get the array and size out of the ArrayList<ThreadGroup>...
2016  ArtField* array_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_array);
2017  ArtField* size_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_size);
2018  mirror::ObjectArray<mirror::Object>* groups_array =
2019      array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
2020  const int32_t size = size_field->GetInt(groups_array_list);
2021
2022  // Copy the first 'size' elements out of the array into the result.
2023  ObjectRegistry* registry = Dbg::GetObjectRegistry();
2024  for (int32_t i = 0; i < size; ++i) {
2025    child_thread_group_ids->push_back(registry->Add(groups_array->Get(i)));
2026  }
2027}
2028
2029JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
2030                                            JDWP::ExpandBuf* pReply) {
2031  ScopedObjectAccessUnchecked soa(Thread::Current());
2032  JDWP::JdwpError error;
2033  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
2034  if (error != JDWP::ERR_NONE) {
2035    return error;
2036  }
2037
2038  // Add child threads.
2039  {
2040    std::vector<JDWP::ObjectId> child_thread_ids;
2041    GetThreads(thread_group, &child_thread_ids);
2042    expandBufAdd4BE(pReply, child_thread_ids.size());
2043    for (JDWP::ObjectId child_thread_id : child_thread_ids) {
2044      expandBufAddObjectId(pReply, child_thread_id);
2045    }
2046  }
2047
2048  // Add child thread groups.
2049  {
2050    std::vector<JDWP::ObjectId> child_thread_groups_ids;
2051    GetChildThreadGroups(soa, thread_group, &child_thread_groups_ids);
2052    expandBufAdd4BE(pReply, child_thread_groups_ids.size());
2053    for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) {
2054      expandBufAddObjectId(pReply, child_thread_group_id);
2055    }
2056  }
2057
2058  return JDWP::ERR_NONE;
2059}
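// The reply order above matches ThreadGroupReference.Children: child threads (count + ids)
// first, then child thread groups (count + ids).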
2060
2061JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
2062  ScopedObjectAccessUnchecked soa(Thread::Current());
2063  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
2064  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
2065  return gRegistry->Add(group);
2066}
2067
2068JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
2069  switch (state) {
2070    case kBlocked:
2071      return JDWP::TS_MONITOR;
2072    case kNative:
2073    case kRunnable:
2074    case kSuspended:
2075      return JDWP::TS_RUNNING;
2076    case kSleeping:
2077      return JDWP::TS_SLEEPING;
2078    case kStarting:
2079    case kTerminated:
2080      return JDWP::TS_ZOMBIE;
2081    case kTimedWaiting:
2082    case kWaitingForCheckPointsToRun:
2083    case kWaitingForDebuggerSend:
2084    case kWaitingForDebuggerSuspension:
2085    case kWaitingForDebuggerToAttach:
2086    case kWaitingForDeoptimization:
2087    case kWaitingForGcToComplete:
2088    case kWaitingForGetObjectsAllocated:
2089    case kWaitingForJniOnLoad:
2090    case kWaitingForMethodTracingStart:
2091    case kWaitingForSignalCatcherOutput:
2092    case kWaitingForVisitObjects:
2093    case kWaitingInMainDebuggerLoop:
2094    case kWaitingInMainSignalCatcherLoop:
2095    case kWaitingPerformingGc:
2096    case kWaitingWeakRootRead:
2097    case kWaiting:
2098      return JDWP::TS_WAIT;
2099      // Don't add a 'default' here so the compiler can spot incompatible enum changes.
2100  }
2101  LOG(FATAL) << "Unknown thread state: " << state;
2102  return JDWP::TS_ZOMBIE;
2103}
2104
2105JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
2106                                     JDWP::JdwpSuspendStatus* pSuspendStatus) {
2107  ScopedObjectAccess soa(Thread::Current());
2108
2109  *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
2110
2111  JDWP::JdwpError error;
2112  Thread* thread = DecodeThread(soa, thread_id, &error);
2113  if (error != JDWP::ERR_NONE) {
2114    if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
2115      *pThreadStatus = JDWP::TS_ZOMBIE;
2116      return JDWP::ERR_NONE;
2117    }
2118    return error;
2119  }
2120
2121  if (IsSuspendedForDebugger(soa, thread)) {
2122    *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
2123  }
2124
2125  *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
2126  return JDWP::ERR_NONE;
2127}
2128
2129JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
2130  ScopedObjectAccess soa(Thread::Current());
2131  JDWP::JdwpError error;
2132  Thread* thread = DecodeThread(soa, thread_id, &error);
2133  if (error != JDWP::ERR_NONE) {
2134    return error;
2135  }
2136  MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2137  expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
2138  return JDWP::ERR_NONE;
2139}
2140
2141JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
2142  ScopedObjectAccess soa(Thread::Current());
2143  JDWP::JdwpError error;
2144  Thread* thread = DecodeThread(soa, thread_id, &error);
2145  if (error != JDWP::ERR_NONE) {
2146    return error;
2147  }
2148  thread->Interrupt(soa.Self());
2149  return JDWP::ERR_NONE;
2150}
2151
2152static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
2153                                   mirror::Object* desired_thread_group, mirror::Object* peer)
2154    SHARED_REQUIRES(Locks::mutator_lock_) {
2155  // Do we want threads from all thread groups?
2156  if (desired_thread_group == nullptr) {
2157    return true;
2158  }
2159  ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
2160  DCHECK(thread_group_field != nullptr);
2161  mirror::Object* group = thread_group_field->GetObject(peer);
2162  return (group == desired_thread_group);
2163}
2164
2165void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) {
2166  ScopedObjectAccessUnchecked soa(Thread::Current());
2167  std::list<Thread*> all_threads_list;
2168  {
2169    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
2170    all_threads_list = Runtime::Current()->GetThreadList()->GetList();
2171  }
2172  for (Thread* t : all_threads_list) {
2173    if (t == Dbg::GetDebugThread()) {
2174      // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
2175      // query all threads, so it's easier if we just don't tell them about this thread.
2176      continue;
2177    }
2178    if (t->IsStillStarting()) {
2179      // This thread is being started (and has been registered in the thread list). However, it is
2180      // not completely started yet so we must ignore it.
2181      continue;
2182    }
2183    mirror::Object* peer = t->GetPeer();
2184    if (peer == nullptr) {
2185      // peer might be null if the thread is still starting up. We can't tell the debugger about
2186      // this thread yet.
2187      // TODO: if we identified threads to the debugger by their Thread*
2188      // rather than their peer's mirror::Object*, we could fix this.
2189      // Doing so might help us report ZOMBIE threads too.
2190      continue;
2191    }
2192    if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
2193      thread_ids->push_back(gRegistry->Add(peer));
2194    }
2195  }
2196}
2197
2198static int GetStackDepth(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_) {
2199  struct CountStackDepthVisitor : public StackVisitor {
2200    explicit CountStackDepthVisitor(Thread* thread_in)
2201        : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2202          depth(0) {}
2203
2204    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2205    // annotalysis.
2206    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2207      if (!GetMethod()->IsRuntimeMethod()) {
2208        ++depth;
2209      }
2210      return true;
2211    }
2212    size_t depth;
2213  };
2214
2215  CountStackDepthVisitor visitor(thread);
2216  visitor.WalkStack();
2217  return visitor.depth;
2218}
2219
2220JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) {
2221  ScopedObjectAccess soa(Thread::Current());
2222  JDWP::JdwpError error;
2223  *result = 0;
2224  Thread* thread = DecodeThread(soa, thread_id, &error);
2225  if (error != JDWP::ERR_NONE) {
2226    return error;
2227  }
2228  if (!IsSuspendedForDebugger(soa, thread)) {
2229    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2230  }
2231  *result = GetStackDepth(thread);
2232  return JDWP::ERR_NONE;
2233}
2234
2235JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
2236                                     size_t frame_count, JDWP::ExpandBuf* buf) {
2237  class GetFrameVisitor : public StackVisitor {
2238   public:
2239    GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
2240                    JDWP::ExpandBuf* buf_in)
2241        SHARED_REQUIRES(Locks::mutator_lock_)
2242        : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2243          depth_(0),
2244          start_frame_(start_frame_in),
2245          frame_count_(frame_count_in),
2246          buf_(buf_in) {
2247      expandBufAdd4BE(buf_, frame_count_);
2248    }
2249
2250    bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
2251      if (GetMethod()->IsRuntimeMethod()) {
2252        return true;  // The debugger can't do anything useful with a frame that has no Method*.
2253      }
2254      if (depth_ >= start_frame_ + frame_count_) {
2255        return false;
2256      }
2257      if (depth_ >= start_frame_) {
2258        JDWP::FrameId frame_id(GetFrameId());
2259        JDWP::JdwpLocation location;
2260        SetJdwpLocation(&location, GetMethod(), GetDexPc());
2261        VLOG(jdwp) << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
2262        expandBufAdd8BE(buf_, frame_id);
2263        expandBufAddLocation(buf_, location);
2264      }
2265      ++depth_;
2266      return true;
2267    }
2268
2269   private:
2270    size_t depth_;
2271    const size_t start_frame_;
2272    const size_t frame_count_;
2273    JDWP::ExpandBuf* buf_;
2274  };
2275
2276  ScopedObjectAccessUnchecked soa(Thread::Current());
2277  JDWP::JdwpError error;
2278  Thread* thread = DecodeThread(soa, thread_id, &error);
2279  if (error != JDWP::ERR_NONE) {
2280    return error;
2281  }
2282  if (!IsSuspendedForDebugger(soa, thread)) {
2283    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2284  }
2285  GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
2286  visitor.WalkStack();
2287  return JDWP::ERR_NONE;
2288}
2289
2290JDWP::ObjectId Dbg::GetThreadSelfId() {
2291  return GetThreadId(Thread::Current());
2292}
2293
2294JDWP::ObjectId Dbg::GetThreadId(Thread* thread) {
2295  ScopedObjectAccessUnchecked soa(Thread::Current());
2296  return gRegistry->Add(thread->GetPeer());
2297}
2298
2299void Dbg::SuspendVM() {
2300  Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
2301}
2302
2303void Dbg::ResumeVM() {
2304  Runtime::Current()->GetThreadList()->ResumeAllForDebugger();
2305}
2306
2307JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
2308  Thread* self = Thread::Current();
2309  ScopedLocalRef<jobject> peer(self->GetJniEnv(), nullptr);
2310  {
2311    ScopedObjectAccess soa(self);
2312    JDWP::JdwpError error;
2313    peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id, &error)));
2314  }
2315  if (peer.get() == nullptr) {
2316    return JDWP::ERR_THREAD_NOT_ALIVE;
2317  }
2318  // Suspend thread to build stack trace.
2319  bool timed_out;
2320  ThreadList* thread_list = Runtime::Current()->GetThreadList();
2321  Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
2322                                                    &timed_out);
2323  if (thread != nullptr) {
2324    return JDWP::ERR_NONE;
2325  } else if (timed_out) {
2326    return JDWP::ERR_INTERNAL;
2327  } else {
2328    return JDWP::ERR_THREAD_NOT_ALIVE;
2329  }
2330}
2331
2332void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
2333  ScopedObjectAccessUnchecked soa(Thread::Current());
2334  JDWP::JdwpError error;
2335  mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id, &error);
2336  CHECK(peer != nullptr) << error;
2337  Thread* thread;
2338  {
2339    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2340    thread = Thread::FromManagedThread(soa, peer);
2341  }
2342  if (thread == nullptr) {
2343    LOG(WARNING) << "No such thread for resume: " << peer;
2344    return;
2345  }
2346  bool needs_resume;
2347  {
2348    MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2349    needs_resume = thread->GetSuspendCount() > 0;
2350  }
2351  if (needs_resume) {
2352    Runtime::Current()->GetThreadList()->Resume(thread, true);
2353  }
2354}
2355
2356void Dbg::SuspendSelf() {
2357  Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
2358}
2359
2360struct GetThisVisitor : public StackVisitor {
2361  GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
2362      SHARED_REQUIRES(Locks::mutator_lock_)
2363      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2364        this_object(nullptr),
2365        frame_id(frame_id_in) {}
2366
2367  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2368  // annotalysis.
2369  virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2370    if (frame_id != GetFrameId()) {
2371      return true;  // continue
2372    } else {
2373      this_object = GetThisObject();
2374      return false;
2375    }
2376  }
2377
2378  mirror::Object* this_object;
2379  JDWP::FrameId frame_id;
2380};
2381
2382JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
2383                                   JDWP::ObjectId* result) {
2384  ScopedObjectAccessUnchecked soa(Thread::Current());
2385  JDWP::JdwpError error;
2386  Thread* thread = DecodeThread(soa, thread_id, &error);
2387  if (error != JDWP::ERR_NONE) {
2388    return error;
2389  }
2390  if (!IsSuspendedForDebugger(soa, thread)) {
2391    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2392  }
2393  std::unique_ptr<Context> context(Context::Create());
2394  GetThisVisitor visitor(thread, context.get(), frame_id);
2395  visitor.WalkStack();
2396  *result = gRegistry->Add(visitor.this_object);
2397  return JDWP::ERR_NONE;
2398}
2399
2400// Walks the stack until we find the frame with the given FrameId.
2401class FindFrameVisitor FINAL : public StackVisitor {
2402 public:
2403  FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
2404      SHARED_REQUIRES(Locks::mutator_lock_)
2405      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2406        frame_id_(frame_id),
2407        error_(JDWP::ERR_INVALID_FRAMEID) {}
2408
2409  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2410  // annotalysis.
2411  bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2412    if (GetFrameId() != frame_id_) {
2413      return true;  // Not our frame, carry on.
2414    }
2415    ArtMethod* m = GetMethod();
2416    if (m->IsNative()) {
2417      // We can't read or write local values in a native method's frame.
2418      error_ = JDWP::ERR_OPAQUE_FRAME;
2419    } else {
2420      // We found our frame.
2421      error_ = JDWP::ERR_NONE;
2422    }
2423    return false;
2424  }
2425
2426  JDWP::JdwpError GetError() const {
2427    return error_;
2428  }
2429
2430 private:
2431  const JDWP::FrameId frame_id_;
2432  JDWP::JdwpError error_;
2433};
2434
2435JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) {
2436  JDWP::ObjectId thread_id = request->ReadThreadId();
2437  JDWP::FrameId frame_id = request->ReadFrameId();
2438
2439  ScopedObjectAccessUnchecked soa(Thread::Current());
2440  JDWP::JdwpError error;
2441  Thread* thread = DecodeThread(soa, thread_id, &error);
2442  if (error != JDWP::ERR_NONE) {
2443    return error;
2444  }
2445  if (!IsSuspendedForDebugger(soa, thread)) {
2446    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2447  }
2448  // Find the frame with the given frame_id.
2449  std::unique_ptr<Context> context(Context::Create());
2450  FindFrameVisitor visitor(thread, context.get(), frame_id);
2451  visitor.WalkStack();
2452  if (visitor.GetError() != JDWP::ERR_NONE) {
2453    return visitor.GetError();
2454  }
2455
2456  // Read the values from the visitor's context.
2457  int32_t slot_count = request->ReadSigned32("slot count");
2458  expandBufAdd4BE(pReply, slot_count);     /* "int values" */
2459  for (int32_t i = 0; i < slot_count; ++i) {
2460    uint32_t slot = request->ReadUnsigned32("slot");
2461    JDWP::JdwpTag reqSigByte = request->ReadTag();
2462
2463    VLOG(jdwp) << "    --> slot " << slot << " " << reqSigByte;
2464
2465    size_t width = Dbg::GetTagWidth(reqSigByte);
2466    uint8_t* ptr = expandBufAddSpace(pReply, width + 1);
2467    error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width);
2468    if (error != JDWP::ERR_NONE) {
2469      return error;
2470    }
2471  }
2472  return JDWP::ERR_NONE;
2473}
2474
2475constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;
2476
2477static std::string GetStackContextAsString(const StackVisitor& visitor)
2478    SHARED_REQUIRES(Locks::mutator_lock_) {
2479  return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false),
2480                      PrettyMethod(visitor.GetMethod()).c_str());
2481}
2482
2483static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg,
2484                                         JDWP::JdwpTag tag)
2485    SHARED_REQUIRES(Locks::mutator_lock_) {
2486  LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg
2487             << GetStackContextAsString(visitor);
2488  return kStackFrameLocalAccessError;
2489}
2490
2491JDWP::JdwpError Dbg::GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa,
2492                                   int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
2493  ArtMethod* m = visitor.GetMethod();
2494  JDWP::JdwpError error = JDWP::ERR_NONE;
2495  uint16_t vreg = DemangleSlot(slot, m, &error);
2496  if (error != JDWP::ERR_NONE) {
2497    return error;
2498  }
2499  // TODO: check that the tag is compatible with the actual type of the slot!
2500  switch (tag) {
2501    case JDWP::JT_BOOLEAN: {
2502      CHECK_EQ(width, 1U);
2503      uint32_t intVal;
2504      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
2505        return FailGetLocalValue(visitor, vreg, tag);
2506      }
2507      VLOG(jdwp) << "get boolean local " << vreg << " = " << intVal;
2508      JDWP::Set1(buf + 1, intVal != 0);
2509      break;
2510    }
2511    case JDWP::JT_BYTE: {
2512      CHECK_EQ(width, 1U);
2513      uint32_t intVal;
2514      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
2515        return FailGetLocalValue(visitor, vreg, tag);
2516      }
2517      VLOG(jdwp) << "get byte local " << vreg << " = " << intVal;
2518      JDWP::Set1(buf + 1, intVal);
2519      break;
2520    }
2521    case JDWP::JT_SHORT:
2522    case JDWP::JT_CHAR: {
2523      CHECK_EQ(width, 2U);
2524      uint32_t intVal;
2525      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
2526        return FailGetLocalValue(visitor, vreg, tag);
2527      }
2528      VLOG(jdwp) << "get short/char local " << vreg << " = " << intVal;
2529      JDWP::Set2BE(buf + 1, intVal);
2530      break;
2531    }
2532    case JDWP::JT_INT: {
2533      CHECK_EQ(width, 4U);
2534      uint32_t intVal;
2535      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
2536        return FailGetLocalValue(visitor, vreg, tag);
2537      }
2538      VLOG(jdwp) << "get int local " << vreg << " = " << intVal;
2539      JDWP::Set4BE(buf + 1, intVal);
2540      break;
2541    }
2542    case JDWP::JT_FLOAT: {
2543      CHECK_EQ(width, 4U);
2544      uint32_t intVal;
2545      if (!visitor.GetVReg(m, vreg, kFloatVReg, &intVal)) {
2546        return FailGetLocalValue(visitor, vreg, tag);
2547      }
2548      VLOG(jdwp) << "get float local " << vreg << " = " << intVal;
2549      JDWP::Set4BE(buf + 1, intVal);
2550      break;
2551    }
2552    case JDWP::JT_ARRAY:
2553    case JDWP::JT_CLASS_LOADER:
2554    case JDWP::JT_CLASS_OBJECT:
2555    case JDWP::JT_OBJECT:
2556    case JDWP::JT_STRING:
2557    case JDWP::JT_THREAD:
2558    case JDWP::JT_THREAD_GROUP: {
2559      CHECK_EQ(width, sizeof(JDWP::ObjectId));
2560      uint32_t intVal;
2561      if (!visitor.GetVReg(m, vreg, kReferenceVReg, &intVal)) {
2562        return FailGetLocalValue(visitor, vreg, tag);
2563      }
2564      mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
2565      VLOG(jdwp) << "get " << tag << " object local " << vreg << " = " << o;
2566      if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
2567        LOG(FATAL) << StringPrintf("Found invalid object %#" PRIxPTR " in register v%u",
2568                                   reinterpret_cast<uintptr_t>(o), vreg)
2569                                   << GetStackContextAsString(visitor);
2570        UNREACHABLE();
2571      }
2572      tag = TagFromObject(soa, o);
2573      JDWP::SetObjectId(buf + 1, gRegistry->Add(o));
2574      break;
2575    }
2576    case JDWP::JT_DOUBLE: {
2577      CHECK_EQ(width, 8U);
2578      uint64_t longVal;
2579      if (!visitor.GetVRegPair(m, vreg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
2580        return FailGetLocalValue(visitor, vreg, tag);
2581      }
2582      VLOG(jdwp) << "get double local " << vreg << " = " << longVal;
2583      JDWP::Set8BE(buf + 1, longVal);
2584      break;
2585    }
2586    case JDWP::JT_LONG: {
2587      CHECK_EQ(width, 8U);
2588      uint64_t longVal;
2589      if (!visitor.GetVRegPair(m, vreg, kLongLoVReg, kLongHiVReg, &longVal)) {
2590        return FailGetLocalValue(visitor, vreg, tag);
2591      }
2592      VLOG(jdwp) << "get long local " << vreg << " = " << longVal;
2593      JDWP::Set8BE(buf + 1, longVal);
2594      break;
2595    }
2596    default:
2597      LOG(FATAL) << "Unknown tag " << tag;
2598      UNREACHABLE();
2599  }
2600
2601  // Prepend tag, which may have been updated.
2602  JDWP::Set1(buf, tag);
2603  return JDWP::ERR_NONE;
2604}
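// The caller (GetLocalValues) reserved width + 1 bytes in the reply: buf[0] holds the tag,
// possibly refined above for object slots, and buf[1..width] holds the big-endian value, which
// is why every case writes at buf + 1.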
2605
2606JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) {
2607  JDWP::ObjectId thread_id = request->ReadThreadId();
2608  JDWP::FrameId frame_id = request->ReadFrameId();
2609
2610  ScopedObjectAccessUnchecked soa(Thread::Current());
2611  JDWP::JdwpError error;
2612  Thread* thread = DecodeThread(soa, thread_id, &error);
2613  if (error != JDWP::ERR_NONE) {
2614    return error;
2615  }
2616  if (!IsSuspendedForDebugger(soa, thread)) {
2617    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2618  }
2619  // Find the frame with the given frame_id.
2620  std::unique_ptr<Context> context(Context::Create());
2621  FindFrameVisitor visitor(thread, context.get(), frame_id);
2622  visitor.WalkStack();
2623  if (visitor.GetError() != JDWP::ERR_NONE) {
2624    return visitor.GetError();
2625  }
2626
2627  // Write the values into the visitor's context.
2628  int32_t slot_count = request->ReadSigned32("slot count");
2629  for (int32_t i = 0; i < slot_count; ++i) {
2630    uint32_t slot = request->ReadUnsigned32("slot");
2631    JDWP::JdwpTag sigByte = request->ReadTag();
2632    size_t width = Dbg::GetTagWidth(sigByte);
2633    uint64_t value = request->ReadValue(width);
2634
2635    VLOG(jdwp) << "    --> slot " << slot << " " << sigByte << " " << value;
2636    error = Dbg::SetLocalValue(visitor, slot, sigByte, value, width);
2637    if (error != JDWP::ERR_NONE) {
2638      return error;
2639    }
2640  }
2641  return JDWP::ERR_NONE;
2642}
2643
2644template<typename T>
2645static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg,
2646                                         JDWP::JdwpTag tag, T value)
2647    SHARED_REQUIRES(Locks::mutator_lock_) {
2648  LOG(ERROR) << "Failed to write " << tag << " local " << value
2649             << " (0x" << std::hex << value << ") into register v" << vreg
2650             << GetStackContextAsString(visitor);
2651  return kStackFrameLocalAccessError;
2652}
2653
2654JDWP::JdwpError Dbg::SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag,
2655                                   uint64_t value, size_t width) {
2656  ArtMethod* m = visitor.GetMethod();
2657  JDWP::JdwpError error = JDWP::ERR_NONE;
2658  uint16_t vreg = DemangleSlot(slot, m, &error);
2659  if (error != JDWP::ERR_NONE) {
2660    return error;
2661  }
2662  // TODO: check that the tag is compatible with the actual type of the slot!
2663  switch (tag) {
2664    case JDWP::JT_BOOLEAN:
2665    case JDWP::JT_BYTE:
2666      CHECK_EQ(width, 1U);
2667      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
2668        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2669      }
2670      break;
2671    case JDWP::JT_SHORT:
2672    case JDWP::JT_CHAR:
2673      CHECK_EQ(width, 2U);
2674      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
2675        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2676      }
2677      break;
2678    case JDWP::JT_INT:
2679      CHECK_EQ(width, 4U);
2680      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
2681        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2682      }
2683      break;
2684    case JDWP::JT_FLOAT:
2685      CHECK_EQ(width, 4U);
2686      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kFloatVReg)) {
2687        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
2688      }
2689      break;
2690    case JDWP::JT_ARRAY:
2691    case JDWP::JT_CLASS_LOADER:
2692    case JDWP::JT_CLASS_OBJECT:
2693    case JDWP::JT_OBJECT:
2694    case JDWP::JT_STRING:
2695    case JDWP::JT_THREAD:
2696    case JDWP::JT_THREAD_GROUP: {
2697      CHECK_EQ(width, sizeof(JDWP::ObjectId));
2698      mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value),
2699                                                          &error);
2700      if (error != JDWP::ERR_NONE) {
2701        VLOG(jdwp) << tag << " object " << o << " is an invalid object";
2702        return JDWP::ERR_INVALID_OBJECT;
2703      }
2704      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
2705                                 kReferenceVReg)) {
2706        return FailSetLocalValue(visitor, vreg, tag, reinterpret_cast<uintptr_t>(o));
2707      }
2708      break;
2709    }
2710    case JDWP::JT_DOUBLE: {
2711      CHECK_EQ(width, 8U);
2712      if (!visitor.SetVRegPair(m, vreg, value, kDoubleLoVReg, kDoubleHiVReg)) {
2713        return FailSetLocalValue(visitor, vreg, tag, value);
2714      }
2715      break;
2716    }
2717    case JDWP::JT_LONG: {
2718      CHECK_EQ(width, 8U);
2719      if (!visitor.SetVRegPair(m, vreg, value, kLongLoVReg, kLongHiVReg)) {
2720        return FailSetLocalValue(visitor, vreg, tag, value);
2721      }
2722      break;
2723    }
2724    default:
2725      LOG(FATAL) << "Unknown tag " << tag;
2726      UNREACHABLE();
2727  }
2728  return JDWP::ERR_NONE;
2729}
2730
2731static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc)
2732    SHARED_REQUIRES(Locks::mutator_lock_) {
2733  DCHECK(location != nullptr);
2734  if (m == nullptr) {
2735    memset(location, 0, sizeof(*location));
2736  } else {
2737    location->method = m;
2738    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint32_t>(-1) : dex_pc;
2739  }
2740}
2741
2742void Dbg::PostLocationEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
2743                            int event_flags, const JValue* return_value) {
2744  if (!IsDebuggerActive()) {
2745    return;
2746  }
2747  DCHECK(m != nullptr);
2748  DCHECK_EQ(m->IsStatic(), this_object == nullptr);
2749  JDWP::EventLocation location;
2750  SetEventLocation(&location, m, dex_pc);
2751
2752  // We need to be sure no exception is pending when calling JdwpState::PostLocationEvent.
2753  // This is required to be able to call JNI functions to create JDWP ids. To achieve this,
2754  // we temporarily clear the current thread's exception (if any) and will restore it after
2755  // the call.
2756  // Note: the only way to get a pending exception here is to suspend on a move-exception
2757  // instruction.
2758  Thread* const self = Thread::Current();
2759  StackHandleScope<1> hs(self);
2760  Handle<mirror::Throwable> pending_exception(hs.NewHandle(self->GetException()));
2761  self->ClearException();
2762  if (kIsDebugBuild && pending_exception.Get() != nullptr) {
2763    const DexFile::CodeItem* code_item = location.method->GetCodeItem();
2764    const Instruction* instr = Instruction::At(&code_item->insns_[location.dex_pc]);
2765    CHECK_EQ(Instruction::MOVE_EXCEPTION, instr->Opcode());
2766  }
2767
2768  gJdwpState->PostLocationEvent(&location, this_object, event_flags, return_value);
2769
2770  if (pending_exception.Get() != nullptr) {
2771    self->SetException(pending_exception.Get());
2772  }
2773}
2774
2775void Dbg::PostFieldAccessEvent(ArtMethod* m, int dex_pc,
2776                               mirror::Object* this_object, ArtField* f) {
2777  if (!IsDebuggerActive()) {
2778    return;
2779  }
2780  DCHECK(m != nullptr);
2781  DCHECK(f != nullptr);
2782  JDWP::EventLocation location;
2783  SetEventLocation(&location, m, dex_pc);
2784
2785  gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false);
2786}
2787
2788void Dbg::PostFieldModificationEvent(ArtMethod* m, int dex_pc,
2789                                     mirror::Object* this_object, ArtField* f,
2790                                     const JValue* field_value) {
2791  if (!IsDebuggerActive()) {
2792    return;
2793  }
2794  DCHECK(m != nullptr);
2795  DCHECK(f != nullptr);
2796  DCHECK(field_value != nullptr);
2797  JDWP::EventLocation location;
2798  SetEventLocation(&location, m, dex_pc);
2799
2800  gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
2801}
2802
2803/**
2804 * Finds the location where this exception will be caught. We walk the stack until we reach the
2805 * top frame; if no handler is found by then, the exception is considered uncaught.
2806 */
2807class CatchLocationFinder : public StackVisitor {
2808 public:
2809  CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
2810      SHARED_REQUIRES(Locks::mutator_lock_)
2811    : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2812      self_(self),
2813      exception_(exception),
2814      handle_scope_(self),
2815      this_at_throw_(handle_scope_.NewHandle<mirror::Object>(nullptr)),
2816      catch_method_(nullptr),
2817      throw_method_(nullptr),
2818      catch_dex_pc_(DexFile::kDexNoIndex),
2819      throw_dex_pc_(DexFile::kDexNoIndex) {
2820  }
2821
2822  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
2823    ArtMethod* method = GetMethod();
2824    DCHECK(method != nullptr);
2825    if (method->IsRuntimeMethod()) {
2826      // Ignore callee save method.
2827      DCHECK(method->IsCalleeSaveMethod());
2828      return true;
2829    }
2830
2831    uint32_t dex_pc = GetDexPc();
2832    if (throw_method_ == nullptr) {
2833      // First Java method found. It is either the method that threw the exception,
2834      // or the Java native method that is reporting an exception thrown by
2835      // native code.
2836      this_at_throw_.Assign(GetThisObject());
2837      throw_method_ = method;
2838      throw_dex_pc_ = dex_pc;
2839    }
2840
2841    if (dex_pc != DexFile::kDexNoIndex) {
2842      StackHandleScope<1> hs(self_);
2843      uint32_t found_dex_pc;
2844      Handle<mirror::Class> exception_class(hs.NewHandle(exception_->GetClass()));
2845      bool unused_clear_exception;
2846      found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception);
2847      if (found_dex_pc != DexFile::kDexNoIndex) {
2848        catch_method_ = method;
2849        catch_dex_pc_ = found_dex_pc;
2850        return false;  // End stack walk.
2851      }
2852    }
2853    return true;  // Continue stack walk.
2854  }
2855
2856  ArtMethod* GetCatchMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
2857    return catch_method_;
2858  }
2859
2860  ArtMethod* GetThrowMethod() SHARED_REQUIRES(Locks::mutator_lock_) {
2861    return throw_method_;
2862  }
2863
2864  mirror::Object* GetThisAtThrow() SHARED_REQUIRES(Locks::mutator_lock_) {
2865    return this_at_throw_.Get();
2866  }
2867
2868  uint32_t GetCatchDexPc() const {
2869    return catch_dex_pc_;
2870  }
2871
2872  uint32_t GetThrowDexPc() const {
2873    return throw_dex_pc_;
2874  }
2875
2876 private:
2877  Thread* const self_;
2878  const Handle<mirror::Throwable>& exception_;
2879  StackHandleScope<1> handle_scope_;
2880  MutableHandle<mirror::Object> this_at_throw_;
2881  ArtMethod* catch_method_;
2882  ArtMethod* throw_method_;
2883  uint32_t catch_dex_pc_;
2884  uint32_t throw_dex_pc_;
2885
2886  DISALLOW_COPY_AND_ASSIGN(CatchLocationFinder);
2887};
2888
2889void Dbg::PostException(mirror::Throwable* exception_object) {
2890  if (!IsDebuggerActive()) {
2891    return;
2892  }
2893  Thread* const self = Thread::Current();
2894  StackHandleScope<1> handle_scope(self);
2895  Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
2896  std::unique_ptr<Context> context(Context::Create());
2897  CatchLocationFinder clf(self, h_exception, context.get());
2898  clf.WalkStack(/* include_transitions */ false);
2899  JDWP::EventLocation exception_throw_location;
2900  SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc());
2901  JDWP::EventLocation exception_catch_location;
2902  SetEventLocation(&exception_catch_location, clf.GetCatchMethod(), clf.GetCatchDexPc());
2903
2904  gJdwpState->PostException(&exception_throw_location, h_exception.Get(), &exception_catch_location,
2905                            clf.GetThisAtThrow());
2906}
2907
2908void Dbg::PostClassPrepare(mirror::Class* c) {
2909  if (!IsDebuggerActive()) {
2910    return;
2911  }
2912  gJdwpState->PostClassPrepare(c);
2913}
2914
2915void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
2916                         ArtMethod* m, uint32_t dex_pc,
2917                         int event_flags, const JValue* return_value) {
2918  if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
2919    return;
2920  }
2921
2922  if (IsBreakpoint(m, dex_pc)) {
2923    event_flags |= kBreakpoint;
2924  }
2925
2926  // If the debugger is single-stepping one of our threads, check to
2927  // see if we're that thread and we've reached a step point.
2928  const SingleStepControl* single_step_control = thread->GetSingleStepControl();
2929  if (single_step_control != nullptr) {
2930    CHECK(!m->IsNative());
2931    if (single_step_control->GetStepDepth() == JDWP::SD_INTO) {
2932      // Step into method calls.  We break when the line number
2933      // or method pointer changes.  If we're in SS_MIN mode, we
2934      // always stop.
2935      if (single_step_control->GetMethod() != m) {
2936        event_flags |= kSingleStep;
2937        VLOG(jdwp) << "SS new method";
2938      } else if (single_step_control->GetStepSize() == JDWP::SS_MIN) {
2939        event_flags |= kSingleStep;
2940        VLOG(jdwp) << "SS new instruction";
2941      } else if (single_step_control->ContainsDexPc(dex_pc)) {
2942        event_flags |= kSingleStep;
2943        VLOG(jdwp) << "SS new line";
2944      }
2945    } else if (single_step_control->GetStepDepth() == JDWP::SD_OVER) {
2946      // Step over method calls.  We break when the line number is
2947      // different and the frame depth is <= the original frame
2948      // depth.  (We can't just compare on the method, because we
2949      // might get unrolled past it by an exception, and it's tricky
2950      // to identify recursion.)
2951
2952      int stack_depth = GetStackDepth(thread);
2953
2954      if (stack_depth < single_step_control->GetStackDepth()) {
2955        // Popped up one or more frames, always trigger.
2956        event_flags |= kSingleStep;
2957        VLOG(jdwp) << "SS method pop";
2958      } else if (stack_depth == single_step_control->GetStackDepth()) {
2959        // Same depth, see if we moved.
2960        if (single_step_control->GetStepSize() == JDWP::SS_MIN) {
2961          event_flags |= kSingleStep;
2962          VLOG(jdwp) << "SS new instruction";
2963        } else if (single_step_control->ContainsDexPc(dex_pc)) {
2964          event_flags |= kSingleStep;
2965          VLOG(jdwp) << "SS new line";
2966        }
2967      }
2968    } else {
2969      CHECK_EQ(single_step_control->GetStepDepth(), JDWP::SD_OUT);
2970      // Return from the current method.  We break when the frame
2971      // depth pops up.
2972
2973      // This differs from the "method exit" break in that it stops
2974      // with the PC at the next instruction in the returned-to
2975      // function, rather than the end of the returning function.
2976
2977      int stack_depth = GetStackDepth(thread);
2978      if (stack_depth < single_step_control->GetStackDepth()) {
2979        event_flags |= kSingleStep;
2980        VLOG(jdwp) << "SS method pop";
2981      }
2982    }
2983  }
2984
2985  // If there's something interesting going on, see if it matches one
2986  // of the debugger filters.
2987  if (event_flags != 0) {
2988    Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
2989  }
2990}
2991
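// Returns the debugger's reference counter for the given instrumentation event, or nullptr if the
// event is not one the debugger tracks.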
2992size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
2993  switch (instrumentation_event) {
2994    case instrumentation::Instrumentation::kMethodEntered:
2995      return &method_enter_event_ref_count_;
2996    case instrumentation::Instrumentation::kMethodExited:
2997      return &method_exit_event_ref_count_;
2998    case instrumentation::Instrumentation::kDexPcMoved:
2999      return &dex_pc_change_event_ref_count_;
3000    case instrumentation::Instrumentation::kFieldRead:
3001      return &field_read_event_ref_count_;
3002    case instrumentation::Instrumentation::kFieldWritten:
3003      return &field_write_event_ref_count_;
3004    case instrumentation::Instrumentation::kExceptionCaught:
3005      return &exception_catch_event_ref_count_;
3006    default:
3007      return nullptr;
3008  }
3009}
3010
3011// Process request while all mutator threads are suspended.
3012void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
3013  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
3014  switch (request.GetKind()) {
3015    case DeoptimizationRequest::kNothing:
3016      LOG(WARNING) << "Ignoring empty deoptimization request.";
3017      break;
3018    case DeoptimizationRequest::kRegisterForEvent:
3019      VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
3020                                 request.InstrumentationEvent());
3021      instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
3022      instrumentation_events_ |= request.InstrumentationEvent();
3023      break;
3024    case DeoptimizationRequest::kUnregisterForEvent:
3025      VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
3026                                 request.InstrumentationEvent());
3027      instrumentation->RemoveListener(&gDebugInstrumentationListener,
3028                                      request.InstrumentationEvent());
3029      instrumentation_events_ &= ~request.InstrumentationEvent();
3030      break;
3031    case DeoptimizationRequest::kFullDeoptimization:
3032      VLOG(jdwp) << "Deoptimize the world ...";
3033      instrumentation->DeoptimizeEverything(kDbgInstrumentationKey);
3034      VLOG(jdwp) << "Deoptimize the world DONE";
3035      break;
3036    case DeoptimizationRequest::kFullUndeoptimization:
3037      VLOG(jdwp) << "Undeoptimize the world ...";
3038      instrumentation->UndeoptimizeEverything(kDbgInstrumentationKey);
3039      VLOG(jdwp) << "Undeoptimize the world DONE";
3040      break;
3041    case DeoptimizationRequest::kSelectiveDeoptimization:
3042      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
3043      instrumentation->Deoptimize(request.Method());
3044      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
3045      break;
3046    case DeoptimizationRequest::kSelectiveUndeoptimization:
3047      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
3048      instrumentation->Undeoptimize(request.Method());
3049      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
3050      break;
3051    default:
3052      LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
3053      break;
3054  }
3055}
3056
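// Records a deoptimization request. Requests that require instrumentation changes are queued and
// applied later, while all mutator threads are suspended (see Dbg::ManageDeoptimization).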
3057void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
3058  if (req.GetKind() == DeoptimizationRequest::kNothing) {
3059    // Nothing to do.
3060    return;
3061  }
3062  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
3063  RequestDeoptimizationLocked(req);
3064}
3065
3066void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
3067  switch (req.GetKind()) {
3068    case DeoptimizationRequest::kRegisterForEvent: {
3069      DCHECK_NE(req.InstrumentationEvent(), 0u);
3070      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
3071      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
3072                                                req.InstrumentationEvent());
3073      if (*counter == 0) {
3074        VLOG(jdwp) << StringPrintf("Queue request #%zu to start listening to instrumentation event 0x%x",
3075                                   deoptimization_requests_.size(), req.InstrumentationEvent());
3076        deoptimization_requests_.push_back(req);
3077      }
3078      *counter = *counter + 1;
3079      break;
3080    }
3081    case DeoptimizationRequest::kUnregisterForEvent: {
3082      DCHECK_NE(req.InstrumentationEvent(), 0u);
3083      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
3084      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
3085                                                req.InstrumentationEvent());
3086      *counter = *counter - 1;
3087      if (*counter == 0) {
3088        VLOG(jdwp) << StringPrintf("Queue request #%zu to stop listening to instrumentation event 0x%x",
3089                                   deoptimization_requests_.size(), req.InstrumentationEvent());
3090        deoptimization_requests_.push_back(req);
3091      }
3092      break;
3093    }
3094    case DeoptimizationRequest::kFullDeoptimization: {
3095      DCHECK(req.Method() == nullptr);
3096      if (full_deoptimization_event_count_ == 0) {
3097        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3098                   << " for full deoptimization";
3099        deoptimization_requests_.push_back(req);
3100      }
3101      ++full_deoptimization_event_count_;
3102      break;
3103    }
3104    case DeoptimizationRequest::kFullUndeoptimization: {
3105      DCHECK(req.Method() == nullptr);
3106      DCHECK_GT(full_deoptimization_event_count_, 0U);
3107      --full_deoptimization_event_count_;
3108      if (full_deoptimization_event_count_ == 0) {
3109        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3110                   << " for full undeoptimization";
3111        deoptimization_requests_.push_back(req);
3112      }
3113      break;
3114    }
3115    case DeoptimizationRequest::kSelectiveDeoptimization: {
3116      DCHECK(req.Method() != nullptr);
3117      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3118                 << " for deoptimization of " << PrettyMethod(req.Method());
3119      deoptimization_requests_.push_back(req);
3120      break;
3121    }
3122    case DeoptimizationRequest::kSelectiveUndeoptimization: {
3123      DCHECK(req.Method() != nullptr);
3124      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3125                 << " for undeoptimization of " << PrettyMethod(req.Method());
3126      deoptimization_requests_.push_back(req);
3127      break;
3128    }
3129    default: {
3130      LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
3131      break;
3132    }
3133  }
3134}
3135
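// Applies all pending deoptimization requests, suspending all mutator threads while the
// instrumentation is updated.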
3136void Dbg::ManageDeoptimization() {
3137  Thread* const self = Thread::Current();
3138  {
3139    // Avoid suspend/resume if there is no pending request.
3140    MutexLock mu(self, *Locks::deoptimization_lock_);
3141    if (deoptimization_requests_.empty()) {
3142      return;
3143    }
3144  }
3145  CHECK_EQ(self->GetState(), kRunnable);
3146  self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
3147  // We need to suspend mutator threads first.
3148  Runtime* const runtime = Runtime::Current();
3149  runtime->GetThreadList()->SuspendAll(__FUNCTION__);
3150  const ThreadState old_state = self->SetStateUnsafe(kRunnable);
3151  {
3152    MutexLock mu(self, *Locks::deoptimization_lock_);
3153    size_t req_index = 0;
3154    for (DeoptimizationRequest& request : deoptimization_requests_) {
3155      VLOG(jdwp) << "Process deoptimization request #" << req_index++;
3156      ProcessDeoptimizationRequest(request);
3157    }
3158    deoptimization_requests_.clear();
3159  }
3160  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
3161  runtime->GetThreadList()->ResumeAll();
3162  self->TransitionFromSuspendedToRunnable();
3163}
3164
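// Returns true if the method's code may have been inlined into callers by the compiler, in which
// case setting a breakpoint on it requires full deoptimization.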
3165static bool IsMethodPossiblyInlined(Thread* self, ArtMethod* m)
3166    SHARED_REQUIRES(Locks::mutator_lock_) {
3167  const DexFile::CodeItem* code_item = m->GetCodeItem();
3168  if (code_item == nullptr) {
3169    // TODO: We should not be asked to watch a location in a native or abstract method, so the
3170    // code item should never be null. We could just check that we never encounter this case.
3171    return false;
3172  }
3173  // Note: method verifier may cause thread suspension.
3174  self->AssertThreadSuspensionIsAllowable();
3175  StackHandleScope<2> hs(self);
3176  mirror::Class* declaring_class = m->GetDeclaringClass();
3177  Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
3178  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
3179  verifier::MethodVerifier verifier(self, dex_cache->GetDexFile(), dex_cache, class_loader,
3180                                    &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
3181                                    m->GetAccessFlags(), false, true, false, true);
3182  // Note: we don't need to verify the method.
3183  return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
3184}
3185
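// Returns the first breakpoint registered on the given method, or nullptr if there is none.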
3186static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m)
3187    SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3188  for (Breakpoint& breakpoint : gBreakpoints) {
3189    if (breakpoint.Method() == m) {
3190      return &breakpoint;
3191    }
3192  }
3193  return nullptr;
3194}
3195
3196bool Dbg::MethodHasAnyBreakpoints(ArtMethod* method) {
3197  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
3198  return FindFirstBreakpointForMethod(method) != nullptr;
3199}
3200
3201// Sanity checks all existing breakpoints on the same method.
3202static void SanityCheckExistingBreakpoints(ArtMethod* m,
3203                                           DeoptimizationRequest::Kind deoptimization_kind)
3204    SHARED_REQUIRES(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3205  for (const Breakpoint& breakpoint : gBreakpoints) {
3206    if (breakpoint.Method() == m) {
3207      CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
3208    }
3209  }
3210  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
3211  if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
3212    // We should have deoptimized everything but not "selectively" deoptimized this method.
3213    CHECK(instrumentation->AreAllMethodsDeoptimized());
3214    CHECK(!instrumentation->IsDeoptimized(m));
3215  } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3216    // We should have "selectively" deoptimized this method.
3217    // Note: while we have not deoptimized everything for this method, we may have done it for
3218    // another event.
3219    CHECK(instrumentation->IsDeoptimized(m));
3220  } else {
3221    // This method does not require deoptimization.
3222    CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
3223    CHECK(!instrumentation->IsDeoptimized(m));
3224  }
3225}
3226
3227// Returns the deoptimization kind required to set a breakpoint in a method.
3228// If a breakpoint has already been set, we also return the first breakpoint
3229// through the given 'existing_brkpt' pointer.
3230static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
3231                                                                 ArtMethod* m,
3232                                                                 const Breakpoint** existing_brkpt)
3233    SHARED_REQUIRES(Locks::mutator_lock_) {
3234  if (!Dbg::RequiresDeoptimization()) {
3235    // We already run in interpreter-only mode so we don't need to deoptimize anything.
3236    VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
3237               << PrettyMethod(m);
3238    return DeoptimizationRequest::kNothing;
3239  }
3240  const Breakpoint* first_breakpoint;
3241  {
3242    ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
3243    first_breakpoint = FindFirstBreakpointForMethod(m);
3244    *existing_brkpt = first_breakpoint;
3245  }
3246
3247  if (first_breakpoint == nullptr) {
3248    // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
3249    // inlined, we deoptimize everything; otherwise we deoptimize only this method.
3250    // Note: IsMethodPossiblyInlined goes into the method verifier and may cause thread suspension.
3251    // Therefore we must not hold any lock when we call it.
3252    bool need_full_deoptimization = IsMethodPossiblyInlined(self, m);
3253    if (need_full_deoptimization) {
3254      VLOG(jdwp) << "Need full deoptimization because of possible inlining of method "
3255                 << PrettyMethod(m);
3256      return DeoptimizationRequest::kFullDeoptimization;
3257    } else {
3258      // We don't need to deoptimize if the method has not been compiled.
3259      ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
3260      const bool is_compiled = class_linker->GetOatMethodQuickCodeFor(m) != nullptr;
3261      if (is_compiled) {
3262        // If the method may be called through its direct code pointer (without loading
3263        // its updated entrypoint), we need full deoptimization to not miss the breakpoint.
3264        if (class_linker->MayBeCalledWithDirectCodePointer(m)) {
3265          VLOG(jdwp) << "Need full deoptimization because of possible direct code call "
3266                     << "into image for compiled method " << PrettyMethod(m);
3267          return DeoptimizationRequest::kFullDeoptimization;
3268        } else {
3269          VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
3270          return DeoptimizationRequest::kSelectiveDeoptimization;
3271        }
3272      } else {
3273        // Method is not compiled: we don't need to deoptimize.
3274        VLOG(jdwp) << "No need for deoptimization for non-compiled method " << PrettyMethod(m);
3275        return DeoptimizationRequest::kNothing;
3276      }
3277    }
3278  } else {
3279    // There is at least one breakpoint for this method: we don't need to deoptimize.
3280    // Let's check that all breakpoints are configured the same way for deoptimization.
3281    VLOG(jdwp) << "Breakpoint already set: no deoptimization is required";
3282    DeoptimizationRequest::Kind deoptimization_kind = first_breakpoint->GetDeoptimizationKind();
3283    if (kIsDebugBuild) {
3284      ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
3285      SanityCheckExistingBreakpoints(m, deoptimization_kind);
3286    }
3287    return DeoptimizationRequest::kNothing;
3288  }
3289}
3290
3291// Installs a breakpoint at the specified location. Also indicates through the deoptimization
3292// request if we need to deoptimize.
3293void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3294  Thread* const self = Thread::Current();
3295  ArtMethod* m = FromMethodId(location->method_id);
3296  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3297
3298  const Breakpoint* existing_breakpoint = nullptr;
3299  const DeoptimizationRequest::Kind deoptimization_kind =
3300      GetRequiredDeoptimizationKind(self, m, &existing_breakpoint);
3301  req->SetKind(deoptimization_kind);
3302  if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3303    req->SetMethod(m);
3304  } else {
3305    CHECK(deoptimization_kind == DeoptimizationRequest::kNothing ||
3306          deoptimization_kind == DeoptimizationRequest::kFullDeoptimization);
3307    req->SetMethod(nullptr);
3308  }
3309
3310  {
3311    WriterMutexLock mu(self, *Locks::breakpoint_lock_);
3312    // If there is at least one existing breakpoint on the same method, the new breakpoint
3313    // must have the same deoptimization kind as the existing breakpoint(s).
3314    DeoptimizationRequest::Kind breakpoint_deoptimization_kind;
3315    if (existing_breakpoint != nullptr) {
3316      breakpoint_deoptimization_kind = existing_breakpoint->GetDeoptimizationKind();
3317    } else {
3318      breakpoint_deoptimization_kind = deoptimization_kind;
3319    }
3320    gBreakpoints.push_back(Breakpoint(m, location->dex_pc, breakpoint_deoptimization_kind));
3321    VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
3322               << gBreakpoints[gBreakpoints.size() - 1];
3323  }
3324}
3325
3326// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
3327// request if we need to undeoptimize.
3328void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3329  WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
3330  ArtMethod* m = FromMethodId(location->method_id);
3331  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3332  DeoptimizationRequest::Kind deoptimization_kind = DeoptimizationRequest::kNothing;
3333  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
3334    if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
3335      VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
3336      deoptimization_kind = gBreakpoints[i].GetDeoptimizationKind();
3337      DCHECK_EQ(deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization,
3338                Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3339      gBreakpoints.erase(gBreakpoints.begin() + i);
3340      break;
3341    }
3342  }
3343  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3344  if (existing_breakpoint == nullptr) {
3345    // There are no more breakpoints on this method: we need to undeoptimize.
3346    if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
3347      // This method required full deoptimization: we need to undeoptimize everything.
3348      req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
3349      req->SetMethod(nullptr);
3350    } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3351      // This method required selective deoptimization: we need to undeoptimize only that method.
3352      req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
3353      req->SetMethod(m);
3354    } else {
3355      // This method had no need for deoptimization: do nothing.
3356      CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
3357      req->SetKind(DeoptimizationRequest::kNothing);
3358      req->SetMethod(nullptr);
3359    }
3360  } else {
3361    // There is at least one breakpoint for this method: we don't need to undeoptimize.
3362    req->SetKind(DeoptimizationRequest::kNothing);
3363    req->SetMethod(nullptr);
3364    if (kIsDebugBuild) {
3365      SanityCheckExistingBreakpoints(m, deoptimization_kind);
3366    }
3367  }
3368}
3369
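// Returns true if a call to the given method must run under the interpreter so that a pending
// step-into can be reported for it.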
3370bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m) {
3371  const SingleStepControl* const ssc = thread->GetSingleStepControl();
3372  if (ssc == nullptr) {
3373    // If we are not single-stepping, then we don't have to force interpreter.
3374    return false;
3375  }
3376  if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
3377    // If we are in interpreter only mode, then we don't have to force interpreter.
3378    return false;
3379  }
3380
3381  if (!m->IsNative() && !m->IsProxyMethod()) {
3382    // If we want to step into a method, then we have to force interpreter on that call.
3383    if (ssc->GetStepDepth() == JDWP::SD_INTO) {
3384      return true;
3385    }
3386  }
3387  return false;
3388}
3389
3390bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
3391  instrumentation::Instrumentation* const instrumentation =
3392      Runtime::Current()->GetInstrumentation();
3393  // If we are in interpreter only mode, then we don't have to force interpreter.
3394  if (instrumentation->InterpretOnly()) {
3395    return false;
3396  }
3397  // We can only interpret pure Java methods.
3398  if (m->IsNative() || m->IsProxyMethod()) {
3399    return false;
3400  }
3401  const SingleStepControl* const ssc = thread->GetSingleStepControl();
3402  if (ssc != nullptr) {
3403    // If we want to step into a method, then we have to force interpreter on that call.
3404    if (ssc->GetStepDepth() == JDWP::SD_INTO) {
3405      return true;
3406    }
3407    // If a step in or step over was issued from a static initializer that was
3408    // implicitly invoked by calling a static method, then we need to step into
3409    // that method. A stack depth lower than the one recorded by the single
3410    // step control indicates that the step originates from the static
3411    // initializer.
3412    if (ssc->GetStepDepth() != JDWP::SD_OUT &&
3413        ssc->GetStackDepth() > GetStackDepth(thread)) {
3414      return true;
3415    }
3416  }
3417  // There are cases where we have to force interpreter on deoptimized methods,
3418  // because the call may not go through the entry point that was replaced by
3419  // the deoptimization, but may instead directly invoke the method's compiled
3420  // code.
3421  return instrumentation->IsDeoptimized(m);
3422}
3423
3424bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
3425  // The upcall can be null and in that case we don't need to do anything.
3426  if (m == nullptr) {
3427    return false;
3428  }
3429  instrumentation::Instrumentation* const instrumentation =
3430      Runtime::Current()->GetInstrumentation();
3431  // If we are in interpreter only mode, then we don't have to force interpreter.
3432  if (instrumentation->InterpretOnly()) {
3433    return false;
3434  }
3435  // We can only interpret pure Java methods.
3436  if (m->IsNative() || m->IsProxyMethod()) {
3437    return false;
3438  }
3439  const SingleStepControl* const ssc = thread->GetSingleStepControl();
3440  if (ssc != nullptr) {
3441    // If a step out was issued from a static initializer that was implicitly
3442    // invoked by calling a static method, then we need to step into the caller
3443    // of that method. A stack depth lower than the one recorded by the single
3444    // step control indicates that the step originates from the static
3445    // initializer.
3446    if (ssc->GetStepDepth() == JDWP::SD_OUT &&
3447        ssc->GetStackDepth() > GetStackDepth(thread)) {
3448      return true;
3449    }
3450  }
3451    // If we are returning from a static initializer that was implicitly
3452    // invoked by calling a static method, and the caller is deoptimized,
3453    // then we have to deoptimize the stack without forcing interpreter
3454    // on the static method that was called originally. This problem can
3455    // be solved easily by forcing instrumentation on the called method,
3456    // because the instrumentation exit hook will recognize the need for
3457    // stack deoptimization by calling IsForcedInterpreterNeededForUpcall.
3458  return instrumentation->IsDeoptimized(m);
3459}
3460
3461bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) {
3462  // The upcall can be null and in that case we don't need to do anything.
3463  if (m == nullptr) {
3464    return false;
3465  }
3466  instrumentation::Instrumentation* const instrumentation =
3467      Runtime::Current()->GetInstrumentation();
3468  // If we are in interpreter only mode, then we don't have to force interpreter.
3469  if (instrumentation->InterpretOnly()) {
3470    return false;
3471  }
3472  // We can only interpret pure Java methods.
3473  if (m->IsNative() || m->IsProxyMethod()) {
3474    return false;
3475  }
3476  const SingleStepControl* const ssc = thread->GetSingleStepControl();
3477  if (ssc != nullptr) {
3478    // The debugger is not interested in what is happening below the level
3479    // of the step, thus we only force interpreter when we are not below
3480    // the step.
3481    if (ssc->GetStackDepth() >= GetStackDepth(thread)) {
3482      return true;
3483    }
3484  }
3485  // We have to require stack deoptimization if the upcall is deoptimized.
3486  return instrumentation->IsDeoptimized(m);
3487}
3488
3489// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
3490// cause suspension if the thread is the current thread.
3491class ScopedThreadSuspension {
3492 public:
3493  ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
3494      REQUIRES(!Locks::thread_list_lock_)
3495      SHARED_REQUIRES(Locks::mutator_lock_) :
3496      thread_(nullptr),
3497      error_(JDWP::ERR_NONE),
3498      self_suspend_(false),
3499      other_suspend_(false) {
3500    ScopedObjectAccessUnchecked soa(self);
3501    thread_ = DecodeThread(soa, thread_id, &error_);
3502    if (error_ == JDWP::ERR_NONE) {
3503      if (thread_ == soa.Self()) {
3504        self_suspend_ = true;
3505      } else {
3506        soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3507        jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
3508        bool timed_out;
3509        ThreadList* thread_list = Runtime::Current()->GetThreadList();
3510        Thread* suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true,
3511                                                                    &timed_out);
3512        CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
3513        if (suspended_thread == nullptr) {
3514          // Thread terminated from under us while suspending.
3515          error_ = JDWP::ERR_INVALID_THREAD;
3516        } else {
3517          CHECK_EQ(suspended_thread, thread_);
3518          other_suspend_ = true;
3519        }
3520      }
3521    }
3522  }
3523
3524  Thread* GetThread() const {
3525    return thread_;
3526  }
3527
3528  JDWP::JdwpError GetError() const {
3529    return error_;
3530  }
3531
3532  ~ScopedThreadSuspension() {
3533    if (other_suspend_) {
3534      Runtime::Current()->GetThreadList()->Resume(thread_, true);
3535    }
3536  }
3537
3538 private:
3539  Thread* thread_;
3540  JDWP::JdwpError error_;
3541  bool self_suspend_;
3542  bool other_suspend_;
3543};
3544
3545JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
3546                                   JDWP::JdwpStepDepth step_depth) {
3547  Thread* self = Thread::Current();
3548  ScopedThreadSuspension sts(self, thread_id);
3549  if (sts.GetError() != JDWP::ERR_NONE) {
3550    return sts.GetError();
3551  }
3552
3553  // Work out what ArtMethod* we're in, the current line number, and how deep the stack currently
3554  // is for step-out.
3555  struct SingleStepStackVisitor : public StackVisitor {
3556    explicit SingleStepStackVisitor(Thread* thread) SHARED_REQUIRES(Locks::mutator_lock_)
3557        : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
3558          stack_depth(0),
3559          method(nullptr),
3560          line_number(-1) {}
3561
3562    // TODO: Enable annotalysis. We know the lock is held in the constructor, but the
3563    // abstraction confuses annotalysis.
3564    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
3565      ArtMethod* m = GetMethod();
3566      if (!m->IsRuntimeMethod()) {
3567        ++stack_depth;
3568        if (method == nullptr) {
3569          mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
3570          method = m;
3571          if (dex_cache != nullptr) {
3572            const DexFile& dex_file = *dex_cache->GetDexFile();
3573            line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
3574          }
3575        }
3576      }
3577      return true;
3578    }
3579
3580    int stack_depth;
3581    ArtMethod* method;
3582    int32_t line_number;
3583  };
3584
3585  Thread* const thread = sts.GetThread();
3586  SingleStepStackVisitor visitor(thread);
3587  visitor.WalkStack();
3588
3589  // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
3590  struct DebugCallbackContext {
3591    explicit DebugCallbackContext(SingleStepControl* single_step_control_cb,
3592                                  int32_t line_number_cb, const DexFile::CodeItem* code_item)
3593      : single_step_control_(single_step_control_cb), line_number_(line_number_cb),
3594        code_item_(code_item), last_pc_valid(false), last_pc(0) {
3595    }
3596
3597    static bool Callback(void* raw_context, uint32_t address, uint32_t line_number_cb) {
3598      DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
3599      if (static_cast<int32_t>(line_number_cb) == context->line_number_) {
3600        if (!context->last_pc_valid) {
3601          // Everything from this address until the next line change is ours.
3602          context->last_pc = address;
3603          context->last_pc_valid = true;
3604        }
3605        // Otherwise, if we're already in a valid range for this line,
3606        // just keep going (shouldn't really happen)...
3607      } else if (context->last_pc_valid) {  // and the line number is new
3608        // Add everything from the last entry up until here to the set
3609        for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) {
3610          context->single_step_control_->AddDexPc(dex_pc);
3611        }
3612        context->last_pc_valid = false;
3613      }
3614      return false;  // There may be multiple entries for any given line.
3615    }
3616
3617    ~DebugCallbackContext() {
3618      // If the line number was the last in the position table...
3619      if (last_pc_valid) {
3620        size_t end = code_item_->insns_size_in_code_units_;
3621        for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
3622          single_step_control_->AddDexPc(dex_pc);
3623        }
3624      }
3625    }
3626
3627    SingleStepControl* const single_step_control_;
3628    const int32_t line_number_;
3629    const DexFile::CodeItem* const code_item_;
3630    bool last_pc_valid;
3631    uint32_t last_pc;
3632  };
3633
3634  // Allocate single step.
3635  SingleStepControl* single_step_control =
3636      new (std::nothrow) SingleStepControl(step_size, step_depth,
3637                                           visitor.stack_depth, visitor.method);
3638  if (single_step_control == nullptr) {
3639    LOG(ERROR) << "Failed to allocate SingleStepControl";
3640    return JDWP::ERR_OUT_OF_MEMORY;
3641  }
3642
3643  ArtMethod* m = single_step_control->GetMethod();
3644  const int32_t line_number = visitor.line_number;
3645  // Note: if the thread is not running Java code (pure native thread), there is no "current"
3646  // method on the stack (and no line number either).
3647  if (m != nullptr && !m->IsNative()) {
3648    const DexFile::CodeItem* const code_item = m->GetCodeItem();
3649    DebugCallbackContext context(single_step_control, line_number, code_item);
3650    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
3651                                     DebugCallbackContext::Callback, nullptr, &context);
3652  }
3653
3654  // Activate single-step in the thread.
3655  thread->ActivateSingleStepControl(single_step_control);
3656
3657  if (VLOG_IS_ON(jdwp)) {
3658    VLOG(jdwp) << "Single-step thread: " << *thread;
3659    VLOG(jdwp) << "Single-step step size: " << single_step_control->GetStepSize();
3660    VLOG(jdwp) << "Single-step step depth: " << single_step_control->GetStepDepth();
3661    VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->GetMethod());
3662    VLOG(jdwp) << "Single-step current line: " << line_number;
3663    VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->GetStackDepth();
3664    VLOG(jdwp) << "Single-step dex_pc values:";
3665    for (uint32_t dex_pc : single_step_control->GetDexPcs()) {
3666      VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
3667    }
3668  }
3669
3670  return JDWP::ERR_NONE;
3671}
3672
3673void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
3674  ScopedObjectAccessUnchecked soa(Thread::Current());
3675  JDWP::JdwpError error;
3676  Thread* thread = DecodeThread(soa, thread_id, &error);
3677  if (error == JDWP::ERR_NONE) {
3678    thread->DeactivateSingleStepControl();
3679  }
3680}
3681
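// Maps a JDWP type tag to the corresponding shorty character used in method signatures.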
3682static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
3683  switch (tag) {
3684    default:
3685      LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
3686      UNREACHABLE();
3687
3688    // Primitives.
3689    case JDWP::JT_BYTE:    return 'B';
3690    case JDWP::JT_CHAR:    return 'C';
3691    case JDWP::JT_FLOAT:   return 'F';
3692    case JDWP::JT_DOUBLE:  return 'D';
3693    case JDWP::JT_INT:     return 'I';
3694    case JDWP::JT_LONG:    return 'J';
3695    case JDWP::JT_SHORT:   return 'S';
3696    case JDWP::JT_VOID:    return 'V';
3697    case JDWP::JT_BOOLEAN: return 'Z';
3698
3699    // Reference types.
3700    case JDWP::JT_ARRAY:
3701    case JDWP::JT_OBJECT:
3702    case JDWP::JT_STRING:
3703    case JDWP::JT_THREAD:
3704    case JDWP::JT_THREAD_GROUP:
3705    case JDWP::JT_CLASS_LOADER:
3706    case JDWP::JT_CLASS_OBJECT:
3707      return 'L';
3708  }
3709}
3710
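// Validates the debugger's method invocation request and attaches a DebugInvokeReq to the target
// thread; the target thread executes the method once it is resumed (see Dbg::ExecuteMethod).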
3711JDWP::JdwpError Dbg::PrepareInvokeMethod(uint32_t request_id, JDWP::ObjectId thread_id,
3712                                         JDWP::ObjectId object_id, JDWP::RefTypeId class_id,
3713                                         JDWP::MethodId method_id, uint32_t arg_count,
3714                                         uint64_t arg_values[], JDWP::JdwpTag* arg_types,
3715                                         uint32_t options) {
3716  Thread* const self = Thread::Current();
3717  CHECK_EQ(self, GetDebugThread()) << "This must be called by the JDWP thread";
3718
3719  ThreadList* thread_list = Runtime::Current()->GetThreadList();
3720  Thread* targetThread = nullptr;
3721  {
3722    ScopedObjectAccessUnchecked soa(self);
3723    JDWP::JdwpError error;
3724    targetThread = DecodeThread(soa, thread_id, &error);
3725    if (error != JDWP::ERR_NONE) {
3726      LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
3727      return error;
3728    }
3729    if (targetThread->GetInvokeReq() != nullptr) {
3730      // Thread is already invoking a method on behalf of the debugger.
3731      LOG(ERROR) << "InvokeMethod request for thread already invoking a method: " << *targetThread;
3732      return JDWP::ERR_ALREADY_INVOKING;
3733    }
3734    if (!targetThread->IsReadyForDebugInvoke()) {
3735      // Thread is not suspended by an event so it cannot invoke a method.
3736      LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
3737      return JDWP::ERR_INVALID_THREAD;
3738    }
3739
3740    /*
3741     * We currently have a bug where we don't successfully resume the
3742     * target thread if the suspend count is too deep.  We're expected to
3743     * require one "resume" for each "suspend", but when asked to execute
3744     * a method we have to resume fully and then re-suspend it back to the
3745     * same level.  (The easiest way to cause this is to type "suspend"
3746     * multiple times in jdb.)
3747     *
3748     * It's unclear what this means when the event specifies "resume all"
3749     * and some threads are suspended more deeply than others.  This is
3750     * a rare problem, so for now we just prevent it from hanging forever
3751     * by rejecting the method invocation request.  Without this, we will
3752     * be stuck waiting on a suspended thread.
3753     */
3754    int suspend_count;
3755    {
3756      MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
3757      suspend_count = targetThread->GetSuspendCount();
3758    }
3759    if (suspend_count > 1) {
3760      LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count;
3761      return JDWP::ERR_THREAD_SUSPENDED;  // Probably not expected here.
3762    }
3763
3764    mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id, &error);
3765    if (error != JDWP::ERR_NONE) {
3766      return JDWP::ERR_INVALID_OBJECT;
3767    }
3768
3769    gRegistry->Get<mirror::Object*>(thread_id, &error);
3770    if (error != JDWP::ERR_NONE) {
3771      return JDWP::ERR_INVALID_OBJECT;
3772    }
3773
3774    mirror::Class* c = DecodeClass(class_id, &error);
3775    if (c == nullptr) {
3776      return error;
3777    }
3778
3779    ArtMethod* m = FromMethodId(method_id);
3780    if (m->IsStatic() != (receiver == nullptr)) {
3781      return JDWP::ERR_INVALID_METHODID;
3782    }
3783    if (m->IsStatic()) {
3784      if (m->GetDeclaringClass() != c) {
3785        return JDWP::ERR_INVALID_METHODID;
3786      }
3787    } else {
3788      if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
3789        return JDWP::ERR_INVALID_METHODID;
3790      }
3791    }
3792
3793    // Check the argument list matches the method.
3794    uint32_t shorty_len = 0;
3795    const char* shorty = m->GetShorty(&shorty_len);
3796    if (shorty_len - 1 != arg_count) {
3797      return JDWP::ERR_ILLEGAL_ARGUMENT;
3798    }
3799
3800    {
3801      StackHandleScope<2> hs(soa.Self());
3802      HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
3803      HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
3804      const DexFile::TypeList* types = m->GetParameterTypeList();
3805      for (size_t i = 0; i < arg_count; ++i) {
3806        if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
3807          return JDWP::ERR_ILLEGAL_ARGUMENT;
3808        }
3809
3810        if (shorty[i + 1] == 'L') {
3811          // Did we really get an argument of an appropriate reference type?
3812          mirror::Class* parameter_type =
3813              m->GetClassFromTypeIndex(types->GetTypeItem(i).type_idx_, true);
3814          mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i], &error);
3815          if (error != JDWP::ERR_NONE) {
3816            return JDWP::ERR_INVALID_OBJECT;
3817          }
3818          if (argument != nullptr && !argument->InstanceOf(parameter_type)) {
3819            return JDWP::ERR_ILLEGAL_ARGUMENT;
3820          }
3821
3822          // Turn the on-the-wire ObjectId into a jobject.
3823          jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
3824          v.l = gRegistry->GetJObject(arg_values[i]);
3825        }
3826      }
3827    }
3828
3829    // Allocates a DebugInvokeReq.
3830    DebugInvokeReq* req = new (std::nothrow) DebugInvokeReq(request_id, thread_id, receiver, c, m,
3831                                                            options, arg_values, arg_count);
3832    if (req == nullptr) {
3833      LOG(ERROR) << "Failed to allocate DebugInvokeReq";
3834      return JDWP::ERR_OUT_OF_MEMORY;
3835    }
3836
3837    // Attaches the DebugInvokeReq to the target thread so it executes the method when
3838    // it is resumed. Once the invocation completes, the target thread will delete it before
3839    // suspending itself (see ThreadList::SuspendSelfForDebugger).
3840    targetThread->SetDebugInvokeReq(req);
3841  }
3842
3843  // The fact that we've released the thread list lock is a bit risky: if the thread goes away,
3844  // we're left high and dry. However, we must release the lock before the UndoDebuggerSuspensions
3845  // call below.
3846
3847  if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3848    VLOG(jdwp) << "      Resuming all threads";
3849    thread_list->UndoDebuggerSuspensions();
3850  } else {
3851    VLOG(jdwp) << "      Resuming event thread only";
3852    thread_list->Resume(targetThread, true);
3853  }
3854
3855  return JDWP::ERR_NONE;
3856}
3857
3858void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
3859  Thread* const self = Thread::Current();
3860  CHECK_NE(self, GetDebugThread()) << "This must be called by the event thread";
3861
3862  ScopedObjectAccess soa(self);
3863
3864  // We can be called while an exception is pending. We need
3865  // to preserve that across the method invocation.
3866  StackHandleScope<1> hs(soa.Self());
3867  Handle<mirror::Throwable> old_exception = hs.NewHandle(soa.Self()->GetException());
3868  soa.Self()->ClearException();
3869
3870  // Execute the method, then send the reply to the debugger.
3871  ExecuteMethodWithoutPendingException(soa, pReq);
3872
3873  // If an exception was pending before the invoke, restore it now.
3874  if (old_exception.Get() != nullptr) {
3875    soa.Self()->SetException(old_exception.Get());
3876  }
3877}
3878
3879// Helper function: write a variable-width value into the output buffer.
3880static void WriteValue(JDWP::ExpandBuf* pReply, int width, uint64_t value) {
3881  switch (width) {
3882    case 1:
3883      expandBufAdd1(pReply, value);
3884      break;
3885    case 2:
3886      expandBufAdd2BE(pReply, value);
3887      break;
3888    case 4:
3889      expandBufAdd4BE(pReply, value);
3890      break;
3891    case 8:
3892      expandBufAdd8BE(pReply, value);
3893      break;
3894    default:
3895      LOG(FATAL) << width;
3896      UNREACHABLE();
3897  }
3898}
3899
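// Performs the debugger-requested invocation on the event thread and fills in the JDWP reply.
// The caller guarantees there is no pending exception.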
3900void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq) {
3901  soa.Self()->AssertNoPendingException();
3902
3903  // Translate the method through the vtable, unless the debugger wants to suppress it.
3904  ArtMethod* m = pReq->method;
3905  size_t image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
3906  if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
3907    ArtMethod* actual_method =
3908        pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size);
3909    if (actual_method != m) {
3910      VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m)
3911                 << " to " << PrettyMethod(actual_method);
3912      m = actual_method;
3913    }
3914  }
3915  VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m)
3916             << " receiver=" << pReq->receiver.Read()
3917             << " arg_count=" << pReq->arg_count;
3918  CHECK(m != nullptr);
3919
3920  static_assert(sizeof(jvalue) == sizeof(uint64_t), "jvalue and uint64_t have different sizes.");
3921
3922  // Invoke the method.
3923  ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read()));
3924  JValue result = InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(m),
3925                                    reinterpret_cast<jvalue*>(pReq->arg_values.get()));
3926
3927  // Prepare JDWP ids for the reply.
3928  JDWP::JdwpTag result_tag = BasicTagFromDescriptor(m->GetShorty());
3929  const bool is_object_result = (result_tag == JDWP::JT_OBJECT);
3930  StackHandleScope<2> hs(soa.Self());
3931  Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
3932  Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
3933  soa.Self()->ClearException();
3934
3935  if (!IsDebuggerActive()) {
3936    // The debugger detached: we must not re-suspend threads. We also don't need to fill the reply
3937    // because it won't be sent either.
3938    return;
3939  }
3940
3941  JDWP::ObjectId exceptionObjectId = gRegistry->Add(exception);
3942  uint64_t result_value = 0;
3943  if (exceptionObjectId != 0) {
3944    VLOG(jdwp) << "  JDWP invocation returning with exception=" << exception.Get()
3945               << " " << exception->Dump();
3946    result_value = 0;
3947  } else if (is_object_result) {
3948    /* if no exception was thrown, examine object result more closely */
3949    JDWP::JdwpTag new_tag = TagFromObject(soa, object_result.Get());
3950    if (new_tag != result_tag) {
3951      VLOG(jdwp) << "  JDWP promoted result from " << result_tag << " to " << new_tag;
3952      result_tag = new_tag;
3953    }
3954
3955    // Register the object in the registry and reference it by its ObjectId. This ensures
3956    // GC safety and prevents accessing a stale reference if the object is moved.
3957    result_value = gRegistry->Add(object_result.Get());
3958  } else {
3959    // Primitive result.
3960    DCHECK(IsPrimitiveTag(result_tag));
3961    result_value = result.GetJ();
3962  }
3963  const bool is_constructor = m->IsConstructor() && !m->IsStatic();
3964  if (is_constructor) {
3965    // If we invoked a constructor (which actually returns void), return the receiver,
3966    // unless we threw, in which case we return null.
3967    result_tag = JDWP::JT_OBJECT;
3968    if (exceptionObjectId == 0) {
3969      // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the
3970      // object registry.
3971      result_value = GetObjectRegistry()->Add(pReq->receiver.Read());
3972    } else {
3973      result_value = 0;
3974    }
3975  }
3976
3977  // Suspend other threads if the invoke is not single-threaded.
3978  if ((pReq->options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3979    soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3980    VLOG(jdwp) << "      Suspending all threads";
3981    Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
3982    soa.Self()->TransitionFromSuspendedToRunnable();
3983  }
3984
3985  VLOG(jdwp) << "  --> returned " << result_tag
3986             << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", result_value,
3987                             exceptionObjectId);
3988
3989  // Show detailed debug output.
3990  if (result_tag == JDWP::JT_STRING && exceptionObjectId == 0) {
3991    if (result_value != 0) {
3992      if (VLOG_IS_ON(jdwp)) {
3993        std::string result_string;
3994        JDWP::JdwpError error = Dbg::StringToUtf8(result_value, &result_string);
3995        CHECK_EQ(error, JDWP::ERR_NONE);
3996        VLOG(jdwp) << "      string '" << result_string << "'";
3997      }
3998    } else {
3999      VLOG(jdwp) << "      string (null)";
4000    }
4001  }
4002
4003  // Attach the reply to DebugInvokeReq so it can be sent to the debugger when the event thread
4004  // is ready to suspend.
4005  BuildInvokeReply(pReq->reply, pReq->request_id, result_tag, result_value, exceptionObjectId);
4006}
4007
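// The reply consists of the standard JDWP reply header (length, id, flags, error code), the tagged
// result value, and a JT_OBJECT tag followed by the object id of the thrown exception (0 if none).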
4008void Dbg::BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id, JDWP::JdwpTag result_tag,
4009                           uint64_t result_value, JDWP::ObjectId exception) {
4010  // Make room for the JDWP header since we do not know the size of the reply yet.
4011  JDWP::expandBufAddSpace(pReply, kJDWPHeaderLen);
4012
4013  size_t width = GetTagWidth(result_tag);
4014  JDWP::expandBufAdd1(pReply, result_tag);
4015  if (width != 0) {
4016    WriteValue(pReply, width, result_value);
4017  }
4018  JDWP::expandBufAdd1(pReply, JDWP::JT_OBJECT);
4019  JDWP::expandBufAddObjectId(pReply, exception);
4020
4021  // Now that we know the size, we can complete the JDWP header.
4022  uint8_t* buf = expandBufGetBuffer(pReply);
4023  JDWP::Set4BE(buf + kJDWPHeaderSizeOffset, expandBufGetLength(pReply));
4024  JDWP::Set4BE(buf + kJDWPHeaderIdOffset, request_id);
4025  JDWP::Set1(buf + kJDWPHeaderFlagsOffset, kJDWPFlagReply);  // flags
4026  JDWP::Set2BE(buf + kJDWPHeaderErrorCodeOffset, JDWP::ERR_NONE);
4027}
4028
4029void Dbg::FinishInvokeMethod(DebugInvokeReq* pReq) {
4030  CHECK_NE(Thread::Current(), GetDebugThread()) << "This must be called by the event thread";
4031
4032  JDWP::ExpandBuf* const pReply = pReq->reply;
4033  CHECK(pReply != nullptr) << "No reply attached to DebugInvokeReq";
4034
4035  // We need to prevent other threads (including the JDWP thread) from interacting with the
4036  // debugger while we send the reply but are not yet suspended. The JDWP token will be released
4037  // just before we suspend ourselves again (see ThreadList::SuspendSelfForDebugger).
4038  gJdwpState->AcquireJdwpTokenForEvent(pReq->thread_id);
4039
4040  // Send the reply unless the debugger detached before the completion of the method.
4041  if (IsDebuggerActive()) {
4042    const size_t replyDataLength = expandBufGetLength(pReply) - kJDWPHeaderLen;
4043    VLOG(jdwp) << StringPrintf("REPLY INVOKE id=0x%06x (length=%zu)",
4044                               pReq->request_id, replyDataLength);
4045
4046    gJdwpState->SendRequest(pReply);
4047  } else {
4048    VLOG(jdwp) << "Not sending invoke reply because debugger detached";
4049  }
4050}
4051
4052/*
4053 * "request" contains a full JDWP packet, possibly with multiple chunks.  We
4054 * need to process each, accumulate the replies, and ship the whole thing
4055 * back.
4056 *
4057 * Returns "true" if we have a reply.  The reply buffer is newly allocated,
4058 * and includes the chunk type/length, followed by the data.
4059 *
4060 * OLD-TODO: we currently assume that the request and reply include a single
4061 * chunk.  If this becomes inconvenient we will need to adapt.
4062 */
4063bool Dbg::DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen) {
4064  Thread* self = Thread::Current();
4065  JNIEnv* env = self->GetJniEnv();
4066
4067  uint32_t type = request->ReadUnsigned32("type");
4068  uint32_t length = request->ReadUnsigned32("length");
4069
4070  // Create a byte[] corresponding to 'request'.
4071  size_t request_length = request->size();
4072  ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
4073  if (dataArray.get() == nullptr) {
4074    LOG(WARNING) << "byte[] allocation failed: " << request_length;
4075    env->ExceptionClear();
4076    return false;
4077  }
4078  env->SetByteArrayRegion(dataArray.get(), 0, request_length,
4079                          reinterpret_cast<const jbyte*>(request->data()));
4080  request->Skip(request_length);
4081
4082  // Run through and find all chunks.  [Currently just find the first.]
4083  ScopedByteArrayRO contents(env, dataArray.get());
4084  if (length != request_length) {
4085    LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zd)", length, request_length);
4086    return false;
4087  }
4088
4089  // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
4090  ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
4091                                                                 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
4092                                                                 type, dataArray.get(), 0, length));
4093  if (env->ExceptionCheck()) {
4094    LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
4095    env->ExceptionDescribe();
4096    env->ExceptionClear();
4097    return false;
4098  }
4099
4100  if (chunk.get() == nullptr) {
4101    return false;
4102  }
4103
4104  /*
4105   * Pull the pieces out of the chunk.  We copy the results into a
4106   * newly-allocated buffer that the caller can free.  We don't want to
4107   * continue using the Chunk object because nothing has a reference to it.
4108   *
4109   * We could avoid this by returning type/data/offset/length and having
4110   * the caller be aware of the object lifetime issues, but that
4111   * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
4112   * if we have responses for multiple chunks.
4113   *
4114   * So we're pretty much stuck with copying data around multiple times.
4115   */
4116  ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
4117  jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
4118  length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
4119  type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);
4120
4121  VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
4122  if (length == 0 || replyData.get() == nullptr) {
4123    return false;
4124  }
4125
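  // Each DDM chunk starts with an 8-byte header: a 4-byte type followed by a 4-byte length, both
  // big-endian.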
4126  const int kChunkHdrLen = 8;
4127  uint8_t* reply = new (std::nothrow) uint8_t[length + kChunkHdrLen];
4128  if (reply == nullptr) {
4129    LOG(WARNING) << "byte[] allocation failed: " << (length + kChunkHdrLen);
4130    return false;
4131  }
4132  JDWP::Set4BE(reply + 0, type);
4133  JDWP::Set4BE(reply + 4, length);
4134  env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
4135
4136  *pReplyBuf = reply;
4137  *pReplyLen = length + kChunkHdrLen;
4138
4139  VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
4140  return true;
4141}
4142
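// Notifies the Java-side DdmServer that a DDM client has connected or disconnected.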
4143void Dbg::DdmBroadcast(bool connect) {
4144  VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
4145
4146  Thread* self = Thread::Current();
4147  if (self->GetState() != kRunnable) {
4148    LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
4149    /* try anyway? */
4150  }
4151
4152  JNIEnv* env = self->GetJniEnv();
4153  jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
4154  env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
4155                            WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
4156                            event);
4157  if (env->ExceptionCheck()) {
4158    LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
4159    env->ExceptionDescribe();
4160    env->ExceptionClear();
4161  }
4162}
4163
4164void Dbg::DdmConnected() {
4165  Dbg::DdmBroadcast(true);
4166}
4167
4168void Dbg::DdmDisconnected() {
4169  Dbg::DdmBroadcast(false);
4170  gDdmThreadNotification = false;
4171}
4172
4173/*
4174 * Send a notification when a thread starts, stops, or changes its name.
4175 *
4176 * Because we broadcast the full set of threads when the notifications are
4177 * first enabled, it's possible for "thread" to be actively executing.
4178 */
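// Chunk payloads, as constructed below: THDE carries just the 4-byte thread id; THCR and THNM
// carry the 4-byte thread id followed by the thread name as a 4-byte character count plus
// UTF-16BE characters. E.g. (illustrative), thread id 5 named "main" encodes to
// 4 + 4 + 2 * 4 = 16 bytes, which is what the CHECK_EQ below verifies.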
4179void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
4180  if (!gDdmThreadNotification) {
4181    return;
4182  }
4183
4184  if (type == CHUNK_TYPE("THDE")) {
4185    uint8_t buf[4];
4186    JDWP::Set4BE(&buf[0], t->GetThreadId());
4187    Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
4188  } else {
4189    CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
4190    ScopedObjectAccessUnchecked soa(Thread::Current());
4191    StackHandleScope<1> hs(soa.Self());
4192    Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
4193    size_t char_count = (name.Get() != nullptr) ? name->GetLength() : 0;
4194    const jchar* chars = (name.Get() != nullptr) ? name->GetValue() : nullptr;
4195
4196    std::vector<uint8_t> bytes;
4197    JDWP::Append4BE(bytes, t->GetThreadId());
4198    JDWP::AppendUtf16BE(bytes, chars, char_count);
4199    CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
4200    Dbg::DdmSendChunk(type, bytes);
4201  }
4202}
4203
4204void Dbg::DdmSetThreadNotification(bool enable) {
4205  // Enable/disable thread notifications.
4206  gDdmThreadNotification = enable;
4207  if (enable) {
4208    // Suspend the VM then post thread start notifications for all threads. Threads attaching will
4209    // see a suspension in progress and block until that ends. They then post their own start
4210    // notification.
4211    SuspendVM();
4212    std::list<Thread*> threads;
4213    Thread* self = Thread::Current();
4214    {
4215      MutexLock mu(self, *Locks::thread_list_lock_);
4216      threads = Runtime::Current()->GetThreadList()->GetList();
4217    }
4218    {
4219      ScopedObjectAccess soa(self);
4220      for (Thread* thread : threads) {
4221        Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
4222      }
4223    }
4224    ResumeVM();
4225  }
4226}
4227
4228void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
4229  if (IsDebuggerActive()) {
4230    gJdwpState->PostThreadChange(t, type == CHUNK_TYPE("THCR"));
4231  }
4232  Dbg::DdmSendThreadNotification(t, type);
4233}
4234
4235void Dbg::PostThreadStart(Thread* t) {
4236  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
4237}
4238
4239void Dbg::PostThreadDeath(Thread* t) {
4240  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
4241}
4242
4243void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
4244  CHECK(buf != nullptr);
4245  iovec vec[1];
4246  vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
4247  vec[0].iov_len = byte_count;
4248  Dbg::DdmSendChunkV(type, vec, 1);
4249}
4250
4251void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
4252  DdmSendChunk(type, bytes.size(), bytes.data());
4253}
4254
4255void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
4256  if (gJdwpState == nullptr) {
4257    VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
4258  } else {
4259    gJdwpState->DdmSendChunkV(type, iov, iov_count);
4260  }
4261}
4262
4263JDWP::JdwpState* Dbg::GetJdwpState() {
4264  return gJdwpState;
4265}
4266
4267int Dbg::DdmHandleHpifChunk(HpifWhen when) {
4268  if (when == HPIF_WHEN_NOW) {
4269    DdmSendHeapInfo(when);
4270    return true;
4271  }
4272
4273  if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
4274    LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
4275    return false;
4276  }
4277
4278  gDdmHpifWhen = when;
4279  return true;
4280}
4281
4282bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
4283  if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
4284    LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
4285    return false;
4286  }
4287
4288  if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
4289    LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
4290    return false;
4291  }
4292
4293  if (native) {
4294    gDdmNhsgWhen = when;
4295    gDdmNhsgWhat = what;
4296  } else {
4297    gDdmHpsgWhen = when;
4298    gDdmHpsgWhat = what;
4299  }
4300  return true;
4301}
4302
4303void Dbg::DdmSendHeapInfo(HpifWhen reason) {
4304  // If there's a one-shot 'when', reset it.
4305  if (reason == gDdmHpifWhen) {
4306    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
4307      gDdmHpifWhen = HPIF_WHEN_NEVER;
4308    }
4309  }
4310
4311  /*
4312   * Chunk HPIF (client --> server)
4313   *
4314   * Heap Info. General information about the heap,
4315   * suitable for a summary display.
4316   *
4317   *   [u4]: number of heaps
4318   *
4319   *   For each heap:
4320   *     [u4]: heap ID
4321   *     [u8]: timestamp in ms since Unix epoch
4322   *     [u1]: capture reason (same as 'when' value from server)
4323   *     [u4]: max heap size in bytes (-Xmx)
4324   *     [u4]: current heap size in bytes
4325   *     [u4]: current number of bytes allocated
4326   *     [u4]: current number of objects allocated
4327   */
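  // With the single heap reported here, the payload is 4 + (4 + 8 + 1 + 4 + 4 + 4 + 4) = 33 bytes;
  // the CHECK_EQ below verifies exactly that.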
4328  uint8_t heap_count = 1;
4329  gc::Heap* heap = Runtime::Current()->GetHeap();
4330  std::vector<uint8_t> bytes;
4331  JDWP::Append4BE(bytes, heap_count);
4332  JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
4333  JDWP::Append8BE(bytes, MilliTime());
4334  JDWP::Append1BE(bytes, reason);
4335  JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
4336  JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
4337  JDWP::Append4BE(bytes, heap->GetBytesAllocated());
4338  JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
4339  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
4340  Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
4341}
4342
4343enum HpsgSolidity {
4344  SOLIDITY_FREE = 0,
4345  SOLIDITY_HARD = 1,
4346  SOLIDITY_SOFT = 2,
4347  SOLIDITY_WEAK = 3,
4348  SOLIDITY_PHANTOM = 4,
4349  SOLIDITY_FINALIZABLE = 5,
4350  SOLIDITY_SWEEP = 6,
4351};
4352
4353enum HpsgKind {
4354  KIND_OBJECT = 0,
4355  KIND_CLASS_OBJECT = 1,
4356  KIND_ARRAY_1 = 2,
4357  KIND_ARRAY_2 = 3,
4358  KIND_ARRAY_4 = 4,
4359  KIND_ARRAY_8 = 5,
4360  KIND_UNKNOWN = 6,
4361  KIND_NATIVE = 7,
4362};
4363
4364#define HPSG_PARTIAL (1<<7)
4365#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
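// A few worked examples of the state encoding above (purely illustrative; derived from the enums
// and macro, not additional protocol definitions):
static_assert(HPSG_STATE(SOLIDITY_FREE, 0) == 0x00, "free run");
static_assert(HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT) == 0x01, "live object");
static_assert(HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT) == 0x09, "class object");
static_assert((HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT) | HPSG_PARTIAL) == 0x81,
              "object run that continues in the next state byte");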
4366
4367class HeapChunkContext {
4368 public:
4369  // Maximum chunk size.  Obtain this from the formula:
4370  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
4371  HeapChunkContext(bool merge, bool native)
4372      : buf_(16384 - 16),
4373        type_(0),
4374        chunk_overhead_(0) {
4375    Reset();
4376    if (native) {
4377      type_ = CHUNK_TYPE("NHSG");
4378    } else {
4379      type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
4380    }
4381  }
4382
4383  ~HeapChunkContext() {
4384    if (p_ > &buf_[0]) {
4385      Flush();
4386    }
4387  }
4388
4389  void SetChunkOverhead(size_t chunk_overhead) {
4390    chunk_overhead_ = chunk_overhead;
4391  }
4392
4393  void ResetStartOfNextChunk() {
4394    startOfNextMemoryChunk_ = nullptr;
4395  }
4396
4397  void EnsureHeader(const void* chunk_ptr) {
4398    if (!needHeader_) {
4399      return;
4400    }
4401
4402    // Start a new HPSx chunk.
4403    JDWP::Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
4404    JDWP::Write1BE(&p_, 8);  // Size of allocation unit, in bytes.
4405
4406    JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
4407    JDWP::Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
4408    // [u4]: length of piece, in allocation units
4409    // We won't know this until we're done, so save the offset and stuff in a dummy value.
4410    pieceLenField_ = p_;
4411    JDWP::Write4BE(&p_, 0x55555555);
4412    needHeader_ = false;
4413  }
4414
4415  void Flush() SHARED_REQUIRES(Locks::mutator_lock_) {
4416    if (pieceLenField_ == nullptr) {
4417      // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
4418      CHECK(needHeader_);
4419      return;
4420    }
4421    // Patch the "length of piece" field.
4422    CHECK_LE(&buf_[0], pieceLenField_);
4423    CHECK_LE(pieceLenField_, p_);
4424    JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
4425
4426    Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
4427    Reset();
4428  }
4429
4430  static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
4431      SHARED_REQUIRES(Locks::heap_bitmap_lock_,
4432                            Locks::mutator_lock_) {
4433    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
4434  }
4435
4436  static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
4437      SHARED_REQUIRES(Locks::mutator_lock_) {
4438    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
4439  }
4440
4441 private:
4442  enum { ALLOCATION_UNIT_SIZE = 8 };
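  // Note: EnsureHeader() writes the allocation-unit size into the chunk header as a literal 8;
  // that byte and ALLOCATION_UNIT_SIZE must stay in agreement.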
4443
4444  void Reset() {
4445    p_ = &buf_[0];
4446    ResetStartOfNextChunk();
4447    totalAllocationUnits_ = 0;
4448    needHeader_ = true;
4449    pieceLenField_ = nullptr;
4450  }
4451
4452  bool IsNative() const {
4453    return type_ == CHUNK_TYPE("NHSG");
4454  }
4455
4456  // Returns true if the object is not an empty chunk.
4457  bool ProcessRecord(void* start, size_t used_bytes) SHARED_REQUIRES(Locks::mutator_lock_) {
4458    // Note: heap callbacks cannot manipulate the heap upon which they are crawling; care is taken
4459    // in the following code not to allocate memory, by ensuring buf_ is of the correct size.
4460    if (used_bytes == 0) {
4461      if (start == nullptr) {
4462        // Reset for start of new heap.
4463        startOfNextMemoryChunk_ = nullptr;
4464        Flush();
4465      }
4466      // Only process in-use memory so that free region information
4467      // also includes dlmalloc bookkeeping.
4468      return false;
4469    }
4470    if (startOfNextMemoryChunk_ != nullptr) {
4471      // Transmit any pending free memory. Free native memory of more than kMaxFreeLen is likely due
4472      // to the use of mmaps, so don't report it; if no free memory is reported, flush and start a new segment.
4473      bool flush = true;
4474      if (start > startOfNextMemoryChunk_) {
4475        const size_t kMaxFreeLen = 2 * kPageSize;
4476        void* free_start = startOfNextMemoryChunk_;
4477        void* free_end = start;
4478        const size_t free_len =
4479            reinterpret_cast<uintptr_t>(free_end) - reinterpret_cast<uintptr_t>(free_start);
4480        if (!IsNative() || free_len < kMaxFreeLen) {
4481          AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), free_start, free_len, IsNative());
4482          flush = false;
4483        }
4484      }
4485      if (flush) {
4486        startOfNextMemoryChunk_ = nullptr;
4487        Flush();
4488      }
4489    }
4490    return true;
4491  }
4492
4493  void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
4494      SHARED_REQUIRES(Locks::mutator_lock_) {
4495    if (ProcessRecord(start, used_bytes)) {
4496      uint8_t state = ExamineNativeObject(start);
4497      AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
4498      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4499    }
4500  }
4501
4502  void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
4503      SHARED_REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
4504    if (ProcessRecord(start, used_bytes)) {
4505      // Determine the type of this chunk.
4506      // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
4507      // If it's the same, we should combine them.
4508      uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
4509      AppendChunk(state, start, used_bytes + chunk_overhead_, false /*is_native*/);
4510      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4511    }
4512  }
4513
4514  void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
4515      SHARED_REQUIRES(Locks::mutator_lock_) {
4516    // Make sure there's enough room left in the buffer.
4517    // We need to use two bytes for every fractional 256 allocation units used by the chunk plus
4518    // 17 bytes for any header.
4519    const size_t needed = ((RoundUp(length / ALLOCATION_UNIT_SIZE, 256) / 256) * 2) + 17;
4520    size_t byte_left = &buf_.back() - p_;
4521    if (byte_left < needed) {
4522      if (is_native) {
4523        // Cannot trigger memory allocation while walking native heap.
4524        return;
4525      }
4526      Flush();
4527    }
4528
4529    byte_left = &buf_.back() - p_;
4530    if (byte_left < needed) {
4531      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
4532          << needed << " bytes)";
4533      return;
4534    }
4535    EnsureHeader(ptr);
4536    // Write out the chunk description.
4537    length /= ALLOCATION_UNIT_SIZE;   // Convert to allocation units.
4538    totalAllocationUnits_ += length;
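    // Runs longer than 256 allocation units are split: each full 256-unit run is emitted as
    // (state | HPSG_PARTIAL, 255) and the remainder as (state, remainder - 1). E.g. (illustrative),
    // a 600-unit chunk becomes (state|HPSG_PARTIAL, 255), (state|HPSG_PARTIAL, 255), (state, 87).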
4539    while (length > 256) {
4540      *p_++ = state | HPSG_PARTIAL;
4541      *p_++ = 255;     // length - 1
4542      length -= 256;
4543    }
4544    *p_++ = state;
4545    *p_++ = length - 1;
4546  }
4547
4548  uint8_t ExamineNativeObject(const void* p) SHARED_REQUIRES(Locks::mutator_lock_) {
4549    return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4550  }
4551
4552  uint8_t ExamineJavaObject(mirror::Object* o)
4553      SHARED_REQUIRES(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
4554    if (o == nullptr) {
4555      return HPSG_STATE(SOLIDITY_FREE, 0);
4556    }
4557    // It's an allocated chunk. Figure out what it is.
4558    gc::Heap* heap = Runtime::Current()->GetHeap();
4559    if (!heap->IsLiveObjectLocked(o)) {
4560      LOG(ERROR) << "Invalid object in managed heap: " << o;
4561      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4562    }
4563    mirror::Class* c = o->GetClass();
4564    if (c == nullptr) {
4565      // The object was probably just created but hasn't been initialized yet.
4566      return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4567    }
4568    if (!heap->IsValidObjectAddress(c)) {
4569      LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
4570      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4571    }
4572    if (c->GetClass() == nullptr) {
4573      LOG(ERROR) << "Null class of class " << c << " for object " << o;
4574      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4575    }
4576    if (c->IsClassClass()) {
4577      return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
4578    }
4579    if (c->IsArrayClass()) {
4580      switch (c->GetComponentSize()) {
4581      case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
4582      case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
4583      case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4584      case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
4585      }
4586    }
4587    return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4588  }
4589
4590  std::vector<uint8_t> buf_;
4591  uint8_t* p_;
4592  uint8_t* pieceLenField_;
4593  void* startOfNextMemoryChunk_;
4594  size_t totalAllocationUnits_;
4595  uint32_t type_;
4596  bool needHeader_;
4597  size_t chunk_overhead_;
4598
4599  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
4600};
4601
4602static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
4603    SHARED_REQUIRES(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
4604  const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
4605  HeapChunkContext::HeapChunkJavaCallback(
4606      obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
4607}
4608
4609void Dbg::DdmSendHeapSegments(bool native) {
4610  Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
4611  Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
4612  if (when == HPSG_WHEN_NEVER) {
4613    return;
4614  }
4615  // Figure out what kind of chunks we'll be sending.
4616  CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS)
4617      << static_cast<int>(what);
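  // The resulting DDM traffic is: one HPST/NHST "heap start" chunk, zero or more HPSG/HPSO/NHSG
  // segment chunks flushed out of HeapChunkContext as its buffer fills, and one HPEN/NHEN
  // "heap end" chunk.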
4618
4619  // First, send a heap start chunk.
4620  uint8_t heap_id[4];
4621  JDWP::Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
4622  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
4623  Thread* self = Thread::Current();
4624  Locks::mutator_lock_->AssertSharedHeld(self);
4625
4626  // Send a series of heap segment chunks.
4627  HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
4628  if (native) {
4629#if defined(__ANDROID__) && defined(USE_DLMALLOC)
4630    dlmalloc_inspect_all(HeapChunkContext::HeapChunkNativeCallback, &context);
4631    HeapChunkContext::HeapChunkNativeCallback(nullptr, nullptr, 0, &context);  // Indicate end of a space.
4632#else
4633    UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
4634#endif
4635  } else {
4636    gc::Heap* heap = Runtime::Current()->GetHeap();
4637    for (const auto& space : heap->GetContinuousSpaces()) {
4638      if (space->IsDlMallocSpace()) {
4639        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4640        // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
4641        // allocation then the first sizeof(size_t) may belong to it.
4642        context.SetChunkOverhead(sizeof(size_t));
4643        space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
4644      } else if (space->IsRosAllocSpace()) {
4645        context.SetChunkOverhead(0);
4646        // Need to acquire the mutator lock before the heap bitmap lock with exclusive access since
4647        // RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock.
4648        self->TransitionFromRunnableToSuspended(kSuspended);
4649        ThreadList* tl = Runtime::Current()->GetThreadList();
4650        tl->SuspendAll(__FUNCTION__);
4651        {
4652          ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4653          space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
4654        }
4655        tl->ResumeAll();
4656        self->TransitionFromSuspendedToRunnable();
4657      } else if (space->IsBumpPointerSpace()) {
4658        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4659        context.SetChunkOverhead(0);
4660        space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
4661        HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
4662      } else if (space->IsRegionSpace()) {
4663        heap->IncrementDisableMovingGC(self);
4664        self->TransitionFromRunnableToSuspended(kSuspended);
4665        ThreadList* tl = Runtime::Current()->GetThreadList();
4666        tl->SuspendAll(__FUNCTION__);
4667        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4668        context.SetChunkOverhead(0);
4669        space->AsRegionSpace()->Walk(BumpPointerSpaceCallback, &context);
4670        HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
4671        tl->ResumeAll();
4672        self->TransitionFromSuspendedToRunnable();
4673        heap->DecrementDisableMovingGC(self);
4674      } else {
4675        UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
4676      }
4677      context.ResetStartOfNextChunk();
4678    }
4679    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4680    // Walk the large objects, these are not in the AllocSpace.
4681    context.SetChunkOverhead(0);
4682    heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
4683  }
4684
4685  // Finally, send a heap end chunk.
4686  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
4687}
4688
4689void Dbg::SetAllocTrackingEnabled(bool enable) {
4690  gc::AllocRecordObjectMap::SetAllocTrackingEnabled(enable);
4691}
4692
4693void Dbg::DumpRecentAllocations() {
4694  ScopedObjectAccess soa(Thread::Current());
4695  MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
4696  if (!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()) {
4697    LOG(INFO) << "Not recording tracked allocations";
4698    return;
4699  }
4700  gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
4701  CHECK(records != nullptr);
4702
4703  const uint16_t capped_count = CappedAllocRecordCount(records->GetRecentAllocationSize());
4704  uint16_t count = capped_count;
4705
4706  LOG(INFO) << "Tracked allocations (count=" << count << ")";
4707  for (auto it = records->RBegin(), end = records->REnd();
4708      count > 0 && it != end; count--, it++) {
4709    const gc::AllocRecord* record = it->second;
4710
4711    LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->GetTid(), record->ByteCount())
4712              << PrettyClass(record->GetClass());
4713
4714    for (size_t stack_frame = 0, depth = record->GetDepth(); stack_frame < depth; ++stack_frame) {
4715      const gc::AllocRecordStackTraceElement& stack_element = record->StackElement(stack_frame);
4716      ArtMethod* m = stack_element.GetMethod();
4717      LOG(INFO) << "    " << PrettyMethod(m) << " line " << stack_element.ComputeLineNumber();
4718    }
4719
4720    // Pause periodically to help logcat catch up.
4721    if ((count % 5) == 0) {
4722      usleep(40000);
4723    }
4724  }
4725}
4726
4727class StringTable {
4728 public:
4729  StringTable() {
4730  }
4731
4732  void Add(const std::string& str) {
4733    table_.insert(str);
4734  }
4735
4736  void Add(const char* str) {
4737    table_.insert(str);
4738  }
4739
4740  size_t IndexOf(const char* s) const {
4741    auto it = table_.find(s);
4742    if (it == table_.end()) {
4743      LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
4744    }
4745    return std::distance(table_.begin(), it);
4746  }
4747
4748  size_t Size() const {
4749    return table_.size();
4750  }
4751
4752  void WriteTo(std::vector<uint8_t>& bytes) const {
4753    for (const std::string& str : table_) {
4754      const char* s = str.c_str();
4755      size_t s_len = CountModifiedUtf8Chars(s);
4756      std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
4757      ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
4758      JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
4759    }
4760  }
4761
4762 private:
4763  std::set<std::string> table_;
4764  DISALLOW_COPY_AND_ASSIGN(StringTable);
4765};
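// Note: because the backing store is a std::set, iteration is in sorted string order, so the
// positions returned by IndexOf() match the order in which WriteTo() emits the strings, provided
// no strings are added between indexing and writing.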
4766
4767static const char* GetMethodSourceFile(ArtMethod* method)
4768    SHARED_REQUIRES(Locks::mutator_lock_) {
4769  DCHECK(method != nullptr);
4770  const char* source_file = method->GetDeclaringClassSourceFile();
4771  return (source_file != nullptr) ? source_file : "";
4772}
4773
4774/*
4775 * The data we send to DDMS contains everything we have recorded.
4776 *
4777 * Message header (all values big-endian):
4778 * (1b) message header len (to allow future expansion); includes itself
4779 * (1b) entry header len
4780 * (1b) stack frame len
4781 * (2b) number of entries
4782 * (4b) offset to string table from start of message
4783 * (2b) number of class name strings
4784 * (2b) number of method name strings
4785 * (2b) number of source file name strings
4786 * For each entry:
4787 *   (4b) total allocation size
4788 *   (2b) thread id
4789 *   (2b) allocated object's class name index
4790 *   (1b) stack depth
4791 *   For each stack frame:
4792 *     (2b) method's class name
4793 *     (2b) method name
4794 *     (2b) method source file
4795 *     (2b) line number, clipped to 32767; -2 if native; -1 if no source
4796 * (xb) class name strings
4797 * (xb) method name strings
4798 * (xb) source file strings
4799 *
4800 * As with other DDM traffic, strings are sent as a 4-byte length
4801 * followed by UTF-16 data.
4802 *
4803 * We send up 16-bit unsigned indexes into string tables.  In theory there
4804 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
4805 * each table, but in practice there should be far fewer.
4806 *
4807 * The chief reason for using a string table here is to keep the size of
4808 * the DDMS message to a minimum.  This is partly to make the protocol
4809 * efficient, but also because we have to form the whole thing up all at
4810 * once in a memory buffer.
4811 *
4812 * We use separate string tables for class names, method names, and source
4813 * files to keep the indexes small.  There will generally be no overlap
4814 * between the contents of these tables.
4815 */
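// The fixed-size pieces of that layout account for the constants used below:
// message header = 1 + 1 + 1 + 2 + 4 + 2 + 2 + 2 = 15 bytes (kMessageHeaderLen),
// entry header = 4 + 2 + 2 + 1 = 9 bytes (kEntryHeaderLen), and
// stack frame = 2 + 2 + 2 + 2 = 8 bytes (kStackFrameLen).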
4816jbyteArray Dbg::GetRecentAllocations() {
4817  if ((false)) {
4818    DumpRecentAllocations();
4819  }
4820
4821  Thread* self = Thread::Current();
4822  std::vector<uint8_t> bytes;
4823  {
4824    MutexLock mu(self, *Locks::alloc_tracker_lock_);
4825    gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
4826    // In case this method is called when the allocation tracker is disabled,
4827    // we should still send some data back.
4828    gc::AllocRecordObjectMap dummy;
4829    if (records == nullptr) {
4830      CHECK(!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
4831      records = &dummy;
4832    }
4833    // We don't need to wait on the condition variable records->new_record_condition_, because this
4834    // function only reads the class objects, which are already marked, so reading them does not
4835    // change their reachability.
4836
4837    //
4838    // Part 1: generate string tables.
4839    //
4840    StringTable class_names;
4841    StringTable method_names;
4842    StringTable filenames;
4843
4844    const uint16_t capped_count = CappedAllocRecordCount(records->GetRecentAllocationSize());
4845    uint16_t count = capped_count;
4846    for (auto it = records->RBegin(), end = records->REnd();
4847         count > 0 && it != end; count--, it++) {
4848      const gc::AllocRecord* record = it->second;
4849      std::string temp;
4850      class_names.Add(record->GetClassDescriptor(&temp));
4851      for (size_t i = 0, depth = record->GetDepth(); i < depth; i++) {
4852        ArtMethod* m = record->StackElement(i).GetMethod();
4853        class_names.Add(m->GetDeclaringClassDescriptor());
4854        method_names.Add(m->GetName());
4855        filenames.Add(GetMethodSourceFile(m));
4856      }
4857    }
4858
4859    LOG(INFO) << "recent allocation records: " << capped_count;
4860    LOG(INFO) << "allocation records all objects: " << records->Size();
4861
4862    //
4863    // Part 2: Generate the output and store it in the buffer.
4864    //
4865
4866    // (1b) message header len (to allow future expansion); includes itself
4867    // (1b) entry header len
4868    // (1b) stack frame len
4869    const int kMessageHeaderLen = 15;
4870    const int kEntryHeaderLen = 9;
4871    const int kStackFrameLen = 8;
4872    JDWP::Append1BE(bytes, kMessageHeaderLen);
4873    JDWP::Append1BE(bytes, kEntryHeaderLen);
4874    JDWP::Append1BE(bytes, kStackFrameLen);
4875
4876    // (2b) number of entries
4877    // (4b) offset to string table from start of message
4878    // (2b) number of class name strings
4879    // (2b) number of method name strings
4880    // (2b) number of source file name strings
4881    JDWP::Append2BE(bytes, capped_count);
4882    size_t string_table_offset = bytes.size();
4883    JDWP::Append4BE(bytes, 0);  // We'll patch this later...
4884    JDWP::Append2BE(bytes, class_names.Size());
4885    JDWP::Append2BE(bytes, method_names.Size());
4886    JDWP::Append2BE(bytes, filenames.Size());
4887
4888    std::string temp;
4889    count = capped_count;
4890    // The last "count" allocation records in "records" are the most recent "count" allocations.
4891    // Reverse iterate to get them; the most recent allocation is sent first.
4892    for (auto it = records->RBegin(), end = records->REnd();
4893         count > 0 && it != end; count--, it++) {
4894      // For each entry:
4895      // (4b) total allocation size
4896      // (2b) thread id
4897      // (2b) allocated object's class name index
4898      // (1b) stack depth
4899      const gc::AllocRecord* record = it->second;
4900      size_t stack_depth = record->GetDepth();
4901      size_t allocated_object_class_name_index =
4902          class_names.IndexOf(record->GetClassDescriptor(&temp));
4903      JDWP::Append4BE(bytes, record->ByteCount());
4904      JDWP::Append2BE(bytes, static_cast<uint16_t>(record->GetTid()));
4905      JDWP::Append2BE(bytes, allocated_object_class_name_index);
4906      JDWP::Append1BE(bytes, stack_depth);
4907
4908      for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
4909        // For each stack frame:
4910        // (2b) method's class name
4911        // (2b) method name
4912        // (2b) method source file
4913        // (2b) line number, clipped to 32767; -2 if native; -1 if no source
4914        ArtMethod* m = record->StackElement(stack_frame).GetMethod();
4915        size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
4916        size_t method_name_index = method_names.IndexOf(m->GetName());
4917        size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
4918        JDWP::Append2BE(bytes, class_name_index);
4919        JDWP::Append2BE(bytes, method_name_index);
4920        JDWP::Append2BE(bytes, file_name_index);
4921        JDWP::Append2BE(bytes, record->StackElement(stack_frame).ComputeLineNumber());
4922      }
4923    }
4924
4925    // (xb) class name strings
4926    // (xb) method name strings
4927    // (xb) source file strings
4928    JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
4929    class_names.WriteTo(bytes);
4930    method_names.WriteTo(bytes);
4931    filenames.WriteTo(bytes);
4932  }
4933  JNIEnv* env = self->GetJniEnv();
4934  jbyteArray result = env->NewByteArray(bytes.size());
4935  if (result != nullptr) {
4936    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
4937  }
4938  return result;
4939}
4940
4941ArtMethod* DeoptimizationRequest::Method() const {
4942  ScopedObjectAccessUnchecked soa(Thread::Current());
4943  return soa.DecodeMethod(method_);
4944}
4945
4946void DeoptimizationRequest::SetMethod(ArtMethod* m) {
4947  ScopedObjectAccessUnchecked soa(Thread::Current());
4948  method_ = soa.EncodeMethod(m);
4949}
4950
4951}  // namespace art
4952