/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger.h"

#include <sys/uio.h>

#include <set>

#include "android-base/stringprintf.h"

#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/enums.h"
#include "base/time_utils.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_file_annotations.h"
#include "dex_instruction.h"
#include "entrypoints/runtime_asm_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/allocation_record.h"
#include "gc/scoped_gc_critical_section.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope.h"
#include "jdwp/jdwp_priv.h"
#include "jdwp/object_registry.h"
#include "jni_internal.h"
#include "jvalue-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "obj_ptr-inl.h"
#include "reflection.h"
#include "safe_map.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "utf.h"
#include "well_known_classes.h"

namespace art {

using android::base::StringPrintf;

// The key identifying the debugger to update instrumentation.
static constexpr const char* kDbgInstrumentationKey = "Debugger";

// Limit alloc_record_count to the 2BE value (64k-1) that is the limit of the current protocol.
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
  const size_t cap = 0xffff;
  if (alloc_record_count > cap) {
    return cap;
  }
  return alloc_record_count;
}
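
// A quick worked example of the cap above (illustrative only):
//
//   CappedAllocRecordCount(1024)  == 1024    // under the cap, passed through
//   CappedAllocRecordCount(70000) == 0xffff  // over the cap, clamped to 65535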

// Takes a method and returns a 'canonical' one if the method is default (and therefore potentially
// copied from some other class). This ensures that the debugger does not get confused as to which
// method we are in.
static ArtMethod* GetCanonicalMethod(ArtMethod* m)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (LIKELY(!m->IsDefault())) {
    return m;
  } else {
    mirror::Class* declaring_class = m->GetDeclaringClass();
    return declaring_class->FindDeclaredVirtualMethod(declaring_class->GetDexCache(),
                                                      m->GetDexMethodIndex(),
                                                      kRuntimePointerSize);
  }
}
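
// Illustrative sketch of why canonicalization matters (hypothetical methods, not exercised here):
// if interface I declares a default method I.m() that is copied into implementing classes A and B,
// both copies resolve back to the single method declared on I, so breakpoints set through either
// copy compare equal:
//
//   ArtMethod* copy_in_a = ...;  // copy of I.m() in A
//   ArtMethod* copy_in_b = ...;  // copy of I.m() in B
//   GetCanonicalMethod(copy_in_a) == GetCanonicalMethod(copy_in_b)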

class Breakpoint : public ValueObject {
 public:
  Breakpoint(ArtMethod* method, uint32_t dex_pc, DeoptimizationRequest::Kind deoptimization_kind)
    : method_(GetCanonicalMethod(method)),
      dex_pc_(dex_pc),
      deoptimization_kind_(deoptimization_kind) {
    CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
          deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
          deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
  }

  Breakpoint(const Breakpoint& other) REQUIRES_SHARED(Locks::mutator_lock_)
    : method_(other.method_),
      dex_pc_(other.dex_pc_),
      deoptimization_kind_(other.deoptimization_kind_) {}

  // Method() is called from root visiting, do not use ScopedObjectAccess here or it can cause
  // GC to deadlock if another thread tries to call SuspendAll while the GC is in a runnable state.
  ArtMethod* Method() const {
    return method_;
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  DeoptimizationRequest::Kind GetDeoptimizationKind() const {
    return deoptimization_kind_;
  }

  // Returns true if the method of this breakpoint and the passed in method should be considered the
  // same. That is, they are either the same method or they are copied from the same method.
  bool IsInMethod(ArtMethod* m) const REQUIRES_SHARED(Locks::mutator_lock_) {
    return method_ == GetCanonicalMethod(m);
  }

 private:
  // The location of this breakpoint.
  ArtMethod* method_;
  uint32_t dex_pc_;

  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
  DeoptimizationRequest::Kind deoptimization_kind_;
};

static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  os << StringPrintf("Breakpoint[%s @%#x]", ArtMethod::PrettyMethod(rhs.Method()).c_str(),
                     rhs.DexPc());
  return os;
}
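
// For reference, the operator above renders a breakpoint roughly as follows (method and pc are
// made up):
//
//   Breakpoint[void java.lang.Object.wait() @0x4]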

class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
 public:
  DebugInstrumentationListener() {}
  virtual ~DebugInstrumentationListener() {}

  void MethodEntered(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                     uint32_t dex_pc)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point and native method entry stubs aren't.
      return;
    }
    if (IsListeningToDexPcMoved()) {
      // We also listen to the kDexPcMoved instrumentation event, so we know the DexPcMoved method
      // is going to be called right after us. To avoid sending JDWP events twice for this location,
      // we report the event in DexPcMoved. However, we must remember this is a method entry so we
      // still send the METHOD_ENTRY event, and we can group it with other events for this location
      // like BREAKPOINT or SINGLE_STEP (or even METHOD_EXIT if this is a RETURN instruction).
      thread->SetDebugMethodEntry();
    } else if (IsListeningToMethodExit() && IsReturn(method, dex_pc)) {
      // We also listen to the kMethodExited instrumentation event and the current instruction is a
      // RETURN, so we know the MethodExited method is going to be called right after us. To avoid
      // sending JDWP events twice for this location, we report the event(s) in MethodExited.
      // However, we must remember this is a method entry so we still send the METHOD_ENTRY event,
      // and we can group it with other events for this location like BREAKPOINT or SINGLE_STEP.
      thread->SetDebugMethodEntry();
    } else {
      Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
    }
  }

  void MethodExited(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                    uint32_t dex_pc, const JValue& return_value)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point and native method entry stubs aren't.
      return;
    }
    uint32_t events = Dbg::kMethodExit;
    if (thread->IsDebugMethodEntry()) {
      // It is also the method entry.
      DCHECK(IsReturn(method, dex_pc));
      events |= Dbg::kMethodEntry;
      thread->ClearDebugMethodEntry();
    }
    Dbg::UpdateDebugger(thread, this_object, method, dex_pc, events, &return_value);
  }

  void MethodUnwind(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object ATTRIBUTE_UNUSED,
                    ArtMethod* method, uint32_t dex_pc)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    // We're not registered to listen to this kind of event, so complain.
    LOG(ERROR) << "Unexpected method unwind event in debugger " << ArtMethod::PrettyMethod(method)
               << " " << dex_pc;
  }

  void DexPcMoved(Thread* thread, mirror::Object* this_object, ArtMethod* method,
                  uint32_t new_dex_pc)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    if (IsListeningToMethodExit() && IsReturn(method, new_dex_pc)) {
      // We also listen to the kMethodExited instrumentation event and the current instruction is a
      // RETURN, so we know the MethodExited method is going to be called right after us. As in
      // MethodEntered, we delegate event reporting to MethodExited.
      // Besides, if this RETURN instruction is the only one in the method, we can send multiple
      // JDWP events in the same packet: METHOD_ENTRY, METHOD_EXIT, BREAKPOINT and/or SINGLE_STEP.
      // Therefore, we must not clear the debug method entry flag here.
    } else {
      uint32_t events = 0;
      if (thread->IsDebugMethodEntry()) {
        // It is also the method entry.
        events = Dbg::kMethodEntry;
        thread->ClearDebugMethodEntry();
      }
      Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, events, nullptr);
    }
  }

  void FieldRead(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
                 ArtMethod* method, uint32_t dex_pc, ArtField* field)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
  }

  void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
                    ArtMethod* method, uint32_t dex_pc, ArtField* field,
                    const JValue& field_value)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
  }

  void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, mirror::Throwable* exception_object)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    Dbg::PostException(exception_object);
  }

  // We only care about branches in the Jit.
  void Branch(Thread* /*thread*/, ArtMethod* method, uint32_t dex_pc, int32_t dex_pc_offset)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    LOG(ERROR) << "Unexpected branch event in debugger " << ArtMethod::PrettyMethod(method)
               << " " << dex_pc << ", " << dex_pc_offset;
  }

  // We only care about invokes in the Jit.
  void InvokeVirtualOrInterface(Thread* thread ATTRIBUTE_UNUSED,
                                mirror::Object*,
                                ArtMethod* method,
                                uint32_t dex_pc,
                                ArtMethod*)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    LOG(ERROR) << "Unexpected invoke event in debugger " << ArtMethod::PrettyMethod(method)
               << " " << dex_pc;
  }

 private:
  static bool IsReturn(ArtMethod* method, uint32_t dex_pc)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    const DexFile::CodeItem* code_item = method->GetCodeItem();
    const Instruction* instruction = Instruction::At(&code_item->insns_[dex_pc]);
    return instruction->IsReturn();
  }

  static bool IsListeningToDexPcMoved() REQUIRES_SHARED(Locks::mutator_lock_) {
    return IsListeningTo(instrumentation::Instrumentation::kDexPcMoved);
  }

  static bool IsListeningToMethodExit() REQUIRES_SHARED(Locks::mutator_lock_) {
    return IsListeningTo(instrumentation::Instrumentation::kMethodExited);
  }

  static bool IsListeningTo(instrumentation::Instrumentation::InstrumentationEvent event)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return (Dbg::GetInstrumentationEvents() & event) != 0;
  }

  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;

// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;

// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
static bool gJdwpConfigured = false;

// JDWP options for debugging. Only valid if IsJdwpConfigured() is true.
static JDWP::JdwpOptions gJdwpOptions;

// Runtime JDWP state.
static JDWP::JdwpState* gJdwpState = nullptr;
static bool gDebuggerConnected;  // debugger or DDMS is connected.

static bool gDdmThreadNotification = false;

// DDMS GC-related settings.
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;

bool Dbg::gDebuggerActive = false;
bool Dbg::gDisposed = false;
ObjectRegistry* Dbg::gRegistry = nullptr;

// Deoptimization support.
std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
size_t Dbg::full_deoptimization_event_count_ = 0;

// Instrumentation event reference counters.
size_t Dbg::dex_pc_change_event_ref_count_ = 0;
size_t Dbg::method_enter_event_ref_count_ = 0;
size_t Dbg::method_exit_event_ref_count_ = 0;
size_t Dbg::field_read_event_ref_count_ = 0;
size_t Dbg::field_write_event_ref_count_ = 0;
size_t Dbg::exception_catch_event_ref_count_ = 0;
uint32_t Dbg::instrumentation_events_ = 0;

Dbg::DbgThreadLifecycleCallback Dbg::thread_lifecycle_callback_;
Dbg::DbgClassLoadCallback Dbg::class_load_callback_;

// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);

void DebugInvokeReq::VisitRoots(RootVisitor* visitor, const RootInfo& root_info) {
  receiver.VisitRootIfNonNull(visitor, root_info);  // null for static method call.
  klass.VisitRoot(visitor, root_info);
}

void SingleStepControl::AddDexPc(uint32_t dex_pc) {
  dex_pcs_.insert(dex_pc);
}

bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
  return dex_pcs_.find(dex_pc) != dex_pcs_.end();
}

static bool IsBreakpoint(ArtMethod* m, uint32_t dex_pc)
    REQUIRES(!Locks::breakpoint_lock_)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].IsInMethod(m)) {
      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
      return true;
    }
  }
  return false;
}

static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
    REQUIRES(!Locks::thread_suspend_count_lock_) {
  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
  // A thread may be suspended for GC; in this code, we really want to know whether
  // there's a debugger suspension active.
  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
}

static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
  if (o == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  if (!o->IsArrayInstance()) {
    *error = JDWP::ERR_INVALID_ARRAY;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return o->AsArray();
}

static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
  if (o == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  if (!o->IsClass()) {
    *error = JDWP::ERR_INVALID_CLASS;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return o->AsClass();
}

static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
                            JDWP::JdwpError* error)
    REQUIRES_SHARED(Locks::mutator_lock_)
    REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_) {
  mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
  if (thread_peer == nullptr) {
    // This isn't even an object.
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }

  ObjPtr<mirror::Class> java_lang_Thread =
      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread);
  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
    // This isn't a thread.
    *error = JDWP::ERR_INVALID_THREAD;
    return nullptr;
  }

  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread = Thread::FromManagedThread(soa, thread_peer);
  // If thread is null then this is a java.lang.Thread without a Thread*. It must be an unstarted
  // thread or a zombie.
  *error = (thread == nullptr) ? JDWP::ERR_THREAD_NOT_ALIVE : JDWP::ERR_NONE;
  return thread;
}

static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
  return static_cast<JDWP::JdwpTag>(descriptor[0]);
}
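
// Worked examples (these follow from the JDWP tag constants reusing the descriptors' ASCII
// values):
//
//   BasicTagFromDescriptor("I")                  == JDWP::JT_INT     // 'I'
//   BasicTagFromDescriptor("[I")                 == JDWP::JT_ARRAY   // '['
//   BasicTagFromDescriptor("Ljava/lang/String;") == JDWP::JT_OBJECT  // 'L'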

static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  std::string temp;
  const char* descriptor = klass->GetDescriptor(&temp);
  return BasicTagFromDescriptor(descriptor);
}

static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  CHECK(c != nullptr);
  if (c->IsArrayClass()) {
    return JDWP::JT_ARRAY;
  }
  if (c->IsStringClass()) {
    return JDWP::JT_STRING;
  }
  if (c->IsClassClass()) {
    return JDWP::JT_CLASS_OBJECT;
  }
  {
    ObjPtr<mirror::Class> thread_class =
        soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread);
    if (thread_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD;
    }
  }
  {
    ObjPtr<mirror::Class> thread_group_class =
        soa.Decode<mirror::Class>(WellKnownClasses::java_lang_ThreadGroup);
    if (thread_group_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD_GROUP;
    }
  }
  {
    ObjPtr<mirror::Class> class_loader_class =
        soa.Decode<mirror::Class>(WellKnownClasses::java_lang_ClassLoader);
    if (class_loader_class->IsAssignableFrom(c)) {
      return JDWP::JT_CLASS_LOADER;
    }
  }
  return JDWP::JT_OBJECT;
}

/*
 * Objects declared to hold Object might actually hold a more specific
 * type.  The debugger may take a special interest in these (e.g. it
 * wants to display the contents of Strings), so we want to return an
 * appropriate tag.
 *
 * Null objects are tagged JT_OBJECT.
 */
JDWP::JdwpTag Dbg::TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) {
  return (o == nullptr) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
}

static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_BOOLEAN:
  case JDWP::JT_BYTE:
  case JDWP::JT_CHAR:
  case JDWP::JT_FLOAT:
  case JDWP::JT_DOUBLE:
  case JDWP::JT_INT:
  case JDWP::JT_LONG:
  case JDWP::JT_SHORT:
  case JDWP::JT_VOID:
    return true;
  default:
    return false;
  }
}

void Dbg::StartJdwp() {
  if (!gJdwpAllowed || !IsJdwpConfigured()) {
    // No JDWP for you!
    return;
  }

  CHECK(gRegistry == nullptr);
  gRegistry = new ObjectRegistry;

  // Init JDWP if the debugger is enabled. This may connect out to a
  // debugger, passively listen for a debugger, or block waiting for a
  // debugger.
  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
  if (gJdwpState == nullptr) {
    // We probably failed because some other process has the port already, which means that
    // if we don't abort, the user is likely to think they're talking to us when they're actually
    // talking to that other process.
    LOG(FATAL) << "Debugger thread failed to initialize";
  }

  // If a debugger has already attached, send the "welcome" message.
  // This may cause us to suspend all threads.
  if (gJdwpState->IsActive()) {
    ScopedObjectAccess soa(Thread::Current());
    gJdwpState->PostVMStart();
  }
}

void Dbg::StopJdwp() {
  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
  // destruction of gJdwpState).
  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
    gJdwpState->PostVMDeath();
  }
  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
  Dispose();
  delete gJdwpState;
  gJdwpState = nullptr;
  delete gRegistry;
  gRegistry = nullptr;
}

void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);
  }
}

void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

bool Dbg::IsJdwpAllowed() {
  return gJdwpAllowed;
}

DebugInvokeReq* Dbg::GetInvokeReq() {
  return Thread::Current()->GetInvokeReq();
}

Thread* Dbg::GetDebugThread() {
  return (gJdwpState != nullptr) ? gJdwpState->GetDebugThread() : nullptr;
}

void Dbg::ClearWaitForEventThread() {
  gJdwpState->ReleaseJdwpTokenForEvent();
}

void Dbg::Connected() {
  CHECK(!gDebuggerConnected);
  VLOG(jdwp) << "JDWP has attached";
  gDebuggerConnected = true;
  gDisposed = false;
}

bool Dbg::RequiresDeoptimization() {
  // We don't need deoptimization if everything runs with interpreter after
  // enabling -Xint mode.
  return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
}

void Dbg::GoActive() {
  // Enable all debugging features, including scans for breakpoints.
  // This is a no-op if we're already active.
  // Only called from the JDWP handler thread.
  if (IsDebuggerActive()) {
    return;
  }

  Thread* const self = Thread::Current();
  {
    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
    ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
    CHECK_EQ(gBreakpoints.size(), 0U);
  }

  {
    MutexLock mu(self, *Locks::deoptimization_lock_);
    CHECK_EQ(deoptimization_requests_.size(), 0U);
    CHECK_EQ(full_deoptimization_event_count_, 0U);
    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
    CHECK_EQ(method_enter_event_ref_count_, 0U);
    CHECK_EQ(method_exit_event_ref_count_, 0U);
    CHECK_EQ(field_read_event_ref_count_, 0U);
    CHECK_EQ(field_write_event_ref_count_, 0U);
    CHECK_EQ(exception_catch_event_ref_count_, 0U);
  }

  Runtime* runtime = Runtime::Current();
  // Best effort deoptimization if the runtime is non-Java debuggable. This happens when
  // ro.debuggable is set, but the application is not debuggable, or when a standalone
  // dalvikvm invocation is not passed the debuggable option (-Xcompiler-option --debuggable).
  //
  // The performance cost of this is non-negligible during native-debugging due to the
  // forced JIT, so we keep the AOT code in that case in exchange for limited native debugging.
  if (!runtime->IsJavaDebuggable() &&
      !runtime->GetInstrumentation()->IsForcedInterpretOnly() &&
      !runtime->IsNativeDebuggable()) {
    runtime->DeoptimizeBootImage();
  }

  ScopedSuspendAll ssa(__FUNCTION__);
  if (RequiresDeoptimization()) {
    runtime->GetInstrumentation()->EnableDeoptimization();
  }
  instrumentation_events_ = 0;
  gDebuggerActive = true;
  LOG(INFO) << "Debugger is active";
}

void Dbg::Disconnected() {
  CHECK(gDebuggerConnected);

  LOG(INFO) << "Debugger is no longer active";

  // Suspend all threads and exclusively acquire the mutator lock. Remove the debugger as a listener
  // and clear the object registry.
  Runtime* runtime = Runtime::Current();
  Thread* self = Thread::Current();
  {
    // Required for DisableDeoptimization.
    gc::ScopedGCCriticalSection gcs(self,
                                    gc::kGcCauseInstrumentation,
                                    gc::kCollectorTypeInstrumentation);
    ScopedSuspendAll ssa(__FUNCTION__);
    // Debugger may not be active at this point.
    if (IsDebuggerActive()) {
      {
        // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
        // This prevents us from having any pending deoptimization request when the debugger attaches
        // to us again while no event has been requested yet.
        MutexLock mu(self, *Locks::deoptimization_lock_);
        deoptimization_requests_.clear();
        full_deoptimization_event_count_ = 0U;
      }
      if (instrumentation_events_ != 0) {
        runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
                                                      instrumentation_events_);
        instrumentation_events_ = 0;
      }
      if (RequiresDeoptimization()) {
        runtime->GetInstrumentation()->DisableDeoptimization(kDbgInstrumentationKey);
      }
      gDebuggerActive = false;
    }
  }

  {
    ScopedObjectAccess soa(self);
    gRegistry->Clear();
  }

  gDebuggerConnected = false;
}

void Dbg::ConfigureJdwp(const JDWP::JdwpOptions& jdwp_options) {
  CHECK_NE(jdwp_options.transport, JDWP::kJdwpTransportUnknown);
  gJdwpOptions = jdwp_options;
  gJdwpConfigured = true;
}

bool Dbg::IsJdwpConfigured() {
  return gJdwpConfigured;
}

int64_t Dbg::LastDebuggerActivity() {
  return gJdwpState->LastDebuggerActivity();
}

void Dbg::UndoDebuggerSuspensions() {
  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
}

std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id, &error);
  if (o == nullptr) {
    if (error == JDWP::ERR_NONE) {
      return "null";
    } else {
      return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
    }
  }
  if (!o->IsClass()) {
    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
  }
  return GetClassName(o->AsClass());
}

std::string Dbg::GetClassName(mirror::Class* klass) {
  if (klass == nullptr) {
    return "null";
  }
  std::string temp;
  return DescriptorToName(klass->GetDescriptor(&temp));
}

JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, &status);
  if (c == nullptr) {
    *class_object_id = 0;
    return status;
  }
  *class_object_id = gRegistry->Add(c);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, &status);
  if (c == nullptr) {
    *superclass_id = 0;
    return status;
  }
  if (c->IsInterface()) {
    // http://code.google.com/p/android/issues/detail?id=20856
    *superclass_id = 0;
  } else {
    *superclass_id = gRegistry->Add(c->GetSuperClass());
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(id, &error);
  if (c == nullptr) {
    return error;
  }
  expandBufAddObjectId(pReply, gRegistry->Add(c->GetClassLoader()));
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(id, &error);
  if (c == nullptr) {
    return error;
  }

  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;

  // Set ACC_SUPER. Dex files don't contain this flag but only classes are supposed to have it set,
  // not interfaces.
  // Class.getModifiers doesn't return it, but JDWP does, so we set it here.
  if ((access_flags & kAccInterface) == 0) {
    access_flags |= kAccSuper;
  }

  expandBufAdd4BE(pReply, access_flags);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  // Ensure all threads are suspended while we read objects' lock words.
  Thread* self = Thread::Current();
  CHECK_EQ(self->GetState(), kRunnable);

  MonitorInfo monitor_info;
  {
    ScopedThreadSuspension sts(self, kSuspended);
    ScopedSuspendAll ssa(__FUNCTION__);
    monitor_info = MonitorInfo(o);
  }
  if (monitor_info.owner_ != nullptr) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeerFromOtherThread()));
  } else {
    expandBufAddObjectId(reply, gRegistry->Add(nullptr));
  }
  expandBufAdd4BE(reply, monitor_info.entry_count_);
  expandBufAdd4BE(reply, monitor_info.waiters_.size());
  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeerFromOtherThread()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
                                      std::vector<JDWP::ObjectId>* monitors,
                                      std::vector<uint32_t>* stack_depths) {
  struct OwnedMonitorVisitor : public StackVisitor {
    OwnedMonitorVisitor(Thread* thread, Context* context,
                        std::vector<JDWP::ObjectId>* monitor_vector,
                        std::vector<uint32_t>* stack_depth_vector)
        REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        current_stack_depth(0),
        monitors(monitor_vector),
        stack_depths(stack_depth_vector) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
        ++current_stack_depth;
      }
      return true;
    }

    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
      visitor->stack_depths->push_back(visitor->current_stack_depth);
    }

    size_t current_stack_depth;
    std::vector<JDWP::ObjectId>* const monitors;
    std::vector<uint32_t>* const stack_depths;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (thread == nullptr) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  std::unique_ptr<Context> context(Context::Create());
  OwnedMonitorVisitor visitor(thread, context.get(), monitors, stack_depths);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
                                         JDWP::ObjectId* contended_monitor) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  *contended_monitor = 0;
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (thread == nullptr) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  mirror::Object* contended_monitor_obj = Monitor::GetContendedMonitor(thread);
  // Add() requires the thread_list_lock_ not held to avoid the lock
  // level violation.
  *contended_monitor = gRegistry->Add(contended_monitor_obj);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                       std::vector<uint64_t>* counts) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  VariableSizedHandleScope hs(Thread::Current());
  std::vector<Handle<mirror::Class>> classes;
  counts->clear();
  for (size_t i = 0; i < class_ids.size(); ++i) {
    JDWP::JdwpError error;
    ObjPtr<mirror::Class> c = DecodeClass(class_ids[i], &error);
    if (c == nullptr) {
      return error;
    }
    classes.push_back(hs.NewHandle(c));
    counts->push_back(0);
  }
  heap->CountInstances(classes, false, &(*counts)[0]);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
                                  std::vector<JDWP::ObjectId>* instances) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // We only want reachable instances, so do a GC.
  heap->CollectGarbage(false);
  JDWP::JdwpError error;
  ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  VariableSizedHandleScope hs(Thread::Current());
  std::vector<Handle<mirror::Object>> raw_instances;
  Runtime::Current()->GetHeap()->GetInstances(hs, hs.NewHandle(c), max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    instances->push_back(gRegistry->Add(raw_instances[i].Get()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                         std::vector<JDWP::ObjectId>* referring_objects) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  JDWP::JdwpError error;
  ObjPtr<mirror::Object> o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  VariableSizedHandleScope hs(Thread::Current());
  std::vector<Handle<mirror::Object>> raw_instances;
  heap->GetReferringObjects(hs, hs.NewHandle(o), max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    referring_objects->push_back(gRegistry->Add(raw_instances[i].Get()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->DisableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  // Unlike DisableCollection, the JDWP spec does not state that an invalid object causes an error.
  // The RI also ignores these cases and never returns an error. However, it's not obvious why this
  // command should behave differently from the DisableCollection and IsCollected commands. So
  // let's be more strict and return an error if this happens.
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->EnableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool* is_collected) {
  *is_collected = true;
  if (object_id == 0) {
    // Null object id is invalid.
    return JDWP::ERR_INVALID_OBJECT;
  }
  // The JDWP spec states an INVALID_OBJECT error is returned if the object ID is not valid.
  // However the RI seems to ignore this and assumes the object has been collected.
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o != nullptr) {
    *is_collected = gRegistry->IsCollected(object_id);
  }
  return JDWP::ERR_NONE;
}

void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count) {
  gRegistry->DisposeObject(object_id, reference_count);
}

JDWP::JdwpTypeTag Dbg::GetTypeTag(ObjPtr<mirror::Class> klass) {
  DCHECK(klass != nullptr);
  if (klass->IsArrayClass()) {
    return JDWP::TT_ARRAY;
  } else if (klass->IsInterface()) {
    return JDWP::TT_INTERFACE;
  } else {
    return JDWP::TT_CLASS;
  }
}

JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, class_id);
  return JDWP::ERR_NONE;
}

// Get the complete list of reference classes (i.e. all classes except
// the primitive types).
// Returns a newly-allocated buffer full of RefTypeId values.
class ClassListCreator : public ClassVisitor {
 public:
  explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes) : classes_(classes) {}

  bool operator()(ObjPtr<mirror::Class> c) OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    if (!c->IsPrimitive()) {
      classes_->push_back(Dbg::GetObjectRegistry()->AddRefType(c));
    }
    return true;
  }

 private:
  std::vector<JDWP::RefTypeId>* const classes_;
};

void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) {
  ClassListCreator clc(classes);
  Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(&clc);
}

JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
                                  uint32_t* pStatus, std::string* pDescriptor) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  if (c->IsArrayClass()) {
    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
    *pTypeTag = JDWP::TT_ARRAY;
  } else {
    if (c->IsErroneous()) {
      *pStatus = JDWP::CS_ERROR;
    } else {
      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
    }
    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
  }

  if (pDescriptor != nullptr) {
    std::string temp;
    *pDescriptor = c->GetDescriptor(&temp);
  }
  return JDWP::ERR_NONE;
}

void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids) {
  std::vector<ObjPtr<mirror::Class>> classes;
  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
  ids->clear();
  for (ObjPtr<mirror::Class> c : classes) {
    ids->push_back(gRegistry->Add(c));
  }
}

JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());

  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, type_id);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  std::string temp;
  *signature = c->GetDescriptor(&temp);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSourceDebugExtension(JDWP::RefTypeId class_id,
                                             std::string* extension_data) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  StackHandleScope<1> hs(Thread::Current());
  Handle<mirror::Class> klass(hs.NewHandle(c));
  const char* data = annotations::GetSourceDebugExtension(klass);
  if (data == nullptr) {
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  *extension_data = data;
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string* result) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  const char* source_file = c->GetSourceFile();
  if (source_file == nullptr) {
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  *result = source_file;
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (error != JDWP::ERR_NONE) {
    *tag = JDWP::JT_VOID;
    return error;
  }
  *tag = TagFromObject(soa, o);
  return JDWP::ERR_NONE;
}

size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_VOID:
    return 0;
  case JDWP::JT_BYTE:
  case JDWP::JT_BOOLEAN:
    return 1;
  case JDWP::JT_CHAR:
  case JDWP::JT_SHORT:
    return 2;
  case JDWP::JT_FLOAT:
  case JDWP::JT_INT:
    return 4;
  case JDWP::JT_ARRAY:
  case JDWP::JT_OBJECT:
  case JDWP::JT_STRING:
  case JDWP::JT_THREAD:
  case JDWP::JT_THREAD_GROUP:
  case JDWP::JT_CLASS_LOADER:
  case JDWP::JT_CLASS_OBJECT:
    return sizeof(JDWP::ObjectId);
  case JDWP::JT_DOUBLE:
  case JDWP::JT_LONG:
    return 8;
  default:
    LOG(FATAL) << "Unknown tag " << tag;
    return -1;
  }
}
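
// Worked examples of the widths above (assuming the usual 8-byte ObjectId):
//
//   GetTagWidth(JDWP::JT_BOOLEAN) == 1
//   GetTagWidth(JDWP::JT_INT)     == 4
//   GetTagWidth(JDWP::JT_LONG)    == 8
//   GetTagWidth(JDWP::JT_STRING)  == sizeof(JDWP::ObjectId)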

JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int32_t* length) {
  JDWP::JdwpError error;
  mirror::Array* a = DecodeNonNullArray(array_id, &error);
  if (a == nullptr) {
    return error;
  }
  *length = a->GetLength();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Array* a = DecodeNonNullArray(array_id, &error);
  if (a == nullptr) {
    return error;
  }

  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
  expandBufAdd1(pReply, element_tag);
  expandBufAdd4BE(pReply, count);

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    uint8_t* dst = expandBufAddSpace(pReply, count * width);
    if (width == 8) {
      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
    } else if (width == 4) {
      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
    } else if (width == 2) {
      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
    } else {
      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
      memcpy(dst, &src[offset * width], count * width);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      mirror::Object* element = oa->Get(offset + i);
      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
                                                        : element_tag;
      expandBufAdd1(pReply, specific_tag);
      expandBufAddObjectId(pReply, gRegistry->Add(element));
    }
  }

  return JDWP::ERR_NONE;
}

template <typename T>
static void CopyArrayData(mirror::Array* a, JDWP::Request* src, int offset, int count)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: fix when annotalysis correctly handles non-member functions.
  DCHECK(a->GetClass()->IsPrimitiveArray());

  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
  for (int i = 0; i < count; ++i) {
    *dst++ = src->ReadValue(sizeof(T));
  }
}
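
// Note the copy above is width-based rather than type-based: an int[] and a float[] are both
// filled through the 4-byte instantiation, e.g. (sketch):
//
//   CopyArrayData<uint32_t>(array, request, /* offset */ 0, /* count */ 3);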

JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
                                      JDWP::Request* request) {
  JDWP::JdwpError error;
  mirror::Array* dst = DecodeNonNullArray(array_id, &error);
  if (dst == nullptr) {
    return error;
  }

  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    if (width == 8) {
      CopyArrayData<uint64_t>(dst, request, offset, count);
    } else if (width == 4) {
      CopyArrayData<uint32_t>(dst, request, offset, count);
    } else if (width == 2) {
      CopyArrayData<uint16_t>(dst, request, offset, count);
    } else {
      CopyArrayData<uint8_t>(dst, request, offset, count);
    }
  } else {
    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      JDWP::ObjectId id = request->ReadObjectId();
      mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
      if (error != JDWP::ERR_NONE) {
        return error;
      }
      // Check if the object's type is compatible with the array's type.
      if (o != nullptr && !o->InstanceOf(oa->GetClass()->GetComponentType())) {
        return JDWP::ERR_TYPE_MISMATCH;
      }
      oa->Set<false>(offset + i, o);
    }
  }

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::CreateString(const std::string& str, JDWP::ObjectId* new_string_id) {
  Thread* self = Thread::Current();
  mirror::String* new_string = mirror::String::AllocFromModifiedUtf8(self, str.c_str());
  if (new_string == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    LOG(ERROR) << "Could not allocate string";
    *new_string_id = 0;
    return JDWP::ERR_OUT_OF_MEMORY;
  }
  *new_string_id = gRegistry->Add(new_string);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object_id) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    *new_object_id = 0;
    return error;
  }
  Thread* self = Thread::Current();
  ObjPtr<mirror::Object> new_object;
  if (c->IsStringClass()) {
    // Special case for java.lang.String.
    gc::AllocatorType allocator_type = Runtime::Current()->GetHeap()->GetCurrentAllocator();
    new_object = mirror::String::AllocEmptyString<true>(self, allocator_type);
  } else {
    new_object = c->AllocObject(self);
  }
  if (new_object == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    LOG(ERROR) << "Could not allocate object of type " << mirror::Class::PrettyDescriptor(c);
    *new_object_id = 0;
    return JDWP::ERR_OUT_OF_MEMORY;
  }
  *new_object_id = gRegistry->Add(new_object.Ptr());
  return JDWP::ERR_NONE;
}

/*
 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
 */
JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
                                       JDWP::ObjectId* new_array_id) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(array_class_id, &error);
  if (c == nullptr) {
    *new_array_id = 0;
    return error;
  }
  Thread* self = Thread::Current();
  gc::Heap* heap = Runtime::Current()->GetHeap();
  mirror::Array* new_array = mirror::Array::Alloc<true>(self, c, length,
                                                        c->GetComponentSizeShift(),
                                                        heap->GetCurrentAllocator());
  if (new_array == nullptr) {
    DCHECK(self->IsExceptionPending());
    self->ClearException();
    LOG(ERROR) << "Could not allocate array of type " << mirror::Class::PrettyDescriptor(c);
    *new_array_id = 0;
    return JDWP::ERR_OUT_OF_MEMORY;
  }
  *new_array_id = gRegistry->Add(new_array);
  return JDWP::ERR_NONE;
}

JDWP::FieldId Dbg::ToFieldId(const ArtField* f) {
  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
}

static JDWP::MethodId ToMethodId(ArtMethod* m)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(GetCanonicalMethod(m)));
}

static ArtField* FromFieldId(JDWP::FieldId fid)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return reinterpret_cast<ArtField*>(static_cast<uintptr_t>(fid));
}

static ArtMethod* FromMethodId(JDWP::MethodId mid)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return reinterpret_cast<ArtMethod*>(static_cast<uintptr_t>(mid));
}
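
// These ids are simply the canonical field/method pointers in disguise, so the conversions
// round-trip (sketch, not exercised here):
//
//   FromFieldId(Dbg::ToFieldId(f)) == f
//   FromMethodId(ToMethodId(m))    == GetCanonicalMethod(m)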

bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) {
  CHECK(event_thread != nullptr);
  JDWP::JdwpError error;
  mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(
      expected_thread_id, &error);
  return expected_thread_peer == event_thread->GetPeerFromOtherThread();
}

bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location,
                        const JDWP::EventLocation& event_location) {
  if (expected_location.dex_pc != event_location.dex_pc) {
    return false;
  }
  ArtMethod* m = FromMethodId(expected_location.method_id);
  return m == event_location.method;
}

bool Dbg::MatchType(ObjPtr<mirror::Class> event_class, JDWP::RefTypeId class_id) {
  if (event_class == nullptr) {
    return false;
  }
  JDWP::JdwpError error;
  ObjPtr<mirror::Class> expected_class = DecodeClass(class_id, &error);
  CHECK(expected_class != nullptr);
  return expected_class->IsAssignableFrom(event_class);
}

bool Dbg::MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
                     ArtField* event_field) {
  ArtField* expected_field = FromFieldId(expected_field_id);
  if (expected_field != event_field) {
    return false;
  }
  return Dbg::MatchType(event_field->GetDeclaringClass(), expected_type_id);
}

bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) {
  JDWP::JdwpError error;
  mirror::Object* modifier_instance = gRegistry->Get<mirror::Object*>(expected_instance_id, &error);
  return modifier_instance == event_instance;
}

void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, ArtMethod* m, uint32_t dex_pc) {
  if (m == nullptr) {
    memset(location, 0, sizeof(*location));
  } else {
    mirror::Class* c = m->GetDeclaringClass();
    location->type_tag = GetTypeTag(c);
    location->class_id = gRegistry->AddRefType(c);
    // The RI seems to return 0 for all obsolete methods. For compatibility we shall do the same.
    location->method_id = m->IsObsolete() ? 0 : ToMethodId(m);
    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
  }
}

std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
  ArtMethod* m = FromMethodId(method_id);
  if (m == nullptr) {
    return "null";
  }
  return m->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName();
}

bool Dbg::IsMethodObsolete(JDWP::MethodId method_id) {
  ArtMethod* m = FromMethodId(method_id);
  if (m == nullptr) {
    // NB: Since we return 0 as the MID for obsolete methods, we want to default to true here.
    return true;
  }
  return m->IsObsolete();
}

std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
  ArtField* f = FromFieldId(field_id);
  if (f == nullptr) {
    return "null";
  }
  return f->GetName();
}

/*
 * Augment the access flags for synthetic methods and fields by setting
 * the (as described by the spec) "0xf0000000 bit".  Also, strip out any
 * flags not specified by the Java programming language.
 */
static uint32_t MangleAccessFlags(uint32_t accessFlags) {
  accessFlags &= kAccJavaFlagsMask;
  if ((accessFlags & kAccSynthetic) != 0) {
    accessFlags |= 0xf0000000;
  }
  return accessFlags;
}
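
// Worked example, using art's modifier values kAccPublic == 0x0001 and kAccSynthetic == 0x1000:
//
//   MangleAccessFlags(kAccPublic | kAccSynthetic) == 0xf0001001
//   MangleAccessFlags(kAccPublic)                 == 0x00000001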

/*
 * Circularly shifts registers so that arguments come first. Debuggers
 * expect slots to begin with arguments, but dex code places them at
 * the end.
 */
static uint16_t MangleSlot(uint16_t slot, ArtMethod* m)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // We should not get here for a method without code (native, proxy or abstract). Log it and
    // return the slot as is since all registers are arguments.
    LOG(WARNING) << "Trying to mangle slot for method without code " << m->PrettyMethod();
    return slot;
  }
  uint16_t ins_size = code_item->ins_size_;
  uint16_t locals_size = code_item->registers_size_ - ins_size;
  if (slot >= locals_size) {
    return slot - locals_size;
  } else {
    return slot + ins_size;
  }
}
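
// Worked example: for a method with registers_size_ == 5 and ins_size_ == 2 (so locals_size == 3),
// dex registers map to debugger slots as
//
//   v3, v4 (the arguments)  ->  slots 0, 1
//   v0, v1, v2 (the locals) ->  slots 2, 3, 4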

static size_t GetMethodNumArgRegistersIncludingThis(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  uint32_t num_registers = ArtMethod::NumArgRegisters(method->GetShorty());
  if (!method->IsStatic()) {
    ++num_registers;
  }
  return num_registers;
}
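
// For example, a virtual method long f(int, double) has shorty "JID":
// NumArgRegisters counts 1 (int) + 2 (double) == 3 argument registers, and the
// implicit 'this' brings the total to 4.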

/*
 * Circularly shifts registers so that arguments come last. Reverts
 * slots to dex style argument placement.
 */
static uint16_t DemangleSlot(uint16_t slot, ArtMethod* m, JDWP::JdwpError* error)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // We should not get here for a method without code (native, proxy or abstract). Log it and
    // return the slot as is since all registers are arguments.
    LOG(WARNING) << "Trying to demangle slot for method without code "
                 << m->PrettyMethod();
    uint16_t vreg_count = GetMethodNumArgRegistersIncludingThis(m);
    if (slot < vreg_count) {
      *error = JDWP::ERR_NONE;
      return slot;
    }
  } else {
    if (slot < code_item->registers_size_) {
      uint16_t ins_size = code_item->ins_size_;
      uint16_t locals_size = code_item->registers_size_ - ins_size;
      *error = JDWP::ERR_NONE;
      return (slot < ins_size) ? slot + locals_size : slot - ins_size;
    }
  }

  // Slot is invalid in the method.
  LOG(ERROR) << "Invalid local slot " << slot << " for method " << m->PrettyMethod();
  *error = JDWP::ERR_INVALID_SLOT;
  return DexFile::kDexNoIndex16;
}
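
// Continuing the worked example above (registers_size_ == 5, ins_size_ == 2):
// DemangleSlot inverts the mapping, so slots 0..1 map back to v3..v4 (slot <
// ins_size: add locals_size) and slots 2..4 map back to v0..v2 (subtract
// ins_size). Any slot >= registers_size_ is rejected with ERR_INVALID_SLOT.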

JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic,
                                          JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  size_t instance_field_count = c->NumInstanceFields();
  size_t static_field_count = c->NumStaticFields();

  expandBufAdd4BE(pReply, instance_field_count + static_field_count);

  for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
    ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) :
        c->GetStaticField(i - instance_field_count);
    expandBufAddFieldId(pReply, ToFieldId(f));
    expandBufAddUtf8String(pReply, f->GetName());
    expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
    if (with_generic) {
      static const char genericSignature[1] = "";
      expandBufAddUtf8String(pReply, genericSignature);
    }
    expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
  }
  return JDWP::ERR_NONE;
}
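
// The reply assembled above follows the JDWP ReferenceType.Fields(WithGeneric)
// layout: a 4-byte big-endian field count, then per field a FieldId, the UTF-8
// name and type descriptor (plus an empty generic signature when requested)
// and the 4-byte mangled access flags.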

JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
                                           JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  expandBufAdd4BE(pReply, c->NumMethods());

  auto* cl = Runtime::Current()->GetClassLinker();
  auto ptr_size = cl->GetImagePointerSize();
  for (ArtMethod& m : c->GetMethods(ptr_size)) {
    expandBufAddMethodId(pReply, ToMethodId(&m));
    expandBufAddUtf8String(pReply, m.GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName());
    expandBufAddUtf8String(
        pReply, m.GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetSignature().ToString());
    if (with_generic) {
      const char* generic_signature = "";
      expandBufAddUtf8String(pReply, generic_signature);
    }
    expandBufAdd4BE(pReply, MangleAccessFlags(m.GetAccessFlags()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  Thread* self = Thread::Current();
  ObjPtr<mirror::Class> c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  size_t interface_count = c->NumDirectInterfaces();
  expandBufAdd4BE(pReply, interface_count);
  for (size_t i = 0; i < interface_count; ++i) {
    ObjPtr<mirror::Class> interface = mirror::Class::GetDirectInterface(self, c, i);
    DCHECK(interface != nullptr);
    expandBufAddRefTypeId(pReply, gRegistry->AddRefType(interface));
  }
  return JDWP::ERR_NONE;
}

void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply) {
  struct DebugCallbackContext {
    int numItems;
    JDWP::ExpandBuf* pReply;

    static bool Callback(void* context, const DexFile::PositionInfo& entry) {
      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
      expandBufAdd8BE(pContext->pReply, entry.address_);
      expandBufAdd4BE(pContext->pReply, entry.line_);
      pContext->numItems++;
      return false;
    }
  };
  ArtMethod* m = FromMethodId(method_id);
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  uint64_t start, end;
  if (code_item == nullptr) {
    DCHECK(m->IsNative() || m->IsProxyMethod());
    start = -1;
    end = -1;
  } else {
    start = 0;
    // Return the index of the last instruction.
    end = code_item->insns_size_in_code_units_ - 1;
  }

  expandBufAdd8BE(pReply, start);
  expandBufAdd8BE(pReply, end);

  // Add numLines later.
  size_t numLinesOffset = expandBufGetLength(pReply);
  expandBufAdd4BE(pReply, 0);

  DebugCallbackContext context;
  context.numItems = 0;
  context.pReply = pReply;

  if (code_item != nullptr) {
    m->GetDexFile()->DecodeDebugPositionInfo(code_item, DebugCallbackContext::Callback, &context);
  }

  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
}
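
// The resulting Method.LineTable reply is: an 8-byte start and end code index
// (both -1 when there is no code item), a 4-byte line count patched in at
// numLinesOffset once decoding finishes, then one (8-byte address, 4-byte line
// number) pair per mapped source line.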

void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
                              JDWP::ExpandBuf* pReply) {
  struct DebugCallbackContext {
    ArtMethod* method;
    JDWP::ExpandBuf* pReply;
    size_t variable_count;
    bool with_generic;

    static void Callback(void* context, const DexFile::LocalInfo& entry)
        REQUIRES_SHARED(Locks::mutator_lock_) {
      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);

      uint16_t slot = entry.reg_;
      VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
                                 pContext->variable_count, entry.start_address_,
                                 entry.end_address_ - entry.start_address_,
                                 entry.name_, entry.descriptor_, entry.signature_, slot,
                                 MangleSlot(slot, pContext->method));

      slot = MangleSlot(slot, pContext->method);

      expandBufAdd8BE(pContext->pReply, entry.start_address_);
      expandBufAddUtf8String(pContext->pReply, entry.name_);
      expandBufAddUtf8String(pContext->pReply, entry.descriptor_);
      if (pContext->with_generic) {
        expandBufAddUtf8String(pContext->pReply, entry.signature_);
      }
      expandBufAdd4BE(pContext->pReply, entry.end_address_ - entry.start_address_);
      expandBufAdd4BE(pContext->pReply, slot);

      ++pContext->variable_count;
    }
  };
  ArtMethod* m = FromMethodId(method_id);

  // arg_count considers doubles and longs to take 2 units.
  // variable_count considers everything to take 1 unit.
  expandBufAdd4BE(pReply, GetMethodNumArgRegistersIncludingThis(m));

  // We don't know the total number of variables yet, so leave a blank and update it later.
  size_t variable_count_offset = expandBufGetLength(pReply);
  expandBufAdd4BE(pReply, 0);

  DebugCallbackContext context;
  context.method = m;
  context.pReply = pReply;
  context.variable_count = 0;
  context.with_generic = with_generic;

  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item != nullptr) {
    m->GetDexFile()->DecodeDebugLocalInfo(
        code_item, m->IsStatic(), m->GetDexMethodIndex(), DebugCallbackContext::Callback,
        &context);
  }

  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
}
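
// The resulting Method.VariableTable(WithGeneric) reply is: a 4-byte argCnt in
// dex argument registers (doubles and longs count twice), a 4-byte variable
// count patched in afterwards, then per variable the 8-byte start address,
// name, descriptor (and generic signature if requested), the 4-byte length in
// code units and the 4-byte mangled slot.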

void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
                                  JDWP::ExpandBuf* pReply) {
  ArtMethod* m = FromMethodId(method_id);
  JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
  OutputJValue(tag, return_value, pReply);
}

void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
                           JDWP::ExpandBuf* pReply) {
  ArtField* f = FromFieldId(field_id);
  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
  OutputJValue(tag, field_value, pReply);
}

JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
                                  std::vector<uint8_t>* bytecodes) {
  ArtMethod* m = FromMethodId(method_id);
  if (m == nullptr) {
    return JDWP::ERR_INVALID_METHODID;
  }
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  size_t byte_count = code_item->insns_size_in_code_units_ * 2;
  const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
  const uint8_t* end = begin + byte_count;
  for (const uint8_t* p = begin; p != end; ++p) {
    bytecodes->push_back(*p);
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
}

JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
}

static JValue GetArtFieldValue(ArtField* f, mirror::Object* o)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
  JValue field_value;
  switch (fieldType) {
    case Primitive::kPrimBoolean:
      field_value.SetZ(f->GetBoolean(o));
      return field_value;

    case Primitive::kPrimByte:
      field_value.SetB(f->GetByte(o));
      return field_value;

    case Primitive::kPrimChar:
      field_value.SetC(f->GetChar(o));
      return field_value;

    case Primitive::kPrimShort:
      field_value.SetS(f->GetShort(o));
      return field_value;

    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      // Int and Float must be treated as 32-bit values in JDWP.
      field_value.SetI(f->GetInt(o));
      return field_value;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      // Long and Double must be treated as 64-bit values in JDWP.
      field_value.SetJ(f->GetLong(o));
      return field_value;

    case Primitive::kPrimNot:
      field_value.SetL(f->GetObject(o).Ptr());
      return field_value;

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Attempt to read from field of type 'void'";
      UNREACHABLE();
  }
  LOG(FATAL) << "Attempt to read from field of unknown type";
  UNREACHABLE();
}

static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
                                         JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
                                         bool is_static)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(ref_type_id, &error);
  if (ref_type_id != 0 && c == nullptr) {
    return error;
  }

  Thread* self = Thread::Current();
  StackHandleScope<2> hs(self);
  MutableHandle<mirror::Object>
      o(hs.NewHandle(Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error)));
  if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  ArtField* f = FromFieldId(field_id);

  mirror::Class* receiver_class = c;
  if (receiver_class == nullptr && o != nullptr) {
    receiver_class = o->GetClass();
  }

  // TODO: should we give up now if receiver_class is null?
  if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
    LOG(INFO) << "ERR_INVALID_FIELDID: " << f->PrettyField() << " "
              << receiver_class->PrettyClass();
    return JDWP::ERR_INVALID_FIELDID;
  }

  // Ensure the field's class is initialized.
  Handle<mirror::Class> klass(hs.NewHandle(f->GetDeclaringClass()));
  if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, klass, true, false)) {
    LOG(WARNING) << "Not able to initialize class for GetValues: "
                 << mirror::Class::PrettyClass(klass.Get());
  }

  // The RI only enforces the static/non-static mismatch in one direction.
  // TODO: should we change the tests and check both?
  if (is_static) {
    if (!f->IsStatic()) {
      return JDWP::ERR_INVALID_FIELDID;
    }
  } else {
    if (f->IsStatic()) {
      LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.GetValues"
                   << " on static field " << f->PrettyField();
    }
  }
  if (f->IsStatic()) {
    o.Assign(f->GetDeclaringClass());
  }

  JValue field_value(GetArtFieldValue(f, o.Get()));
  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
  Dbg::OutputJValue(tag, &field_value, pReply);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                   JDWP::ExpandBuf* pReply) {
  return GetFieldValueImpl(0, object_id, field_id, pReply, false);
}

JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
                                         JDWP::ExpandBuf* pReply) {
  return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
}

static JDWP::JdwpError SetArtFieldValue(ArtField* f, mirror::Object* o, uint64_t value, int width)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  Primitive::Type fieldType = f->GetTypeAsPrimitiveType();
  // Debugging only happens at runtime so we know we are not running in a transaction.
  static constexpr bool kNoTransactionMode = false;
  switch (fieldType) {
    case Primitive::kPrimBoolean:
      CHECK_EQ(width, 1);
      f->SetBoolean<kNoTransactionMode>(o, static_cast<uint8_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimByte:
      CHECK_EQ(width, 1);
      f->SetByte<kNoTransactionMode>(o, static_cast<uint8_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimChar:
      CHECK_EQ(width, 2);
      f->SetChar<kNoTransactionMode>(o, static_cast<uint16_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimShort:
      CHECK_EQ(width, 2);
      f->SetShort<kNoTransactionMode>(o, static_cast<int16_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimInt:
    case Primitive::kPrimFloat:
      CHECK_EQ(width, 4);
      // Int and Float must be treated as 32-bit values in JDWP.
      f->SetInt<kNoTransactionMode>(o, static_cast<int32_t>(value));
      return JDWP::ERR_NONE;

    case Primitive::kPrimLong:
    case Primitive::kPrimDouble:
      CHECK_EQ(width, 8);
      // Long and Double must be treated as 64-bit values in JDWP.
      f->SetLong<kNoTransactionMode>(o, value);
      return JDWP::ERR_NONE;

    case Primitive::kPrimNot: {
      JDWP::JdwpError error;
      mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value, &error);
      if (error != JDWP::ERR_NONE) {
        return JDWP::ERR_INVALID_OBJECT;
      }
      if (v != nullptr) {
        ObjPtr<mirror::Class> field_type;
        {
          StackHandleScope<2> hs(Thread::Current());
          HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
          HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
          field_type = f->GetType<true>();
        }
        if (!field_type->IsAssignableFrom(v->GetClass())) {
          return JDWP::ERR_INVALID_OBJECT;
        }
      }
      f->SetObject<kNoTransactionMode>(o, v);
      return JDWP::ERR_NONE;
    }

    case Primitive::kPrimVoid:
      LOG(FATAL) << "Attempt to write to field of type 'void'";
      UNREACHABLE();
  }
  LOG(FATAL) << "Attempt to write to field of unknown type";
  UNREACHABLE();
}

static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                         uint64_t value, int width, bool is_static)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  JDWP::JdwpError error;
  Thread* self = Thread::Current();
  StackHandleScope<2> hs(self);
  MutableHandle<mirror::Object>
      o(hs.NewHandle(Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error)));
  if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  ArtField* f = FromFieldId(field_id);

  // Ensure the field's class is initialized.
  Handle<mirror::Class> klass(hs.NewHandle(f->GetDeclaringClass()));
  if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(self, klass, true, false)) {
    LOG(WARNING) << "Not able to initialize class for SetValues: "
                 << mirror::Class::PrettyClass(klass.Get());
  }

  // The RI only enforces the static/non-static mismatch in one direction.
  // TODO: should we change the tests and check both?
  if (is_static) {
    if (!f->IsStatic()) {
      return JDWP::ERR_INVALID_FIELDID;
    }
  } else {
    if (f->IsStatic()) {
      LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues"
                   << " on static field " << f->PrettyField();
    }
  }
  if (f->IsStatic()) {
    o.Assign(f->GetDeclaringClass());
  }
  return SetArtFieldValue(f, o.Get(), value, width);
}

JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
                                   int width) {
  return SetFieldValueImpl(object_id, field_id, value, width, false);
}

JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
  return SetFieldValueImpl(0, field_id, value, width, true);
}

JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) {
  JDWP::JdwpError error;
  mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (obj == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    ObjPtr<mirror::Class> java_lang_String =
        soa.Decode<mirror::Class>(WellKnownClasses::java_lang_String);
    if (!java_lang_String->IsAssignableFrom(obj->GetClass())) {
      // This isn't a string.
      return JDWP::ERR_INVALID_STRING;
    }
  }
  *str = obj->AsString()->ToModifiedUtf8();
  return JDWP::ERR_NONE;
}

void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
  if (IsPrimitiveTag(tag)) {
    expandBufAdd1(pReply, tag);
    if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
      expandBufAdd1(pReply, return_value->GetI());
    } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
      expandBufAdd2BE(pReply, return_value->GetI());
    } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
      expandBufAdd4BE(pReply, return_value->GetI());
    } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
      expandBufAdd8BE(pReply, return_value->GetJ());
    } else {
      CHECK_EQ(tag, JDWP::JT_VOID);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::Object* value = return_value->GetL();
    expandBufAdd1(pReply, TagFromObject(soa, value));
    expandBufAddObjectId(pReply, gRegistry->Add(value));
  }
}
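
// Example encodings (big-endian, matching the tag widths above): an int
// 0x12345678 goes out as the tag byte 'I' followed by 12 34 56 78, while an
// object goes out as its runtime tag byte followed by the 8-byte ObjectId
// obtained from the registry.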

JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
    return error;
  }

  // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
  CHECK(thread_object != nullptr) << error;
  ArtField* java_lang_Thread_name_field =
      jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
  ObjPtr<mirror::String> s(java_lang_Thread_name_field->GetObject(thread_object)->AsString());
  if (s != nullptr) {
    *name = s->ToModifiedUtf8();
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroup");
  // Okay, so it's an object, but is it actually a thread?
  DecodeThread(soa, thread_id, &error);
  if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
    // Zombie threads are in the null group.
    expandBufAddObjectId(pReply, JDWP::ObjectId(0));
    error = JDWP::ERR_NONE;
  } else if (error == JDWP::ERR_NONE) {
    ObjPtr<mirror::Class> c = soa.Decode<mirror::Class>(WellKnownClasses::java_lang_Thread);
    CHECK(c != nullptr);
    ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group);
    CHECK(f != nullptr);
    ObjPtr<mirror::Object> group = f->GetObject(thread_object);
    CHECK(group != nullptr);
    JDWP::ObjectId thread_group_id = gRegistry->Add(group);
    expandBufAddObjectId(pReply, thread_group_id);
  }
  return error;
}

static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
                                         JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id,
                                                                                error);
  if (*error != JDWP::ERR_NONE) {
    return nullptr;
  }
  if (thread_group == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  ObjPtr<mirror::Class> c =
      soa.Decode<mirror::Class>(WellKnownClasses::java_lang_ThreadGroup);
  CHECK(c != nullptr);
  if (!c->IsAssignableFrom(thread_group->GetClass())) {
    // This is not a java.lang.ThreadGroup.
    *error = JDWP::ERR_INVALID_THREAD_GROUP;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return thread_group;
}

JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupName");
  ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_name);
  CHECK(f != nullptr);
  ObjPtr<mirror::String> s = f->GetObject(thread_group)->AsString();

  std::string thread_group_name(s->ToModifiedUtf8());
  expandBufAddUtf8String(pReply, thread_group_name);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  ObjPtr<mirror::Object> parent;
  {
    ScopedAssertNoThreadSuspension ants("Debugger: GetThreadGroupParent");
    ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_parent);
    CHECK(f != nullptr);
    parent = f->GetObject(thread_group);
  }
  JDWP::ObjectId parent_group_id = gRegistry->Add(parent);
  expandBufAddObjectId(pReply, parent_group_id);
  return JDWP::ERR_NONE;
}

static void GetChildThreadGroups(mirror::Object* thread_group,
                                 std::vector<JDWP::ObjectId>* child_thread_group_ids)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  CHECK(thread_group != nullptr);

  // Get the int "ngroups" count of this thread group...
  ArtField* ngroups_field = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_ngroups);
  CHECK(ngroups_field != nullptr);
  const int32_t size = ngroups_field->GetInt(thread_group);
  if (size == 0) {
    return;
  }

  // Get the ThreadGroup[] "groups" out of this thread group...
  ArtField* groups_field = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_groups);
  ObjPtr<mirror::Object> groups_array = groups_field->GetObject(thread_group);

  CHECK(groups_array != nullptr);
  CHECK(groups_array->IsObjectArray());

  ObjPtr<mirror::ObjectArray<mirror::Object>> groups_array_as_array =
      groups_array->AsObjectArray<mirror::Object>();

  // Copy the first 'size' elements out of the array into the result.
  ObjectRegistry* registry = Dbg::GetObjectRegistry();
  for (int32_t i = 0; i < size; ++i) {
    child_thread_group_ids->push_back(registry->Add(groups_array_as_array->Get(i)));
  }
}

JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
                                            JDWP::ExpandBuf* pReply) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }

  // Add child threads.
  {
    std::vector<JDWP::ObjectId> child_thread_ids;
    GetThreads(thread_group, &child_thread_ids);
    expandBufAdd4BE(pReply, child_thread_ids.size());
    for (JDWP::ObjectId child_thread_id : child_thread_ids) {
      expandBufAddObjectId(pReply, child_thread_id);
    }
  }

  // Add child thread groups.
  {
    std::vector<JDWP::ObjectId> child_thread_groups_ids;
    GetChildThreadGroups(thread_group, &child_thread_groups_ids);
    expandBufAdd4BE(pReply, child_thread_groups_ids.size());
    for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) {
      expandBufAddObjectId(pReply, child_thread_group_id);
    }
  }

  return JDWP::ERR_NONE;
}

JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
  ObjPtr<mirror::Object> group = f->GetObject(f->GetDeclaringClass());
  return gRegistry->Add(group);
}

JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
  switch (state) {
    case kBlocked:
      return JDWP::TS_MONITOR;
    case kNative:
    case kRunnable:
    case kSuspended:
      return JDWP::TS_RUNNING;
    case kSleeping:
      return JDWP::TS_SLEEPING;
    case kStarting:
    case kTerminated:
      return JDWP::TS_ZOMBIE;
    case kTimedWaiting:
    case kWaitingForCheckPointsToRun:
    case kWaitingForDebuggerSend:
    case kWaitingForDebuggerSuspension:
    case kWaitingForDebuggerToAttach:
    case kWaitingForDeoptimization:
    case kWaitingForGcToComplete:
    case kWaitingForGetObjectsAllocated:
    case kWaitingForJniOnLoad:
    case kWaitingForMethodTracingStart:
    case kWaitingForSignalCatcherOutput:
    case kWaitingForVisitObjects:
    case kWaitingInMainDebuggerLoop:
    case kWaitingInMainSignalCatcherLoop:
    case kWaitingPerformingGc:
    case kWaitingWeakGcRootRead:
    case kWaitingForGcThreadFlip:
    case kWaiting:
      return JDWP::TS_WAIT;
      // Don't add a 'default' here so the compiler can spot incompatible enum changes.
  }
  LOG(FATAL) << "Unknown thread state: " << state;
  return JDWP::TS_ZOMBIE;
}

JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
                                     JDWP::JdwpSuspendStatus* pSuspendStatus) {
  ScopedObjectAccess soa(Thread::Current());

  *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;

  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
      *pThreadStatus = JDWP::TS_ZOMBIE;
      return JDWP::ERR_NONE;
    }
    return error;
  }

  if (IsSuspendedForDebugger(soa, thread)) {
    *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
  }

  *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccess soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
  expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
  ScopedObjectAccess soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  thread->Interrupt(soa.Self());
  return JDWP::ERR_NONE;
}

static bool IsInDesiredThreadGroup(mirror::Object* desired_thread_group, mirror::Object* peer)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  // Do we want threads from all thread groups?
  if (desired_thread_group == nullptr) {
    return true;
  }
  ArtField* thread_group_field = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group);
  DCHECK(thread_group_field != nullptr);
  ObjPtr<mirror::Object> group = thread_group_field->GetObject(peer);
  return (group == desired_thread_group);
}

void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  std::list<Thread*> all_threads_list;
  {
    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
    all_threads_list = Runtime::Current()->GetThreadList()->GetList();
  }
  for (Thread* t : all_threads_list) {
    if (t == Dbg::GetDebugThread()) {
      // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
      // query all threads, so it's easier if we just don't tell them about this thread.
      continue;
    }
    if (t->IsStillStarting()) {
      // This thread is being started (and has been registered in the thread list). However, it is
      // not completely started yet so we must ignore it.
      continue;
    }
    mirror::Object* peer = t->GetPeerFromOtherThread();
    if (peer == nullptr) {
      // peer might be null if the thread is still starting up. We can't tell the debugger about
      // this thread yet.
      // TODO: if we identified threads to the debugger by their Thread*
      // rather than their peer's mirror::Object*, we could fix this.
      // Doing so might help us report ZOMBIE threads too.
      continue;
    }
    if (IsInDesiredThreadGroup(thread_group, peer)) {
      thread_ids->push_back(gRegistry->Add(peer));
    }
  }
}

static int GetStackDepth(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_) {
  struct CountStackDepthVisitor : public StackVisitor {
    explicit CountStackDepthVisitor(Thread* thread_in)
        : StackVisitor(thread_in, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
          depth(0) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        ++depth;
      }
      return true;
    }
    size_t depth;
  };

  CountStackDepthVisitor visitor(thread);
  visitor.WalkStack();
  return visitor.depth;
}

JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) {
  ScopedObjectAccess soa(Thread::Current());
  JDWP::JdwpError error;
  *result = 0;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  *result = GetStackDepth(thread);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
                                     size_t frame_count, JDWP::ExpandBuf* buf) {
  class GetFrameVisitor : public StackVisitor {
   public:
    GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
                    JDWP::ExpandBuf* buf_in)
        REQUIRES_SHARED(Locks::mutator_lock_)
        : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
          depth_(0),
          start_frame_(start_frame_in),
          frame_count_(frame_count_in),
          buf_(buf_in) {
      expandBufAdd4BE(buf_, frame_count_);
    }

    bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
      if (GetMethod()->IsRuntimeMethod()) {
        return true;  // The debugger can't do anything useful with a frame that has no Method*.
      }
      if (depth_ >= start_frame_ + frame_count_) {
        return false;
      }
      if (depth_ >= start_frame_) {
        JDWP::FrameId frame_id(GetFrameId());
        JDWP::JdwpLocation location;
        SetJdwpLocation(&location, GetMethod(), GetDexPc());
        VLOG(jdwp) << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
        expandBufAdd8BE(buf_, frame_id);
        expandBufAddLocation(buf_, location);
      }
      ++depth_;
      return true;
    }

   private:
    size_t depth_;
    const size_t start_frame_;
    const size_t frame_count_;
    JDWP::ExpandBuf* buf_;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

JDWP::ObjectId Dbg::GetThreadSelfId() {
  return GetThreadId(Thread::Current());
}

JDWP::ObjectId Dbg::GetThreadId(Thread* thread) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  return gRegistry->Add(thread->GetPeerFromOtherThread());
}

void Dbg::SuspendVM() {
  // Avoid a deadlock between the GC and the debugger: enter a GC critical section so we do not
  // suspend the runtime in the middle of a GC. b/25800335.
  gc::ScopedGCCriticalSection gcs(Thread::Current(),
                                  gc::kGcCauseDebugger,
                                  gc::kCollectorTypeDebugger);
  Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
}

void Dbg::ResumeVM() {
  Runtime::Current()->GetThreadList()->ResumeAllForDebugger();
}

JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
  Thread* self = Thread::Current();
  ScopedLocalRef<jobject> peer(self->GetJniEnv(), nullptr);
  {
    ScopedObjectAccess soa(self);
    JDWP::JdwpError error;
    peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id, &error)));
  }
  if (peer.get() == nullptr) {
    return JDWP::ERR_THREAD_NOT_ALIVE;
  }
  // Suspend thread to build stack trace.
  bool timed_out;
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  Thread* thread = thread_list->SuspendThreadByPeer(peer.get(),
                                                    request_suspension,
                                                    /* debug_suspension */ true,
                                                    &timed_out);
  if (thread != nullptr) {
    return JDWP::ERR_NONE;
  } else if (timed_out) {
    return JDWP::ERR_INTERNAL;
  } else {
    return JDWP::ERR_THREAD_NOT_ALIVE;
  }
}

void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id, &error);
  CHECK(peer != nullptr) << error;
  Thread* thread;
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    thread = Thread::FromManagedThread(soa, peer);
  }
  if (thread == nullptr) {
    LOG(WARNING) << "No such thread for resume: " << peer;
    return;
  }
  bool needs_resume;
  {
    MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
    needs_resume = thread->GetDebugSuspendCount() > 0;
  }
  if (needs_resume) {
    Runtime::Current()->GetThreadList()->Resume(thread, true);
  }
}

void Dbg::SuspendSelf() {
  Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
}

struct GetThisVisitor : public StackVisitor {
  GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        this_object(nullptr),
        frame_id(frame_id_in) {}

  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
  // annotalysis.
  virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
    if (frame_id != GetFrameId()) {
      return true;  // continue
    } else {
      this_object = GetThisObject();
      return false;
    }
  }

  mirror::Object* this_object;
  JDWP::FrameId frame_id;
};

JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
                                   JDWP::ObjectId* result) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  std::unique_ptr<Context> context(Context::Create());
  GetThisVisitor visitor(thread, context.get(), frame_id);
  visitor.WalkStack();
  *result = gRegistry->Add(visitor.this_object);
  return JDWP::ERR_NONE;
}

// Walks the stack until we find the frame with the given FrameId.
class FindFrameVisitor FINAL : public StackVisitor {
 public:
  FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        frame_id_(frame_id),
        error_(JDWP::ERR_INVALID_FRAMEID) {}

  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
  // annotalysis.
  bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
    if (GetFrameId() != frame_id_) {
      return true;  // Not our frame, carry on.
    }
    ArtMethod* m = GetMethod();
    if (m->IsNative()) {
      // We can't read/write local values from/into a native method.
      error_ = JDWP::ERR_OPAQUE_FRAME;
    } else {
      // We found our frame.
      error_ = JDWP::ERR_NONE;
    }
    return false;
  }

  JDWP::JdwpError GetError() const {
    return error_;
  }

 private:
  const JDWP::FrameId frame_id_;
  JDWP::JdwpError error_;

  DISALLOW_COPY_AND_ASSIGN(FindFrameVisitor);
};

JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) {
  JDWP::ObjectId thread_id = request->ReadThreadId();
  JDWP::FrameId frame_id = request->ReadFrameId();

  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  // Find the frame with the given frame_id.
  std::unique_ptr<Context> context(Context::Create());
  FindFrameVisitor visitor(thread, context.get(), frame_id);
  visitor.WalkStack();
  if (visitor.GetError() != JDWP::ERR_NONE) {
    return visitor.GetError();
  }

  // Read the values from the visitor's context.
  int32_t slot_count = request->ReadSigned32("slot count");
  expandBufAdd4BE(pReply, slot_count);     /* "int values" */
  for (int32_t i = 0; i < slot_count; ++i) {
    uint32_t slot = request->ReadUnsigned32("slot");
    JDWP::JdwpTag reqSigByte = request->ReadTag();

    VLOG(jdwp) << "    --> slot " << slot << " " << reqSigByte;

    size_t width = Dbg::GetTagWidth(reqSigByte);
    uint8_t* ptr = expandBufAddSpace(pReply, width + 1);
    error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width);
    if (error != JDWP::ERR_NONE) {
      return error;
    }
  }
  return JDWP::ERR_NONE;
}

constexpr JDWP::JdwpError kStackFrameLocalAccessError = JDWP::ERR_ABSENT_INFORMATION;

static std::string GetStackContextAsString(const StackVisitor& visitor)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  return StringPrintf(" at DEX pc 0x%08x in method %s", visitor.GetDexPc(false),
                      ArtMethod::PrettyMethod(visitor.GetMethod()).c_str());
}

static JDWP::JdwpError FailGetLocalValue(const StackVisitor& visitor, uint16_t vreg,
                                         JDWP::JdwpTag tag)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  LOG(ERROR) << "Failed to read " << tag << " local from register v" << vreg
             << GetStackContextAsString(visitor);
  return kStackFrameLocalAccessError;
}

JDWP::JdwpError Dbg::GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa,
                                   int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
  ArtMethod* m = visitor.GetMethod();
  JDWP::JdwpError error = JDWP::ERR_NONE;
  uint16_t vreg = DemangleSlot(slot, m, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  // TODO: check that the tag is compatible with the actual type of the slot!
  switch (tag) {
    case JDWP::JT_BOOLEAN: {
      CHECK_EQ(width, 1U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get boolean local " << vreg << " = " << intVal;
      JDWP::Set1(buf + 1, intVal != 0);
      break;
    }
    case JDWP::JT_BYTE: {
      CHECK_EQ(width, 1U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get byte local " << vreg << " = " << intVal;
      JDWP::Set1(buf + 1, intVal);
      break;
    }
    case JDWP::JT_SHORT:
    case JDWP::JT_CHAR: {
      CHECK_EQ(width, 2U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get short/char local " << vreg << " = " << intVal;
      JDWP::Set2BE(buf + 1, intVal);
      break;
    }
    case JDWP::JT_INT: {
      CHECK_EQ(width, 4U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kIntVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get int local " << vreg << " = " << intVal;
      JDWP::Set4BE(buf + 1, intVal);
      break;
    }
    case JDWP::JT_FLOAT: {
      CHECK_EQ(width, 4U);
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kFloatVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get float local " << vreg << " = " << intVal;
      JDWP::Set4BE(buf + 1, intVal);
      break;
    }
    case JDWP::JT_ARRAY:
    case JDWP::JT_CLASS_LOADER:
    case JDWP::JT_CLASS_OBJECT:
    case JDWP::JT_OBJECT:
    case JDWP::JT_STRING:
    case JDWP::JT_THREAD:
    case JDWP::JT_THREAD_GROUP: {
      CHECK_EQ(width, sizeof(JDWP::ObjectId));
      uint32_t intVal;
      if (!visitor.GetVReg(m, vreg, kReferenceVReg, &intVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
      VLOG(jdwp) << "get " << tag << " object local " << vreg << " = " << o;
      if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
        LOG(FATAL) << StringPrintf("Found invalid object %#" PRIxPTR " in register v%u",
                                   reinterpret_cast<uintptr_t>(o), vreg)
                                   << GetStackContextAsString(visitor);
        UNREACHABLE();
      }
      tag = TagFromObject(soa, o);
      JDWP::SetObjectId(buf + 1, gRegistry->Add(o));
      break;
    }
    case JDWP::JT_DOUBLE: {
      CHECK_EQ(width, 8U);
      uint64_t longVal;
      if (!visitor.GetVRegPair(m, vreg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get double local " << vreg << " = " << longVal;
      JDWP::Set8BE(buf + 1, longVal);
      break;
    }
    case JDWP::JT_LONG: {
      CHECK_EQ(width, 8U);
      uint64_t longVal;
      if (!visitor.GetVRegPair(m, vreg, kLongLoVReg, kLongHiVReg, &longVal)) {
        return FailGetLocalValue(visitor, vreg, tag);
      }
      VLOG(jdwp) << "get long local " << vreg << " = " << longVal;
      JDWP::Set8BE(buf + 1, longVal);
      break;
    }
    default:
      LOG(FATAL) << "Unknown tag " << tag;
      UNREACHABLE();
  }

  // Prepend tag, which may have been updated.
  JDWP::Set1(buf, tag);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) {
  JDWP::ObjectId thread_id = request->ReadThreadId();
  JDWP::FrameId frame_id = request->ReadFrameId();

  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  // Find the frame with the given frame_id.
  std::unique_ptr<Context> context(Context::Create());
  FindFrameVisitor visitor(thread, context.get(), frame_id);
  visitor.WalkStack();
  if (visitor.GetError() != JDWP::ERR_NONE) {
    return visitor.GetError();
  }

  // Write the values into the visitor's context.
  int32_t slot_count = request->ReadSigned32("slot count");
  for (int32_t i = 0; i < slot_count; ++i) {
    uint32_t slot = request->ReadUnsigned32("slot");
    JDWP::JdwpTag sigByte = request->ReadTag();
    size_t width = Dbg::GetTagWidth(sigByte);
    uint64_t value = request->ReadValue(width);

    VLOG(jdwp) << "    --> slot " << slot << " " << sigByte << " " << value;
    error = Dbg::SetLocalValue(thread, visitor, slot, sigByte, value, width);
    if (error != JDWP::ERR_NONE) {
      return error;
    }
  }
  return JDWP::ERR_NONE;
}

template<typename T>
static JDWP::JdwpError FailSetLocalValue(const StackVisitor& visitor, uint16_t vreg,
                                         JDWP::JdwpTag tag, T value)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  LOG(ERROR) << "Failed to write " << tag << " local " << value
             << " (0x" << std::hex << value << ") into register v" << vreg
             << GetStackContextAsString(visitor);
  return kStackFrameLocalAccessError;
}

JDWP::JdwpError Dbg::SetLocalValue(Thread* thread, StackVisitor& visitor, int slot,
                                   JDWP::JdwpTag tag, uint64_t value, size_t width) {
  ArtMethod* m = visitor.GetMethod();
  JDWP::JdwpError error = JDWP::ERR_NONE;
  uint16_t vreg = DemangleSlot(slot, m, &error);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  // TODO: check that the tag is compatible with the actual type of the slot!
  switch (tag) {
    case JDWP::JT_BOOLEAN:
    case JDWP::JT_BYTE:
      CHECK_EQ(width, 1U);
      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
      }
      break;
    case JDWP::JT_SHORT:
    case JDWP::JT_CHAR:
      CHECK_EQ(width, 2U);
      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
      }
      break;
    case JDWP::JT_INT:
      CHECK_EQ(width, 4U);
      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kIntVReg)) {
        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
      }
      break;
    case JDWP::JT_FLOAT:
      CHECK_EQ(width, 4U);
      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(value), kFloatVReg)) {
        return FailSetLocalValue(visitor, vreg, tag, static_cast<uint32_t>(value));
      }
      break;
    case JDWP::JT_ARRAY:
    case JDWP::JT_CLASS_LOADER:
    case JDWP::JT_CLASS_OBJECT:
    case JDWP::JT_OBJECT:
    case JDWP::JT_STRING:
    case JDWP::JT_THREAD:
    case JDWP::JT_THREAD_GROUP: {
      CHECK_EQ(width, sizeof(JDWP::ObjectId));
      mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value),
                                                          &error);
      if (error != JDWP::ERR_NONE) {
        VLOG(jdwp) << tag << " object " << o << " is an invalid object";
        return JDWP::ERR_INVALID_OBJECT;
      }
      if (!visitor.SetVReg(m, vreg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
                           kReferenceVReg)) {
        return FailSetLocalValue(visitor, vreg, tag, reinterpret_cast<uintptr_t>(o));
      }
      break;
    }
    case JDWP::JT_DOUBLE: {
      CHECK_EQ(width, 8U);
      if (!visitor.SetVRegPair(m, vreg, value, kDoubleLoVReg, kDoubleHiVReg)) {
        return FailSetLocalValue(visitor, vreg, tag, value);
      }
      break;
    }
    case JDWP::JT_LONG: {
      CHECK_EQ(width, 8U);
      if (!visitor.SetVRegPair(m, vreg, value, kLongLoVReg, kLongHiVReg)) {
        return FailSetLocalValue(visitor, vreg, tag, value);
      }
      break;
    }
    default:
      LOG(FATAL) << "Unknown tag " << tag;
      UNREACHABLE();
  }

  // If we set the local variable in a compiled frame, we need to trigger a deoptimization of
  // the stack so we continue execution with the interpreter using the new value(s) of the
  // updated local variable(s). To achieve this, we install the instrumentation exit stub on
  // each method of the thread's stack. The stub will cause the deoptimization to happen.
2861  if (!visitor.IsShadowFrame() && thread->HasDebuggerShadowFrames()) {
2862    Runtime::Current()->GetInstrumentation()->InstrumentThreadStack(thread);
2863  }
2864
2865  return JDWP::ERR_NONE;
2866}
2867
2868static void SetEventLocation(JDWP::EventLocation* location, ArtMethod* m, uint32_t dex_pc)
2869    REQUIRES_SHARED(Locks::mutator_lock_) {
2870  DCHECK(location != nullptr);
2871  if (m == nullptr) {
2872    memset(location, 0, sizeof(*location));
2873  } else {
2874    location->method = GetCanonicalMethod(m);
2875    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint32_t>(-1) : dex_pc;
2876  }
2877}

void Dbg::PostLocationEvent(ArtMethod* m, int dex_pc, mirror::Object* this_object,
                            int event_flags, const JValue* return_value) {
  if (!IsDebuggerActive()) {
    return;
  }
  DCHECK(m != nullptr);
  DCHECK_EQ(m->IsStatic(), this_object == nullptr);
  JDWP::EventLocation location;
  SetEventLocation(&location, m, dex_pc);

  // We need to be sure no exception is pending when calling JdwpState::PostLocationEvent.
  // This is required to be able to call JNI functions to create JDWP ids. To achieve this,
  // we temporarily clear the current thread's exception (if any) and will restore it after
  // the call.
  // Note: the only way to get a pending exception here is to suspend on a move-exception
  // instruction.
  Thread* const self = Thread::Current();
  StackHandleScope<1> hs(self);
  Handle<mirror::Throwable> pending_exception(hs.NewHandle(self->GetException()));
  self->ClearException();
  if (kIsDebugBuild && pending_exception != nullptr) {
    const DexFile::CodeItem* code_item = location.method->GetCodeItem();
    const Instruction* instr = Instruction::At(&code_item->insns_[location.dex_pc]);
    CHECK_EQ(Instruction::MOVE_EXCEPTION, instr->Opcode());
  }

  gJdwpState->PostLocationEvent(&location, this_object, event_flags, return_value);

  if (pending_exception != nullptr) {
    self->SetException(pending_exception.Get());
  }
}

void Dbg::PostFieldAccessEvent(ArtMethod* m, int dex_pc,
                               mirror::Object* this_object, ArtField* f) {
  if (!IsDebuggerActive()) {
    return;
  }
  DCHECK(m != nullptr);
  DCHECK(f != nullptr);
  JDWP::EventLocation location;
  SetEventLocation(&location, m, dex_pc);

  gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false);
}

void Dbg::PostFieldModificationEvent(ArtMethod* m, int dex_pc,
                                     mirror::Object* this_object, ArtField* f,
                                     const JValue* field_value) {
  if (!IsDebuggerActive()) {
    return;
  }
  DCHECK(m != nullptr);
  DCHECK(f != nullptr);
  DCHECK(field_value != nullptr);
  JDWP::EventLocation location;
  SetEventLocation(&location, m, dex_pc);

  gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
}

/**
 * Finds the location where this exception will be caught. We search until we reach the top
 * frame, in which case this exception is considered uncaught.
 */
class CatchLocationFinder : public StackVisitor {
 public:
  CatchLocationFinder(Thread* self, const Handle<mirror::Throwable>& exception, Context* context)
      REQUIRES_SHARED(Locks::mutator_lock_)
    : StackVisitor(self, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
      exception_(exception),
      handle_scope_(self),
      this_at_throw_(handle_scope_.NewHandle<mirror::Object>(nullptr)),
      catch_method_(nullptr),
      throw_method_(nullptr),
      catch_dex_pc_(DexFile::kDexNoIndex),
      throw_dex_pc_(DexFile::kDexNoIndex) {
  }

  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod* method = GetMethod();
    DCHECK(method != nullptr);
    if (method->IsRuntimeMethod()) {
      // Ignore callee save method.
      DCHECK(method->IsCalleeSaveMethod());
      return true;
    }

    uint32_t dex_pc = GetDexPc();
    if (throw_method_ == nullptr) {
      // First Java method found. It is either the method that threw the exception,
      // or the Java native method that is reporting an exception thrown by
      // native code.
      this_at_throw_.Assign(GetThisObject());
      throw_method_ = method;
      throw_dex_pc_ = dex_pc;
    }

    if (dex_pc != DexFile::kDexNoIndex) {
      StackHandleScope<1> hs(GetThread());
      uint32_t found_dex_pc;
      Handle<mirror::Class> exception_class(hs.NewHandle(exception_->GetClass()));
      bool unused_clear_exception;
      found_dex_pc = method->FindCatchBlock(exception_class, dex_pc, &unused_clear_exception);
      if (found_dex_pc != DexFile::kDexNoIndex) {
        catch_method_ = method;
        catch_dex_pc_ = found_dex_pc;
        return false;  // End stack walk.
      }
    }
    return true;  // Continue stack walk.
  }

  ArtMethod* GetCatchMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
    return catch_method_;
  }

  ArtMethod* GetThrowMethod() REQUIRES_SHARED(Locks::mutator_lock_) {
    return throw_method_;
  }

  mirror::Object* GetThisAtThrow() REQUIRES_SHARED(Locks::mutator_lock_) {
    return this_at_throw_.Get();
  }

  uint32_t GetCatchDexPc() const {
    return catch_dex_pc_;
  }

  uint32_t GetThrowDexPc() const {
    return throw_dex_pc_;
  }

 private:
  const Handle<mirror::Throwable>& exception_;
  StackHandleScope<1> handle_scope_;
  MutableHandle<mirror::Object> this_at_throw_;
  ArtMethod* catch_method_;
  ArtMethod* throw_method_;
  uint32_t catch_dex_pc_;
  uint32_t throw_dex_pc_;

  DISALLOW_COPY_AND_ASSIGN(CatchLocationFinder);
};

void Dbg::PostException(mirror::Throwable* exception_object) {
  if (!IsDebuggerActive()) {
    return;
  }
  Thread* const self = Thread::Current();
  StackHandleScope<1> handle_scope(self);
  Handle<mirror::Throwable> h_exception(handle_scope.NewHandle(exception_object));
  std::unique_ptr<Context> context(Context::Create());
  CatchLocationFinder clf(self, h_exception, context.get());
  clf.WalkStack(/* include_transitions */ false);
  JDWP::EventLocation exception_throw_location;
  SetEventLocation(&exception_throw_location, clf.GetThrowMethod(), clf.GetThrowDexPc());
  JDWP::EventLocation exception_catch_location;
  SetEventLocation(&exception_catch_location, clf.GetCatchMethod(), clf.GetCatchDexPc());

  gJdwpState->PostException(&exception_throw_location, h_exception.Get(), &exception_catch_location,
                            clf.GetThisAtThrow());
}

void Dbg::PostClassPrepare(mirror::Class* c) {
  if (!IsDebuggerActive()) {
    return;
  }
  gJdwpState->PostClassPrepare(c);
}

void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
                         ArtMethod* m, uint32_t dex_pc,
                         int event_flags, const JValue* return_value) {
  if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
    return;
  }

  if (IsBreakpoint(m, dex_pc)) {
    event_flags |= kBreakpoint;
  }

  // If the debugger is single-stepping one of our threads, check to
  // see if we're that thread and we've reached a step point.
  const SingleStepControl* single_step_control = thread->GetSingleStepControl();
  if (single_step_control != nullptr) {
    CHECK(!m->IsNative());
    if (single_step_control->GetStepDepth() == JDWP::SD_INTO) {
      // Step into method calls.  We break when the line number
      // or method pointer changes.  If we're in SS_MIN mode, we
      // always stop.
      if (single_step_control->GetMethod() != m) {
        event_flags |= kSingleStep;
        VLOG(jdwp) << "SS new method";
      } else if (single_step_control->GetStepSize() == JDWP::SS_MIN) {
        event_flags |= kSingleStep;
        VLOG(jdwp) << "SS new instruction";
      } else if (single_step_control->ContainsDexPc(dex_pc)) {
        event_flags |= kSingleStep;
        VLOG(jdwp) << "SS new line";
      }
    } else if (single_step_control->GetStepDepth() == JDWP::SD_OVER) {
      // Step over method calls.  We break when the line number is
      // different and the frame depth is <= the original frame
      // depth.  (We can't just compare on the method, because we
      // might get unrolled past it by an exception, and it's tricky
      // to identify recursion.)

      int stack_depth = GetStackDepth(thread);

      if (stack_depth < single_step_control->GetStackDepth()) {
        // Popped up one or more frames, always trigger.
        event_flags |= kSingleStep;
        VLOG(jdwp) << "SS method pop";
      } else if (stack_depth == single_step_control->GetStackDepth()) {
        // Same depth, see if we moved.
        if (single_step_control->GetStepSize() == JDWP::SS_MIN) {
          event_flags |= kSingleStep;
          VLOG(jdwp) << "SS new instruction";
        } else if (single_step_control->ContainsDexPc(dex_pc)) {
          event_flags |= kSingleStep;
          VLOG(jdwp) << "SS new line";
        }
      }
    } else {
      CHECK_EQ(single_step_control->GetStepDepth(), JDWP::SD_OUT);
      // Return from the current method.  We break when the frame
      // depth pops up.

      // This differs from the "method exit" break in that it stops
      // with the PC at the next instruction in the returned-to
      // function, rather than the end of the returning function.

      int stack_depth = GetStackDepth(thread);
      if (stack_depth < single_step_control->GetStackDepth()) {
        event_flags |= kSingleStep;
        VLOG(jdwp) << "SS method pop";
      }
    }
  }

  // If there's something interesting going on, see if it matches one
  // of the debugger filters.
  if (event_flags != 0) {
    Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
  }
}

size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
  switch (instrumentation_event) {
    case instrumentation::Instrumentation::kMethodEntered:
      return &method_enter_event_ref_count_;
    case instrumentation::Instrumentation::kMethodExited:
      return &method_exit_event_ref_count_;
    case instrumentation::Instrumentation::kDexPcMoved:
      return &dex_pc_change_event_ref_count_;
    case instrumentation::Instrumentation::kFieldRead:
      return &field_read_event_ref_count_;
    case instrumentation::Instrumentation::kFieldWritten:
      return &field_write_event_ref_count_;
    case instrumentation::Instrumentation::kExceptionCaught:
      return &exception_catch_event_ref_count_;
    default:
      return nullptr;
  }
}

// Process request while all mutator threads are suspended.
void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  switch (request.GetKind()) {
    case DeoptimizationRequest::kNothing:
      LOG(WARNING) << "Ignoring empty deoptimization request.";
      break;
    case DeoptimizationRequest::kRegisterForEvent:
      VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
                                 request.InstrumentationEvent());
      instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
      instrumentation_events_ |= request.InstrumentationEvent();
      break;
    case DeoptimizationRequest::kUnregisterForEvent:
      VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
                                 request.InstrumentationEvent());
      instrumentation->RemoveListener(&gDebugInstrumentationListener,
                                      request.InstrumentationEvent());
      instrumentation_events_ &= ~request.InstrumentationEvent();
      break;
    case DeoptimizationRequest::kFullDeoptimization:
      VLOG(jdwp) << "Deoptimize the world ...";
      instrumentation->DeoptimizeEverything(kDbgInstrumentationKey);
      VLOG(jdwp) << "Deoptimize the world DONE";
      break;
    case DeoptimizationRequest::kFullUndeoptimization:
      VLOG(jdwp) << "Undeoptimize the world ...";
      instrumentation->UndeoptimizeEverything(kDbgInstrumentationKey);
      VLOG(jdwp) << "Undeoptimize the world DONE";
      break;
    case DeoptimizationRequest::kSelectiveDeoptimization:
      VLOG(jdwp) << "Deoptimize method " << ArtMethod::PrettyMethod(request.Method()) << " ...";
      instrumentation->Deoptimize(request.Method());
      VLOG(jdwp) << "Deoptimize method " << ArtMethod::PrettyMethod(request.Method()) << " DONE";
      break;
    case DeoptimizationRequest::kSelectiveUndeoptimization:
      VLOG(jdwp) << "Undeoptimize method " << ArtMethod::PrettyMethod(request.Method()) << " ...";
      instrumentation->Undeoptimize(request.Method());
      VLOG(jdwp) << "Undeoptimize method " << ArtMethod::PrettyMethod(request.Method()) << " DONE";
      break;
    default:
      LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
      break;
  }
}

void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
  if (req.GetKind() == DeoptimizationRequest::kNothing) {
    // Nothing to do.
    return;
  }
  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
  RequestDeoptimizationLocked(req);
}

void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
  switch (req.GetKind()) {
    case DeoptimizationRequest::kRegisterForEvent: {
      DCHECK_NE(req.InstrumentationEvent(), 0u);
      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
                                                req.InstrumentationEvent());
      if (*counter == 0) {
        VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x",
                                   deoptimization_requests_.size(), req.InstrumentationEvent());
        deoptimization_requests_.push_back(req);
      }
      *counter = *counter + 1;
      break;
    }
    case DeoptimizationRequest::kUnregisterForEvent: {
      DCHECK_NE(req.InstrumentationEvent(), 0u);
      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
                                                req.InstrumentationEvent());
      *counter = *counter - 1;
      if (*counter == 0) {
        VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x",
                                   deoptimization_requests_.size(), req.InstrumentationEvent());
        deoptimization_requests_.push_back(req);
      }
      break;
    }
    case DeoptimizationRequest::kFullDeoptimization: {
      DCHECK(req.Method() == nullptr);
      if (full_deoptimization_event_count_ == 0) {
        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
                   << " for full deoptimization";
        deoptimization_requests_.push_back(req);
      }
      ++full_deoptimization_event_count_;
      break;
    }
    case DeoptimizationRequest::kFullUndeoptimization: {
      DCHECK(req.Method() == nullptr);
      DCHECK_GT(full_deoptimization_event_count_, 0U);
      --full_deoptimization_event_count_;
      if (full_deoptimization_event_count_ == 0) {
        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
                   << " for full undeoptimization";
        deoptimization_requests_.push_back(req);
      }
      break;
    }
    case DeoptimizationRequest::kSelectiveDeoptimization: {
      DCHECK(req.Method() != nullptr);
      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
                 << " for deoptimization of " << req.Method()->PrettyMethod();
      deoptimization_requests_.push_back(req);
      break;
    }
    case DeoptimizationRequest::kSelectiveUndeoptimization: {
      DCHECK(req.Method() != nullptr);
      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
                 << " for undeoptimization of " << req.Method()->PrettyMethod();
      deoptimization_requests_.push_back(req);
      break;
    }
    default: {
      LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
      break;
    }
  }
}

void Dbg::ManageDeoptimization() {
  Thread* const self = Thread::Current();
  {
    // Avoid suspend/resume if there is no pending request.
    MutexLock mu(self, *Locks::deoptimization_lock_);
    if (deoptimization_requests_.empty()) {
      return;
    }
  }
  CHECK_EQ(self->GetState(), kRunnable);
  ScopedThreadSuspension sts(self, kWaitingForDeoptimization);
  // Required for ProcessDeoptimizationRequest.
  gc::ScopedGCCriticalSection gcs(self,
                                  gc::kGcCauseInstrumentation,
                                  gc::kCollectorTypeInstrumentation);
  // We need to suspend mutator threads first.
  ScopedSuspendAll ssa(__FUNCTION__);
  const ThreadState old_state = self->SetStateUnsafe(kRunnable);
  {
    MutexLock mu(self, *Locks::deoptimization_lock_);
    size_t req_index = 0;
    for (DeoptimizationRequest& request : deoptimization_requests_) {
      VLOG(jdwp) << "Process deoptimization request #" << req_index++;
      ProcessDeoptimizationRequest(request);
    }
    deoptimization_requests_.clear();
  }
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
}
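
// A sketch of the intended flow (inferred from the functions above, not a separate API): a JDWP
// handler queues one or more requests, then the debugger applies them under suspension:
//
//   DeoptimizationRequest req;
//   req.SetKind(DeoptimizationRequest::kFullDeoptimization);
//   Dbg::RequestDeoptimization(req);  // reference-counted; queues the request if needed
//   Dbg::ManageDeoptimization();      // suspends all mutators and drains the queue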

static const Breakpoint* FindFirstBreakpointForMethod(ArtMethod* m)
    REQUIRES_SHARED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
  for (Breakpoint& breakpoint : gBreakpoints) {
    if (breakpoint.IsInMethod(m)) {
      return &breakpoint;
    }
  }
  return nullptr;
}

bool Dbg::MethodHasAnyBreakpoints(ArtMethod* method) {
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  return FindFirstBreakpointForMethod(method) != nullptr;
}

// Sanity checks all existing breakpoints on the same method.
static void SanityCheckExistingBreakpoints(ArtMethod* m,
                                           DeoptimizationRequest::Kind deoptimization_kind)
    REQUIRES_SHARED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
  for (const Breakpoint& breakpoint : gBreakpoints) {
    if (breakpoint.IsInMethod(m)) {
      CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
    }
  }
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
    // We should have deoptimized everything but not "selectively" deoptimized this method.
    CHECK(instrumentation->AreAllMethodsDeoptimized());
    CHECK(!instrumentation->IsDeoptimized(m));
  } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
    // We should have "selectively" deoptimized this method.
    // Note: while we have not deoptimized everything for this method, we may have done it for
    // another event.
    CHECK(instrumentation->IsDeoptimized(m));
  } else {
    // This method does not require deoptimization.
    CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
    CHECK(!instrumentation->IsDeoptimized(m));
  }
}

// Returns the deoptimization kind required to set a breakpoint in a method.
// If a breakpoint has already been set, we also return the first breakpoint
// through the given 'existing_brkpt' pointer.
static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
                                                                 ArtMethod* m,
                                                                 const Breakpoint** existing_brkpt)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  if (!Dbg::RequiresDeoptimization()) {
    // We already run in interpreter-only mode so we don't need to deoptimize anything.
    VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
               << ArtMethod::PrettyMethod(m);
    return DeoptimizationRequest::kNothing;
  }
  const Breakpoint* first_breakpoint;
  {
    ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
    first_breakpoint = FindFirstBreakpointForMethod(m);
    *existing_brkpt = first_breakpoint;
  }

  if (first_breakpoint == nullptr) {
    // There is no breakpoint on this method yet: we need to deoptimize. If this method is a
    // default method, we deoptimize everything, because we do not know where all of its copies
    // are used and some of them could be missed otherwise; if it is not, we deoptimize only
    // this method.
    // TODO Deoptimizing on default methods might not be necessary in all cases.
    bool need_full_deoptimization = m->IsDefault();
    if (need_full_deoptimization) {
      VLOG(jdwp) << "Need full deoptimization because of copying of method "
                 << ArtMethod::PrettyMethod(m);
      return DeoptimizationRequest::kFullDeoptimization;
    } else {
      // We don't need to deoptimize if the method has not been compiled.
      const bool is_compiled = m->HasAnyCompiledCode();
      if (is_compiled) {
        VLOG(jdwp) << "Need selective deoptimization for compiled method "
                   << ArtMethod::PrettyMethod(m);
        return DeoptimizationRequest::kSelectiveDeoptimization;
      } else {
        // Method is not compiled: we don't need to deoptimize.
        VLOG(jdwp) << "No need for deoptimization for non-compiled method "
                   << ArtMethod::PrettyMethod(m);
        return DeoptimizationRequest::kNothing;
      }
    }
  } else {
    // There is at least one breakpoint for this method: we don't need to deoptimize.
    // Let's check that all breakpoints are configured the same way for deoptimization.
    VLOG(jdwp) << "Breakpoint already set: no deoptimization is required";
    DeoptimizationRequest::Kind deoptimization_kind = first_breakpoint->GetDeoptimizationKind();
    if (kIsDebugBuild) {
      ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
      SanityCheckExistingBreakpoints(m, deoptimization_kind);
    }
    return DeoptimizationRequest::kNothing;
  }
}

// Installs a breakpoint at the specified location. Also indicates through the deoptimization
// request if we need to deoptimize.
void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
  Thread* const self = Thread::Current();
  ArtMethod* m = FromMethodId(location->method_id);
  DCHECK(m != nullptr) << "No method for method id " << location->method_id;

  const Breakpoint* existing_breakpoint = nullptr;
  const DeoptimizationRequest::Kind deoptimization_kind =
      GetRequiredDeoptimizationKind(self, m, &existing_breakpoint);
  req->SetKind(deoptimization_kind);
  if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
    req->SetMethod(m);
  } else {
    CHECK(deoptimization_kind == DeoptimizationRequest::kNothing ||
          deoptimization_kind == DeoptimizationRequest::kFullDeoptimization);
    req->SetMethod(nullptr);
  }

  {
    WriterMutexLock mu(self, *Locks::breakpoint_lock_);
    // If there is at least one existing breakpoint on the same method, the new breakpoint
    // must have the same deoptimization kind as the existing breakpoint(s).
    DeoptimizationRequest::Kind breakpoint_deoptimization_kind;
    if (existing_breakpoint != nullptr) {
      breakpoint_deoptimization_kind = existing_breakpoint->GetDeoptimizationKind();
    } else {
      breakpoint_deoptimization_kind = deoptimization_kind;
    }
    gBreakpoints.push_back(Breakpoint(m, location->dex_pc, breakpoint_deoptimization_kind));
    VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
               << gBreakpoints[gBreakpoints.size() - 1];
  }
}
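
// Expected call sequence when the debugger registers a breakpoint (a sketch; the actual caller
// lives in the JDWP event registration code):
//
//   DeoptimizationRequest req;
//   Dbg::WatchLocation(&location, &req);  // installs the breakpoint and fills 'req'
//   Dbg::RequestDeoptimization(req);      // queues the required (un)deoptimization, if any
//   Dbg::ManageDeoptimization();          // applies it while all mutators are suspended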

// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
// request if we need to undeoptimize.
void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
  WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  ArtMethod* m = FromMethodId(location->method_id);
  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
  DeoptimizationRequest::Kind deoptimization_kind = DeoptimizationRequest::kNothing;
  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
    if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].IsInMethod(m)) {
      VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
      deoptimization_kind = gBreakpoints[i].GetDeoptimizationKind();
      DCHECK_EQ(deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization,
                Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
      gBreakpoints.erase(gBreakpoints.begin() + i);
      break;
    }
  }
  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
  if (existing_breakpoint == nullptr) {
    // There is no more breakpoint on this method: we need to undeoptimize.
    if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
      // This method required full deoptimization: we need to undeoptimize everything.
      req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
      req->SetMethod(nullptr);
    } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
      // This method required selective deoptimization: we need to undeoptimize only that method.
      req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
      req->SetMethod(m);
    } else {
      // This method had no need for deoptimization: do nothing.
      CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
      req->SetKind(DeoptimizationRequest::kNothing);
      req->SetMethod(nullptr);
    }
  } else {
    // There is at least one breakpoint for this method: we don't need to undeoptimize.
    req->SetKind(DeoptimizationRequest::kNothing);
    req->SetMethod(nullptr);
    if (kIsDebugBuild) {
      SanityCheckExistingBreakpoints(m, deoptimization_kind);
    }
  }
}

bool Dbg::IsForcedInterpreterNeededForCallingImpl(Thread* thread, ArtMethod* m) {
  const SingleStepControl* const ssc = thread->GetSingleStepControl();
  if (ssc == nullptr) {
    // If we are not single-stepping, then we don't have to force interpreter.
    return false;
  }
  if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
    // If we are in interpreter only mode, then we don't have to force interpreter.
    return false;
  }

  if (!m->IsNative() && !m->IsProxyMethod()) {
    // If we want to step into a method, then we have to force interpreter on that call.
    if (ssc->GetStepDepth() == JDWP::SD_INTO) {
      return true;
    }
  }
  return false;
}

bool Dbg::IsForcedInterpreterNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
  instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  // If we are in interpreter only mode, then we don't have to force interpreter.
  if (instrumentation->InterpretOnly()) {
    return false;
  }
  // We can only interpret pure Java methods.
  if (m->IsNative() || m->IsProxyMethod()) {
    return false;
  }
  const SingleStepControl* const ssc = thread->GetSingleStepControl();
  if (ssc != nullptr) {
    // If we want to step into a method, then we have to force interpreter on that call.
    if (ssc->GetStepDepth() == JDWP::SD_INTO) {
      return true;
    }
    // If we are stepping out of a static initializer that was implicitly invoked by calling
    // a static method, by issuing a step in or step over, then we need to step into that
    // method. A stack depth lower than the one recorded in the single-step control indicates
    // that the step originates from the static initializer.
    if (ssc->GetStepDepth() != JDWP::SD_OUT &&
        ssc->GetStackDepth() > GetStackDepth(thread)) {
      return true;
    }
  }
  // There are cases where we have to force interpreter on deoptimized methods,
  // because in some cases the call will not be performed by invoking an entry
  // point that has been replaced by the deoptimization, but instead by directly
  // invoking the compiled code of the method, for example.
  return instrumentation->IsDeoptimized(m);
}

bool Dbg::IsForcedInstrumentationNeededForResolutionImpl(Thread* thread, ArtMethod* m) {
  // The upcall can be null and in that case we don't need to do anything.
  if (m == nullptr) {
    return false;
  }
  instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  // If we are in interpreter only mode, then we don't have to force interpreter.
  if (instrumentation->InterpretOnly()) {
    return false;
  }
  // We can only interpret pure Java methods.
  if (m->IsNative() || m->IsProxyMethod()) {
    return false;
  }
  const SingleStepControl* const ssc = thread->GetSingleStepControl();
  if (ssc != nullptr) {
    // If we are stepping out of a static initializer that was implicitly invoked by calling
    // a static method, by issuing a step out, then we need to step into the caller of that
    // method. A stack depth lower than the one recorded in the single-step control indicates
    // that the step originates from the static initializer.
    if (ssc->GetStepDepth() == JDWP::SD_OUT &&
        ssc->GetStackDepth() > GetStackDepth(thread)) {
      return true;
    }
  }
  // If we are returning from a static initializer that was implicitly invoked by calling a
  // static method and the caller is deoptimized, then we have to deoptimize the stack without
  // forcing interpreter on the static method that was called originally. This problem can be
  // solved easily by forcing instrumentation on the called method, because the instrumentation
  // exit hook will recognize the need for stack deoptimization by calling
  // IsForcedInterpreterNeededForUpcall.
  return instrumentation->IsDeoptimized(m);
}

bool Dbg::IsForcedInterpreterNeededForUpcallImpl(Thread* thread, ArtMethod* m) {
  // The upcall can be null and in that case we don't need to do anything.
  if (m == nullptr) {
    return false;
  }
  instrumentation::Instrumentation* const instrumentation =
      Runtime::Current()->GetInstrumentation();
  // If we are in interpreter only mode, then we don't have to force interpreter.
  if (instrumentation->InterpretOnly()) {
    return false;
  }
  // We can only interpret pure Java methods.
  if (m->IsNative() || m->IsProxyMethod()) {
    return false;
  }
  const SingleStepControl* const ssc = thread->GetSingleStepControl();
  if (ssc != nullptr) {
    // The debugger is not interested in what happens below the level of the step, so we only
    // force the interpreter when we are not below the step.
    if (ssc->GetStackDepth() >= GetStackDepth(thread)) {
      return true;
    }
  }
  if (thread->HasDebuggerShadowFrames()) {
    // We need to deoptimize the stack for the exception handling flow so that
    // we don't miss any deoptimization that should be done when there are
    // debugger shadow frames.
    return true;
  }
  // We have to require stack deoptimization if the upcall is deoptimized.
  return instrumentation->IsDeoptimized(m);
}

class NeedsDeoptimizationVisitor : public StackVisitor {
 public:
  explicit NeedsDeoptimizationVisitor(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
    : StackVisitor(self, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
      needs_deoptimization_(false) {}

  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    // The visitor is meant to be used only when handling an exception from compiled code.
    CHECK(!IsShadowFrame()) << "We only expect to visit compiled frames: "
                            << ArtMethod::PrettyMethod(GetMethod());
    ArtMethod* method = GetMethod();
    if (method == nullptr) {
      // We reached an upcall and don't need to deoptimize this part of the stack
      // (ManagedFragment), so we can stop the visit.
      DCHECK(!needs_deoptimization_);
      return false;
    }
    if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) {
      // We found a compiled frame in the stack but instrumentation is set to interpret
      // everything: we need to deoptimize.
      needs_deoptimization_ = true;
      return false;
    }
    if (Runtime::Current()->GetInstrumentation()->IsDeoptimized(method)) {
      // We found a deoptimized method in the stack.
      needs_deoptimization_ = true;
      return false;
    }
    ShadowFrame* frame = GetThread()->FindDebuggerShadowFrame(GetFrameId());
    if (frame != nullptr) {
      // The debugger allocated a ShadowFrame to update a variable in the stack: we need to
      // deoptimize the stack to execute (and deallocate) this frame.
      needs_deoptimization_ = true;
      return false;
    }
    return true;
  }

  bool NeedsDeoptimization() const {
    return needs_deoptimization_;
  }

 private:
  // Do we need to deoptimize the stack?
  bool needs_deoptimization_;

  DISALLOW_COPY_AND_ASSIGN(NeedsDeoptimizationVisitor);
};

// Do we need to deoptimize the stack to handle an exception?
bool Dbg::IsForcedInterpreterNeededForExceptionImpl(Thread* thread) {
  const SingleStepControl* const ssc = thread->GetSingleStepControl();
  if (ssc != nullptr) {
    // We deopt to step into the catch handler.
    return true;
  }
  // Deoptimization is required if at least one method in the stack needs it. However we
  // skip frames that will be unwound (thus not executed).
  NeedsDeoptimizationVisitor visitor(thread);
  visitor.WalkStack(true);  // includes upcall.
  return visitor.NeedsDeoptimization();
}

// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
// cause suspension if the thread is the current thread.
class ScopedDebuggerThreadSuspension {
 public:
  ScopedDebuggerThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
      REQUIRES(!Locks::thread_list_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_) :
      thread_(nullptr),
      error_(JDWP::ERR_NONE),
      self_suspend_(false),
      other_suspend_(false) {
    ScopedObjectAccessUnchecked soa(self);
    thread_ = DecodeThread(soa, thread_id, &error_);
    if (error_ == JDWP::ERR_NONE) {
      if (thread_ == soa.Self()) {
        self_suspend_ = true;
      } else {
        Thread* suspended_thread;
        {
          ScopedThreadSuspension sts(self, kWaitingForDebuggerSuspension);
          jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
          bool timed_out;
          ThreadList* const thread_list = Runtime::Current()->GetThreadList();
          suspended_thread = thread_list->SuspendThreadByPeer(thread_peer,
                                                              /* request_suspension */ true,
                                                              /* debug_suspension */ true,
                                                              &timed_out);
        }
        if (suspended_thread == nullptr) {
          // Thread terminated from under us while suspending.
          error_ = JDWP::ERR_INVALID_THREAD;
        } else {
          CHECK_EQ(suspended_thread, thread_);
          other_suspend_ = true;
        }
      }
    }
  }

  Thread* GetThread() const {
    return thread_;
  }

  JDWP::JdwpError GetError() const {
    return error_;
  }

  ~ScopedDebuggerThreadSuspension() {
    if (other_suspend_) {
      Runtime::Current()->GetThreadList()->Resume(thread_, true);
    }
  }

 private:
  Thread* thread_;
  JDWP::JdwpError error_;
  bool self_suspend_;
  bool other_suspend_;
};

JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
                                   JDWP::JdwpStepDepth step_depth) {
  Thread* self = Thread::Current();
  ScopedDebuggerThreadSuspension sts(self, thread_id);
  if (sts.GetError() != JDWP::ERR_NONE) {
    return sts.GetError();
  }

  // Work out what ArtMethod* we're in, the current line number, and how deep the stack currently
  // is for step-out.
  struct SingleStepStackVisitor : public StackVisitor {
    explicit SingleStepStackVisitor(Thread* thread) REQUIRES_SHARED(Locks::mutator_lock_)
        : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
          stack_depth(0),
          method(nullptr),
          line_number(-1) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      ArtMethod* m = GetMethod();
      if (!m->IsRuntimeMethod()) {
        ++stack_depth;
        if (method == nullptr) {
          const DexFile* dex_file = m->GetDexFile();
          method = m;
          if (dex_file != nullptr) {
            line_number = annotations::GetLineNumFromPC(dex_file, m, GetDexPc());
          }
        }
      }
      return true;
    }

    int stack_depth;
    ArtMethod* method;
    int32_t line_number;
  };

  Thread* const thread = sts.GetThread();
  SingleStepStackVisitor visitor(thread);
  visitor.WalkStack();

  // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
  struct DebugCallbackContext {
    DebugCallbackContext(SingleStepControl* single_step_control_cb,
                         int32_t line_number_cb, const DexFile::CodeItem* code_item)
        : single_step_control_(single_step_control_cb), line_number_(line_number_cb),
          code_item_(code_item), last_pc_valid(false), last_pc(0) {
    }

    static bool Callback(void* raw_context, const DexFile::PositionInfo& entry) {
      DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
      if (static_cast<int32_t>(entry.line_) == context->line_number_) {
        if (!context->last_pc_valid) {
          // Everything from this address until the next line change is ours.
          context->last_pc = entry.address_;
          context->last_pc_valid = true;
        }
        // Otherwise, if we're already in a valid range for this line,
        // just keep going (shouldn't really happen)...
      } else if (context->last_pc_valid) {  // and the line number is new
        // Add everything from the last entry up until here to the set.
        for (uint32_t dex_pc = context->last_pc; dex_pc < entry.address_; ++dex_pc) {
          context->single_step_control_->AddDexPc(dex_pc);
        }
        context->last_pc_valid = false;
      }
      return false;  // There may be multiple entries for any given line.
    }

    ~DebugCallbackContext() {
      // If the line number was the last in the position table...
      if (last_pc_valid) {
        size_t end = code_item_->insns_size_in_code_units_;
        for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
          single_step_control_->AddDexPc(dex_pc);
        }
      }
    }

    SingleStepControl* const single_step_control_;
    const int32_t line_number_;
    const DexFile::CodeItem* const code_item_;
    bool last_pc_valid;
    uint32_t last_pc;
  };
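
  // Worked example (illustrative values): given position table entries
  // (pc=0x00, line=10), (pc=0x08, line=11), (pc=0x14, line=10) and a code item of 0x20 code
  // units, single-stepping over line 10 collects dex pcs [0x00, 0x08) via Callback() and
  // [0x14, 0x20) via the destructor, since line 10 is also the last entry in the table.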

  // Allocate single step.
  SingleStepControl* single_step_control =
      new (std::nothrow) SingleStepControl(step_size, step_depth,
                                           visitor.stack_depth, visitor.method);
  if (single_step_control == nullptr) {
    LOG(ERROR) << "Failed to allocate SingleStepControl";
    return JDWP::ERR_OUT_OF_MEMORY;
  }

  ArtMethod* m = single_step_control->GetMethod();
  const int32_t line_number = visitor.line_number;
  // Note: if the thread is not running Java code (pure native thread), there is no "current"
  // method on the stack (and no line number either).
  if (m != nullptr && !m->IsNative()) {
    const DexFile::CodeItem* const code_item = m->GetCodeItem();
    DebugCallbackContext context(single_step_control, line_number, code_item);
    m->GetDexFile()->DecodeDebugPositionInfo(code_item, DebugCallbackContext::Callback, &context);
  }

  // Activate single-step in the thread.
  thread->ActivateSingleStepControl(single_step_control);

  if (VLOG_IS_ON(jdwp)) {
    VLOG(jdwp) << "Single-step thread: " << *thread;
    VLOG(jdwp) << "Single-step step size: " << single_step_control->GetStepSize();
    VLOG(jdwp) << "Single-step step depth: " << single_step_control->GetStepDepth();
    VLOG(jdwp) << "Single-step current method: "
               << ArtMethod::PrettyMethod(single_step_control->GetMethod());
    VLOG(jdwp) << "Single-step current line: " << line_number;
    VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->GetStackDepth();
    VLOG(jdwp) << "Single-step dex_pc values:";
    for (uint32_t dex_pc : single_step_control->GetDexPcs()) {
      VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
    }
  }

  return JDWP::ERR_NONE;
}

void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  Thread* thread = DecodeThread(soa, thread_id, &error);
  if (error == JDWP::ERR_NONE) {
    thread->DeactivateSingleStepControl();
  }
}

static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
  switch (tag) {
    default:
      LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
      UNREACHABLE();

    // Primitives.
    case JDWP::JT_BYTE:    return 'B';
    case JDWP::JT_CHAR:    return 'C';
    case JDWP::JT_FLOAT:   return 'F';
    case JDWP::JT_DOUBLE:  return 'D';
    case JDWP::JT_INT:     return 'I';
    case JDWP::JT_LONG:    return 'J';
    case JDWP::JT_SHORT:   return 'S';
    case JDWP::JT_VOID:    return 'V';
    case JDWP::JT_BOOLEAN: return 'Z';

    // Reference types.
    case JDWP::JT_ARRAY:
    case JDWP::JT_OBJECT:
    case JDWP::JT_STRING:
    case JDWP::JT_THREAD:
    case JDWP::JT_THREAD_GROUP:
    case JDWP::JT_CLASS_LOADER:
    case JDWP::JT_CLASS_OBJECT:
      return 'L';
  }
}
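
// For example, JdwpTagToShortyChar(JDWP::JT_BOOLEAN) yields 'Z', while every reference tag
// (JT_STRING, JT_THREAD, ...) collapses to 'L', mirroring how shorty descriptors erase the
// precise reference type.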

JDWP::JdwpError Dbg::PrepareInvokeMethod(uint32_t request_id, JDWP::ObjectId thread_id,
                                         JDWP::ObjectId object_id, JDWP::RefTypeId class_id,
                                         JDWP::MethodId method_id, uint32_t arg_count,
                                         uint64_t arg_values[], JDWP::JdwpTag* arg_types,
                                         uint32_t options) {
  Thread* const self = Thread::Current();
  CHECK_EQ(self, GetDebugThread()) << "This must be called by the JDWP thread";
  const bool resume_all_threads = ((options & JDWP::INVOKE_SINGLE_THREADED) == 0);

  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  Thread* targetThread = nullptr;
  {
    ScopedObjectAccessUnchecked soa(self);
    JDWP::JdwpError error;
    targetThread = DecodeThread(soa, thread_id, &error);
    if (error != JDWP::ERR_NONE) {
      LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
      return error;
    }
    if (targetThread->GetInvokeReq() != nullptr) {
      // Thread is already invoking a method on behalf of the debugger.
      LOG(ERROR) << "InvokeMethod request for thread already invoking a method: " << *targetThread;
      return JDWP::ERR_ALREADY_INVOKING;
    }
    if (!targetThread->IsReadyForDebugInvoke()) {
      // Thread is not suspended by an event so it cannot invoke a method.
      LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
      return JDWP::ERR_INVALID_THREAD;
    }

    /*
     * According to the JDWP specs, we are expected to resume all threads (or only the
     * target thread) once. So if a thread has been suspended more than once (either by
     * the debugger for an event or by the runtime for GC), it will remain suspended before
     * the invoke is executed. This means the debugger is responsible for properly resuming
     * all the threads it has suspended so the target thread can execute the method.
     *
     * However, for compatibility reasons with older versions of debuggers (like Eclipse), we
     * fully resume all threads (by canceling *all* debugger suspensions) when the debugger
     * wants us to resume all threads. This is to avoid ending up in a deadlock situation.
     *
     * On the other hand, if we are asked to only resume the target thread, then we follow the
     * JDWP specs by resuming that thread only once. This means the thread will remain suspended
     * if it has been suspended more than once before the invoke (and again, it is the debugger's
     * responsibility to properly resume that thread before invoking a method).
     */
    int suspend_count;
    {
      MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
      suspend_count = targetThread->GetSuspendCount();
    }
    if (suspend_count > 1 && resume_all_threads) {
      // The target thread will remain suspended even after we resume it. Let's emit a warning
      // to indicate the invoke won't be executed until the thread is resumed.
      LOG(WARNING) << *targetThread << " suspended more than once (suspend count == "
                   << suspend_count << "). This thread will invoke the method only once "
                   << "it is fully resumed.";
    }

    mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id, &error);
    if (error != JDWP::ERR_NONE) {
      return JDWP::ERR_INVALID_OBJECT;
    }

    gRegistry->Get<mirror::Object*>(thread_id, &error);
    if (error != JDWP::ERR_NONE) {
      return JDWP::ERR_INVALID_OBJECT;
    }

    mirror::Class* c = DecodeClass(class_id, &error);
    if (c == nullptr) {
      return error;
    }

    ArtMethod* m = FromMethodId(method_id);
    if (m->IsStatic() != (receiver == nullptr)) {
      return JDWP::ERR_INVALID_METHODID;
    }
    if (m->IsStatic()) {
      if (m->GetDeclaringClass() != c) {
        return JDWP::ERR_INVALID_METHODID;
      }
    } else {
      if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
        return JDWP::ERR_INVALID_METHODID;
      }
    }

    // Check that the argument list matches the method.
    uint32_t shorty_len = 0;
    const char* shorty = m->GetShorty(&shorty_len);
    if (shorty_len - 1 != arg_count) {
      return JDWP::ERR_ILLEGAL_ARGUMENT;
    }

    {
      StackHandleScope<2> hs(soa.Self());
      HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
      HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
      const DexFile::TypeList* types = m->GetParameterTypeList();
      for (size_t i = 0; i < arg_count; ++i) {
        if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
          return JDWP::ERR_ILLEGAL_ARGUMENT;
        }

        if (shorty[i + 1] == 'L') {
          // Did we really get an argument of an appropriate reference type?
          mirror::Class* parameter_type =
              m->GetClassFromTypeIndex(types->GetTypeItem(i).type_idx_, true /* resolve */);
          mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i], &error);
          if (error != JDWP::ERR_NONE) {
            return JDWP::ERR_INVALID_OBJECT;
          }
          if (argument != nullptr && !argument->InstanceOf(parameter_type)) {
            return JDWP::ERR_ILLEGAL_ARGUMENT;
          }

          // Turn the on-the-wire ObjectId into a jobject.
          jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
          v.l = gRegistry->GetJObject(arg_values[i]);
        }
      }
    }

    // Allocate a DebugInvokeReq.
    DebugInvokeReq* req = new (std::nothrow) DebugInvokeReq(request_id, thread_id, receiver, c, m,
                                                            options, arg_values, arg_count);
    if (req == nullptr) {
      LOG(ERROR) << "Failed to allocate DebugInvokeReq";
      return JDWP::ERR_OUT_OF_MEMORY;
    }

    // Attach the DebugInvokeReq to the target thread so it executes the method when
    // it is resumed. Once the invocation completes, the target thread will delete it before
    // suspending itself (see ThreadList::SuspendSelfForDebugger).
    targetThread->SetDebugInvokeReq(req);
  }

  // Note that we have released the thread list lock here, which is a bit risky: if the thread
  // goes away we have no way to recover. However, we must release it before the
  // UndoDebuggerSuspensions call.
  if (resume_all_threads) {
    VLOG(jdwp) << "      Resuming all threads";
    thread_list->UndoDebuggerSuspensions();
  } else {
    VLOG(jdwp) << "      Resuming event thread only";
    thread_list->Resume(targetThread, true);
  }

  return JDWP::ERR_NONE;
}

void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
  Thread* const self = Thread::Current();
  CHECK_NE(self, GetDebugThread()) << "This must be called by the event thread";

  ScopedObjectAccess soa(self);

  // We can be called while an exception is pending. We need
  // to preserve that across the method invocation.
  StackHandleScope<1> hs(soa.Self());
  Handle<mirror::Throwable> old_exception = hs.NewHandle(soa.Self()->GetException());
  soa.Self()->ClearException();

  // Execute the method, then send the reply to the debugger.
  ExecuteMethodWithoutPendingException(soa, pReq);

  // If an exception was pending before the invoke, restore it now.
  if (old_exception != nullptr) {
    soa.Self()->SetException(old_exception.Get());
  }
}

// Helper function: write a variable-width value into the output buffer.
static void WriteValue(JDWP::ExpandBuf* pReply, int width, uint64_t value) {
  switch (width) {
    case 1:
      expandBufAdd1(pReply, value);
      break;
    case 2:
      expandBufAdd2BE(pReply, value);
      break;
    case 4:
      expandBufAdd4BE(pReply, value);
      break;
    case 8:
      expandBufAdd8BE(pReply, value);
      break;
    default:
      LOG(FATAL) << width;
      UNREACHABLE();
  }
}
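
// For example, a JT_INT result has a tag width of 4, so the caller passes width == 4 and the
// value is appended as a 4-byte big-endian quantity, per the JDWP wire format.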
4086
4087void Dbg::ExecuteMethodWithoutPendingException(ScopedObjectAccess& soa, DebugInvokeReq* pReq) {
4088  soa.Self()->AssertNoPendingException();
4089
4090  // Translate the method through the vtable, unless the debugger wants to suppress it.
4091  ArtMethod* m = pReq->method;
4092  PointerSize image_pointer_size = Runtime::Current()->GetClassLinker()->GetImagePointerSize();
4093  if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver.Read() != nullptr) {
4094    ArtMethod* actual_method =
4095        pReq->klass.Read()->FindVirtualMethodForVirtualOrInterface(m, image_pointer_size);
4096    if (actual_method != m) {
4097      VLOG(jdwp) << "ExecuteMethod translated " << ArtMethod::PrettyMethod(m)
4098                 << " to " << ArtMethod::PrettyMethod(actual_method);
4099      m = actual_method;
4100    }
4101  }
4102  VLOG(jdwp) << "ExecuteMethod " << ArtMethod::PrettyMethod(m)
4103             << " receiver=" << pReq->receiver.Read()
4104             << " arg_count=" << pReq->arg_count;
4105  CHECK(m != nullptr);
4106
4107  static_assert(sizeof(jvalue) == sizeof(uint64_t), "jvalue and uint64_t have different sizes.");
4108
4109  // Invoke the method.
4110  ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(pReq->receiver.Read()));
4111  JValue result = InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(m),
4112                                    reinterpret_cast<jvalue*>(pReq->arg_values.get()));
4113
4114  // Prepare JDWP ids for the reply.
4115  JDWP::JdwpTag result_tag = BasicTagFromDescriptor(m->GetShorty());
4116  const bool is_object_result = (result_tag == JDWP::JT_OBJECT);
4117  StackHandleScope<3> hs(soa.Self());
4118  Handle<mirror::Object> object_result = hs.NewHandle(is_object_result ? result.GetL() : nullptr);
4119  Handle<mirror::Throwable> exception = hs.NewHandle(soa.Self()->GetException());
4120  soa.Self()->ClearException();
4121
4122  if (!IsDebuggerActive()) {
4123    // The debugger detached: we must not re-suspend threads. We also don't need to fill the reply
4124    // because it won't be sent either.
4125    return;
4126  }
4127
4128  JDWP::ObjectId exceptionObjectId = gRegistry->Add(exception);
4129  uint64_t result_value = 0;
4130  if (exceptionObjectId != 0) {
4131    VLOG(jdwp) << "  JDWP invocation returning with exception=" << exception.Get()
4132               << " " << exception->Dump();
4133    result_value = 0;
4134  } else if (is_object_result) {
4135    /* if no exception was thrown, examine object result more closely */
4136    JDWP::JdwpTag new_tag = TagFromObject(soa, object_result.Get());
4137    if (new_tag != result_tag) {
4138      VLOG(jdwp) << "  JDWP promoted result from " << result_tag << " to " << new_tag;
4139      result_tag = new_tag;
4140    }
4141
4142    // Register the object in the registry and reference its ObjectId. This ensures
4143    // GC safety and prevents from accessing stale reference if the object is moved.
    result_value = gRegistry->Add(object_result.Get());
  } else {
    // Primitive result.
    DCHECK(IsPrimitiveTag(result_tag));
    result_value = result.GetJ();
  }
  const bool is_constructor = m->IsConstructor() && !m->IsStatic();
  if (is_constructor) {
    // If we invoked a constructor (which actually returns void), return the receiver,
    // unless we threw, in which case we return null.
    DCHECK_EQ(JDWP::JT_VOID, result_tag);
    if (exceptionObjectId == 0) {
      if (m->GetDeclaringClass()->IsStringClass()) {
        // For string constructors, the new string is remapped to the receiver (stored in ref).
        Handle<mirror::Object> decoded_ref = hs.NewHandle(soa.Self()->DecodeJObject(ref.get()));
        result_value = gRegistry->Add(decoded_ref);
        result_tag = TagFromObject(soa, decoded_ref.Get());
      } else {
        // TODO we could keep the receiver ObjectId in the DebugInvokeReq to avoid looking into the
        // object registry.
        result_value = GetObjectRegistry()->Add(pReq->receiver.Read());
        result_tag = TagFromObject(soa, pReq->receiver.Read());
      }
    } else {
      result_value = 0;
      result_tag = JDWP::JT_OBJECT;
    }
  }

  // Suspend other threads if the invoke is not single-threaded.
  if ((pReq->options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
    ScopedThreadSuspension sts(soa.Self(), kWaitingForDebuggerSuspension);
    // Avoid a deadlock between the GC and the debugger, where the suspension could otherwise
    // land in the middle of a GC. b/25800335.
    gc::ScopedGCCriticalSection gcs(soa.Self(), gc::kGcCauseDebugger, gc::kCollectorTypeDebugger);
    VLOG(jdwp) << "      Suspending all threads";
    Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
  }

  VLOG(jdwp) << "  --> returned " << result_tag
             << StringPrintf(" %#" PRIx64 " (except=%#" PRIx64 ")", result_value,
                             exceptionObjectId);

  // Show detailed debug output.
  if (result_tag == JDWP::JT_STRING && exceptionObjectId == 0) {
    if (result_value != 0) {
      if (VLOG_IS_ON(jdwp)) {
        std::string result_string;
        JDWP::JdwpError error = Dbg::StringToUtf8(result_value, &result_string);
        CHECK_EQ(error, JDWP::ERR_NONE);
        VLOG(jdwp) << "      string '" << result_string << "'";
      }
    } else {
      VLOG(jdwp) << "      string (null)";
    }
  }

  // Attach the reply to DebugInvokeReq so it can be sent to the debugger when the event thread
  // is ready to suspend.
  BuildInvokeReply(pReq->reply, pReq->request_id, result_tag, result_value, exceptionObjectId);
}

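// For reference, the reply assembled below follows the standard JDWP packet
// layout (offsets given by the kJDWPHeader* constants used in the body):
//   [u4] length      total packet length, including the 11-byte header
//   [u4] id          the request id this packet replies to
//   [u1] flags       kJDWPFlagReply (0x80)
//   [u2] error code  ERR_NONE on success
// followed by the invoke-specific payload: the tagged return value and a
// tagged ObjectId for the pending exception (0 if none was thrown).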
void Dbg::BuildInvokeReply(JDWP::ExpandBuf* pReply, uint32_t request_id, JDWP::JdwpTag result_tag,
                           uint64_t result_value, JDWP::ObjectId exception) {
  // Make room for the JDWP header since we do not know the size of the reply yet.
  JDWP::expandBufAddSpace(pReply, kJDWPHeaderLen);

  size_t width = GetTagWidth(result_tag);
  JDWP::expandBufAdd1(pReply, result_tag);
  if (width != 0) {
    WriteValue(pReply, width, result_value);
  }
  JDWP::expandBufAdd1(pReply, JDWP::JT_OBJECT);
  JDWP::expandBufAddObjectId(pReply, exception);

  // Now that we know the size, we can complete the JDWP header.
  uint8_t* buf = expandBufGetBuffer(pReply);
  JDWP::Set4BE(buf + kJDWPHeaderSizeOffset, expandBufGetLength(pReply));
  JDWP::Set4BE(buf + kJDWPHeaderIdOffset, request_id);
  JDWP::Set1(buf + kJDWPHeaderFlagsOffset, kJDWPFlagReply);  // flags
  JDWP::Set2BE(buf + kJDWPHeaderErrorCodeOffset, JDWP::ERR_NONE);
}

void Dbg::FinishInvokeMethod(DebugInvokeReq* pReq) {
  CHECK_NE(Thread::Current(), GetDebugThread()) << "This must be called by the event thread";

  JDWP::ExpandBuf* const pReply = pReq->reply;
  CHECK(pReply != nullptr) << "No reply attached to DebugInvokeReq";

  // We need to prevent other threads (including the JDWP thread) from interacting with the
  // debugger while we send the reply but are not yet suspended. The JDWP token will be released
  // just before we suspend ourselves again (see ThreadList::SuspendSelfForDebugger).
  gJdwpState->AcquireJdwpTokenForEvent(pReq->thread_id);

  // Send the reply unless the debugger detached before the completion of the method.
  if (IsDebuggerActive()) {
    const size_t replyDataLength = expandBufGetLength(pReply) - kJDWPHeaderLen;
    VLOG(jdwp) << StringPrintf("REPLY INVOKE id=0x%06x (length=%zu)",
                               pReq->request_id, replyDataLength);

    gJdwpState->SendRequest(pReply);
  } else {
    VLOG(jdwp) << "Not sending invoke reply because debugger detached";
  }
}

/*
 * "request" contains a full JDWP packet, possibly with multiple chunks.  We
 * need to process each, accumulate the replies, and ship the whole thing
 * back.
 *
 * Returns "true" if we have a reply.  The reply buffer is newly allocated,
 * and includes the chunk type/length, followed by the data.
 *
 * OLD-TODO: we currently assume that the request and reply include a single
 * chunk.  If this becomes inconvenient we will need to adapt.
 */
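// On the wire, each DDM chunk is framed as:
//   [u4] type    four ASCII bytes packed big-endian, e.g. 'T','H','C','R'
//   [u4] length  payload length in bytes
//   [..] data    'length' bytes of payload
// The two ReadUnsigned32 calls below consume this framing, and the reply
// buffer reproduces it via the Set4BE calls at the end of this function.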
bool Dbg::DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen) {
  Thread* self = Thread::Current();
  JNIEnv* env = self->GetJniEnv();

  uint32_t type = request->ReadUnsigned32("type");
  uint32_t length = request->ReadUnsigned32("length");

  // Create a byte[] corresponding to 'request'.
  size_t request_length = request->size();
  ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
  if (dataArray.get() == nullptr) {
    LOG(WARNING) << "byte[] allocation failed: " << request_length;
    env->ExceptionClear();
    return false;
  }
  env->SetByteArrayRegion(dataArray.get(), 0, request_length,
                          reinterpret_cast<const jbyte*>(request->data()));
  request->Skip(request_length);

  // Run through and find all chunks.  [Currently just find the first.]
  ScopedByteArrayRO contents(env, dataArray.get());
  if (length != request_length) {
    LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zu)", length, request_length);
    return false;
  }

  // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
  ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
                                                                 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
                                                                 type, dataArray.get(), 0, length));
  if (env->ExceptionCheck()) {
    LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
    env->ExceptionDescribe();
    env->ExceptionClear();
    return false;
  }

  if (chunk.get() == nullptr) {
    return false;
  }

  /*
   * Pull the pieces out of the chunk.  We copy the results into a
   * newly-allocated buffer that the caller can free.  We don't want to
   * continue using the Chunk object because nothing has a reference to it.
   *
   * We could avoid this by returning type/data/offset/length and having
   * the caller be aware of the object lifetime issues, but that
   * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
   * if we have responses for multiple chunks.
   *
   * So we're pretty much stuck with copying data around multiple times.
   */
  ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
  jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
  length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
  type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);

  VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
  if (length == 0 || replyData.get() == nullptr) {
    return false;
  }

  const int kChunkHdrLen = 8;
  // Note: operator new[] never returns null (allocation failure aborts), so no explicit
  // null check is needed here.
  uint8_t* reply = new uint8_t[length + kChunkHdrLen];
  JDWP::Set4BE(reply + 0, type);
  JDWP::Set4BE(reply + 4, length);
  env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));

  *pReplyBuf = reply;
  *pReplyLen = length + kChunkHdrLen;

  VLOG(jdwp) << StringPrintf("DdmHandlePacket returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
  return true;
}

void Dbg::DdmBroadcast(bool connect) {
  VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";

  Thread* self = Thread::Current();
  if (self->GetState() != kRunnable) {
    LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
    /* try anyway? */
  }

  JNIEnv* env = self->GetJniEnv();
  jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
  env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
                            WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
                            event);
  if (env->ExceptionCheck()) {
    LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
    env->ExceptionDescribe();
    env->ExceptionClear();
  }
}

void Dbg::DdmConnected() {
  Dbg::DdmBroadcast(true);
}

void Dbg::DdmDisconnected() {
  Dbg::DdmBroadcast(false);
  gDdmThreadNotification = false;
}

/*
 * Send a notification when a thread starts, stops, or changes its name.
 *
 * Because we broadcast the full set of threads when the notifications are
 * first enabled, it's possible for "thread" to be actively executing.
 */
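// Payload layouts for the chunks built below (all values big-endian):
//   THCR/THNM: [u4] thread id, [u4] name length in UTF-16 code units,
//              then the name as UTF-16BE code units.
//   THDE:      [u4] thread id only.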
void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
  if (!gDdmThreadNotification) {
    return;
  }

  if (type == CHUNK_TYPE("THDE")) {
    uint8_t buf[4];
    JDWP::Set4BE(&buf[0], t->GetThreadId());
    Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
  } else {
    CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
    ScopedObjectAccessUnchecked soa(Thread::Current());
    StackHandleScope<1> hs(soa.Self());
    Handle<mirror::String> name(hs.NewHandle(t->GetThreadName()));
    size_t char_count = (name != nullptr) ? name->GetLength() : 0;
    const jchar* chars = (name != nullptr) ? name->GetValue() : nullptr;
    bool is_compressed = (name != nullptr) ? name->IsCompressed() : false;

    std::vector<uint8_t> bytes;
    JDWP::Append4BE(bytes, t->GetThreadId());
    if (is_compressed) {
      const uint8_t* chars_compressed = name->GetValueCompressed();
      JDWP::AppendUtf16CompressedBE(bytes, chars_compressed, char_count);
    } else {
      JDWP::AppendUtf16BE(bytes, chars, char_count);
    }
    CHECK_EQ(bytes.size(), char_count * 2 + sizeof(uint32_t) * 2);
    Dbg::DdmSendChunk(type, bytes);
  }
}

void Dbg::DdmSetThreadNotification(bool enable) {
  // Enable/disable thread notifications.
  gDdmThreadNotification = enable;
  if (enable) {
    // Suspend the VM then post thread start notifications for all threads. Threads attaching will
    // see a suspension in progress and block until that ends. They then post their own start
    // notification.
    SuspendVM();
    std::list<Thread*> threads;
    Thread* self = Thread::Current();
    {
      MutexLock mu(self, *Locks::thread_list_lock_);
      threads = Runtime::Current()->GetThreadList()->GetList();
    }
    {
      ScopedObjectAccess soa(self);
      for (Thread* thread : threads) {
        Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
      }
    }
    ResumeVM();
  }
}

void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
  if (IsDebuggerActive()) {
    gJdwpState->PostThreadChange(t, type == CHUNK_TYPE("THCR"));
  }
  Dbg::DdmSendThreadNotification(t, type);
}

void Dbg::PostThreadStart(Thread* t) {
  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
}

void Dbg::PostThreadDeath(Thread* t) {
  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
}

void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
  CHECK(buf != nullptr);
  iovec vec[1];
  vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
  vec[0].iov_len = byte_count;
  Dbg::DdmSendChunkV(type, vec, 1);
}

void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
  DdmSendChunk(type, bytes.size(), &bytes[0]);
}
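
// A minimal usage sketch (hypothetical caller; "EXMP" is a made-up tag used
// only for illustration, real tags such as THCR and HPIF appear throughout
// this file):
//
//   uint8_t buf[4];
//   JDWP::Set4BE(&buf[0], some_id);  // payload values are big-endian
//   Dbg::DdmSendChunk(CHUNK_TYPE("EXMP"), sizeof(buf), buf);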

void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
  if (gJdwpState == nullptr) {
    VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
  } else {
    gJdwpState->DdmSendChunkV(type, iov, iov_count);
  }
}

JDWP::JdwpState* Dbg::GetJdwpState() {
  return gJdwpState;
}

int Dbg::DdmHandleHpifChunk(HpifWhen when) {
  if (when == HPIF_WHEN_NOW) {
    DdmSendHeapInfo(when);
    return true;
  }

  if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
    LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
    return false;
  }

  gDdmHpifWhen = when;
  return true;
}

bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
  if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
    LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
    return false;
  }

  if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
    LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
    return false;
  }

  if (native) {
    gDdmNhsgWhen = when;
    gDdmNhsgWhat = what;
  } else {
    gDdmHpsgWhen = when;
    gDdmHpsgWhat = what;
  }
  return true;
}

void Dbg::DdmSendHeapInfo(HpifWhen reason) {
  // If there's a one-shot 'when', reset it.
  if (reason == gDdmHpifWhen) {
    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
      gDdmHpifWhen = HPIF_WHEN_NEVER;
    }
  }

  /*
   * Chunk HPIF (client --> server)
   *
   * Heap Info. General information about the heap,
   * suitable for a summary display.
   *
   *   [u4]: number of heaps
   *
   *   For each heap:
   *     [u4]: heap ID
   *     [u8]: timestamp in ms since Unix epoch
   *     [u1]: capture reason (same as 'when' value from server)
   *     [u4]: max heap size in bytes (-Xmx)
   *     [u4]: current heap size in bytes
   *     [u4]: current number of bytes allocated
   *     [u4]: current number of objects allocated
   */
  uint8_t heap_count = 1;
  gc::Heap* heap = Runtime::Current()->GetHeap();
  std::vector<uint8_t> bytes;
  JDWP::Append4BE(bytes, heap_count);
  JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
  JDWP::Append8BE(bytes, MilliTime());
  JDWP::Append1BE(bytes, reason);
  JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
  JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
  JDWP::Append4BE(bytes, heap->GetBytesAllocated());
  JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
  Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
}

enum HpsgSolidity {
  SOLIDITY_FREE = 0,
  SOLIDITY_HARD = 1,
  SOLIDITY_SOFT = 2,
  SOLIDITY_WEAK = 3,
  SOLIDITY_PHANTOM = 4,
  SOLIDITY_FINALIZABLE = 5,
  SOLIDITY_SWEEP = 6,
};

enum HpsgKind {
  KIND_OBJECT = 0,
  KIND_CLASS_OBJECT = 1,
  KIND_ARRAY_1 = 2,
  KIND_ARRAY_2 = 3,
  KIND_ARRAY_4 = 4,
  KIND_ARRAY_8 = 5,
  KIND_UNKNOWN = 6,
  KIND_NATIVE = 7,
};

#define HPSG_PARTIAL (1<<7)
#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
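// For example, HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT) encodes to 0x01 and
// HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) to 0x21: the kind occupies bits 3-5,
// the solidity bits 0-2, and bit 7 (HPSG_PARTIAL) marks a continued run.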

class HeapChunkContext {
 public:
  // Maximum chunk size.  Obtain this from the formula:
  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
  HeapChunkContext(bool merge, bool native)
      : buf_(16384 - 16),
        type_(0),
        chunk_overhead_(0) {
    Reset();
    if (native) {
      type_ = CHUNK_TYPE("NHSG");
    } else {
      type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
    }
  }

  ~HeapChunkContext() {
    if (p_ > &buf_[0]) {
      Flush();
    }
  }

  void SetChunkOverhead(size_t chunk_overhead) {
    chunk_overhead_ = chunk_overhead;
  }

  void ResetStartOfNextChunk() {
    startOfNextMemoryChunk_ = nullptr;
  }

  void EnsureHeader(const void* chunk_ptr) {
    if (!needHeader_) {
      return;
    }

    // Start a new HPSx chunk.
    JDWP::Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
    JDWP::Write1BE(&p_, 8);  // Size of allocation unit, in bytes.

    JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // Virtual address of segment start.
    JDWP::Write4BE(&p_, 0);  // Offset of this piece (relative to the virtual address).
    // [u4]: length of piece, in allocation units
    // We won't know this until we're done, so save the offset and stuff in a dummy value.
    pieceLenField_ = p_;
    JDWP::Write4BE(&p_, 0x55555555);
    needHeader_ = false;
  }

  void Flush() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (pieceLenField_ == nullptr) {
      // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
      CHECK(needHeader_);
      return;
    }
    // Patch the "length of piece" field.
    CHECK_LE(&buf_[0], pieceLenField_);
    CHECK_LE(pieceLenField_, p_);
    JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);

    Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
    Reset();
  }

  static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
  }

  static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
  }

 private:
  enum { ALLOCATION_UNIT_SIZE = 8 };

  void Reset() {
    p_ = &buf_[0];
    ResetStartOfNextChunk();
    totalAllocationUnits_ = 0;
    needHeader_ = true;
    pieceLenField_ = nullptr;
  }

  bool IsNative() const {
    return type_ == CHUNK_TYPE("NHSG");
  }

  // Returns true if the object is not an empty chunk.
  bool ProcessRecord(void* start, size_t used_bytes) REQUIRES_SHARED(Locks::mutator_lock_) {
    // Note: heap callbacks must not manipulate the heap they are walking, so the code below
    // takes care not to allocate memory (buf_ is sized up front).
    if (used_bytes == 0) {
      if (start == nullptr) {
        // Reset for start of new heap.
        startOfNextMemoryChunk_ = nullptr;
        Flush();
      }
      // Only process in use memory so that free region information
      // also includes dlmalloc book keeping.
      return false;
    }
    if (startOfNextMemoryChunk_ != nullptr) {
      // Transmit any pending free memory. Spans of free native memory longer than kMaxFreeLen
      // are likely due to mmaps, so don't report those. If the gap is not free memory, start a
      // new segment.
      bool flush = true;
      if (start > startOfNextMemoryChunk_) {
        const size_t kMaxFreeLen = 2 * kPageSize;
        void* free_start = startOfNextMemoryChunk_;
        void* free_end = start;
        const size_t free_len =
            reinterpret_cast<uintptr_t>(free_end) - reinterpret_cast<uintptr_t>(free_start);
        if (!IsNative() || free_len < kMaxFreeLen) {
          AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), free_start, free_len, IsNative());
          flush = false;
        }
      }
      if (flush) {
        startOfNextMemoryChunk_ = nullptr;
        Flush();
      }
    }
    return true;
  }

  void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (ProcessRecord(start, used_bytes)) {
      uint8_t state = ExamineNativeObject(start);
      AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
    }
  }

  void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
      REQUIRES_SHARED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
    if (ProcessRecord(start, used_bytes)) {
      // Determine the type of this chunk.
      // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
      // If it's the same, we should combine them.
      uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
      AppendChunk(state, start, used_bytes + chunk_overhead_, false /*is_native*/);
      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
    }
  }

  void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    // Make sure there's enough room left in the buffer.
    // We need two bytes for every block of 256 allocation units used by the chunk (rounded up),
    // plus 17 bytes for the chunk header.
    const size_t needed = ((RoundUp(length / ALLOCATION_UNIT_SIZE, 256) / 256) * 2) + 17;
    size_t bytes_left = &buf_.back() - p_;
    if (bytes_left < needed) {
      if (is_native) {
        // Cannot trigger memory allocation while walking the native heap.
        return;
      }
      Flush();
    }

    bytes_left = &buf_.back() - p_;
    if (bytes_left < needed) {
      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", needed="
          << needed << " bytes)";
      return;
    }
    EnsureHeader(ptr);
    // Write out the chunk description.
    length /= ALLOCATION_UNIT_SIZE;   // Convert to allocation units.
    totalAllocationUnits_ += length;
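    // Runs longer than 256 allocation units are emitted as a sequence of
    // (state | HPSG_PARTIAL, 255) pairs followed by a final (state, n - 1) pair;
    // e.g. 600 units encode as (state|PARTIAL, 255), (state|PARTIAL, 255), (state, 87).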
    while (length > 256) {
      *p_++ = state | HPSG_PARTIAL;
      *p_++ = 255;     // length - 1
      length -= 256;
    }
    *p_++ = state;
    *p_++ = length - 1;
  }

  uint8_t ExamineNativeObject(const void* p) REQUIRES_SHARED(Locks::mutator_lock_) {
    return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
  }

  uint8_t ExamineJavaObject(mirror::Object* o)
      REQUIRES_SHARED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
    if (o == nullptr) {
      return HPSG_STATE(SOLIDITY_FREE, 0);
    }
    // It's an allocated chunk. Figure out what it is.
    gc::Heap* heap = Runtime::Current()->GetHeap();
    if (!heap->IsLiveObjectLocked(o)) {
      LOG(ERROR) << "Invalid object in managed heap: " << o;
      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
    }
    mirror::Class* c = o->GetClass();
    if (c == nullptr) {
      // The object was probably just created but hasn't been initialized yet.
      return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
    }
    if (!heap->IsValidObjectAddress(c)) {
      LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
    }
    if (c->GetClass() == nullptr) {
      LOG(ERROR) << "Null class of class " << c << " for object " << o;
      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
    }
    if (c->IsClassClass()) {
      return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
    }
    if (c->IsArrayClass()) {
      switch (c->GetComponentSize()) {
      case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
      case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
      case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
      case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
      }
    }
    return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
  }

  std::vector<uint8_t> buf_;
  uint8_t* p_;
  uint8_t* pieceLenField_;
  void* startOfNextMemoryChunk_;
  size_t totalAllocationUnits_;
  uint32_t type_;
  bool needHeader_;
  size_t chunk_overhead_;

  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
};

static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
    REQUIRES_SHARED(Locks::mutator_lock_) REQUIRES(Locks::heap_bitmap_lock_) {
  const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
  HeapChunkContext::HeapChunkJavaCallback(
      obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
}

void Dbg::DdmSendHeapSegments(bool native) {
  Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
  Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
  if (when == HPSG_WHEN_NEVER) {
    return;
  }
  // Figure out what kind of chunks we'll be sending.
  CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS)
      << static_cast<int>(what);

  // First, send a heap start chunk.
  uint8_t heap_id[4];
  JDWP::Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
  Thread* self = Thread::Current();
  Locks::mutator_lock_->AssertSharedHeld(self);

  // Send a series of heap segment chunks.
  HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
  if (native) {
    UNIMPLEMENTED(WARNING) << "Native heap inspection is not supported";
  } else {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    for (const auto& space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
        // allocation then the first sizeof(size_t) may belong to it.
        context.SetChunkOverhead(sizeof(size_t));
        space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
      } else if (space->IsRosAllocSpace()) {
        context.SetChunkOverhead(0);
        // Need to acquire the mutator lock before the heap bitmap lock with exclusive access since
        // RosAlloc's internal logic doesn't know to release and reacquire the heap bitmap lock.
        ScopedThreadSuspension sts(self, kSuspended);
        ScopedSuspendAll ssa(__FUNCTION__);
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
      } else if (space->IsBumpPointerSpace()) {
        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
        context.SetChunkOverhead(0);
        space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
        HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
      } else if (space->IsRegionSpace()) {
        heap->IncrementDisableMovingGC(self);
        {
          ScopedThreadSuspension sts(self, kSuspended);
          ScopedSuspendAll ssa(__FUNCTION__);
          ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
          context.SetChunkOverhead(0);
          space->AsRegionSpace()->Walk(BumpPointerSpaceCallback, &context);
          HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
        }
        heap->DecrementDisableMovingGC(self);
      } else {
        UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
      }
      context.ResetStartOfNextChunk();
    }
    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
    // Walk the large objects, these are not in the AllocSpace.
    context.SetChunkOverhead(0);
    heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
  }

  // Finally, send a heap end chunk.
  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
}

void Dbg::SetAllocTrackingEnabled(bool enable) {
  gc::AllocRecordObjectMap::SetAllocTrackingEnabled(enable);
}

void Dbg::DumpRecentAllocations() {
  ScopedObjectAccess soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
  if (!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled()) {
    LOG(INFO) << "Not recording tracked allocations";
    return;
  }
  gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
  CHECK(records != nullptr);

  const uint16_t capped_count = CappedAllocRecordCount(records->GetRecentAllocationSize());
  uint16_t count = capped_count;

  LOG(INFO) << "Tracked allocations (count=" << count << ")";
  for (auto it = records->RBegin(), end = records->REnd();
      count > 0 && it != end; count--, it++) {
    const gc::AllocRecord* record = &it->second;

    LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->GetTid(), record->ByteCount())
              << mirror::Class::PrettyClass(record->GetClass());

    for (size_t stack_frame = 0, depth = record->GetDepth(); stack_frame < depth; ++stack_frame) {
      const gc::AllocRecordStackTraceElement& stack_element = record->StackElement(stack_frame);
      ArtMethod* m = stack_element.GetMethod();
      LOG(INFO) << "    " << ArtMethod::PrettyMethod(m) << " line "
                << stack_element.ComputeLineNumber();
    }

    // Pause periodically to help logcat catch up.
    if ((count % 5) == 0) {
      usleep(40000);
    }
  }
}

class StringTable {
 public:
  StringTable() {
  }

  void Add(const std::string& str) {
    table_.insert(str);
  }

  void Add(const char* str) {
    table_.insert(str);
  }

  size_t IndexOf(const char* s) const {
    auto it = table_.find(s);
    if (it == table_.end()) {
      LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
    }
    return std::distance(table_.begin(), it);
  }

  size_t Size() const {
    return table_.size();
  }

  void WriteTo(std::vector<uint8_t>& bytes) const {
    for (const std::string& str : table_) {
      const char* s = str.c_str();
      size_t s_len = CountModifiedUtf8Chars(s);
      std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
      ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
      JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
    }
  }

 private:
  std::set<std::string> table_;
  DISALLOW_COPY_AND_ASSIGN(StringTable);
};
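
// Note on usage: the backing std::set keeps its contents sorted, so the index
// returned by IndexOf() is only stable once every Add() has happened.
// GetRecentAllocations() below therefore fills the tables completely in its
// first pass before querying any indexes in its second.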

static const char* GetMethodSourceFile(ArtMethod* method)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  DCHECK(method != nullptr);
  const char* source_file = method->GetDeclaringClassSourceFile();
  return (source_file != nullptr) ? source_file : "";
}

/*
 * The data we send to DDMS contains everything we have recorded.
 *
 * Message header (all values big-endian):
 * (1b) message header len (to allow future expansion); includes itself
 * (1b) entry header len
 * (1b) stack frame len
 * (2b) number of entries
 * (4b) offset to string table from start of message
 * (2b) number of class name strings
 * (2b) number of method name strings
 * (2b) number of source file name strings
 * For each entry:
 *   (4b) total allocation size
 *   (2b) thread id
 *   (2b) allocated object's class name index
 *   (1b) stack depth
 *   For each stack frame:
 *     (2b) method's class name
 *     (2b) method name
 *     (2b) method source file
 *     (2b) line number, clipped to 32767; -2 if native; -1 if no source
 * (xb) class name strings
 * (xb) method name strings
 * (xb) source file strings
 *
 * As with other DDM traffic, strings are sent as a 4-byte length
 * followed by UTF-16 data.
 *
 * We send up 16-bit unsigned indexes into string tables.  In theory there
 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
 * each table, but in practice there should be far fewer.
 *
 * The chief reason for using a string table here is to keep the size of
 * the DDMS message to a minimum.  This is partly to make the protocol
 * efficient, but also because we have to form the whole thing up all at
 * once in a memory buffer.
 *
 * We use separate string tables for class names, method names, and source
 * files to keep the indexes small.  There will generally be no overlap
 * between the contents of these tables.
 */
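// The header constants used below follow directly from the layout above:
// message header 15 = 1 + 1 + 1 + 2 + 4 + 2 + 2 + 2 bytes, entry header
// 9 = 4 + 2 + 2 + 1 bytes, and stack frame 8 = 2 + 2 + 2 + 2 bytes.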
jbyteArray Dbg::GetRecentAllocations() {
  if ((false)) {
    DumpRecentAllocations();
  }

  Thread* self = Thread::Current();
  std::vector<uint8_t> bytes;
  {
    MutexLock mu(self, *Locks::alloc_tracker_lock_);
    gc::AllocRecordObjectMap* records = Runtime::Current()->GetHeap()->GetAllocationRecords();
    // In case this method is called when the allocation tracker is disabled,
    // we should still send some data back.
    gc::AllocRecordObjectMap dummy;
    if (records == nullptr) {
      CHECK(!Runtime::Current()->GetHeap()->IsAllocTrackingEnabled());
      records = &dummy;
    }
    // We don't need to wait on the condition variable records->new_record_condition_, because this
    // function only reads the class objects, which are already marked so it doesn't change their
    // reachability.

    //
    // Part 1: generate string tables.
    //
    StringTable class_names;
    StringTable method_names;
    StringTable filenames;

    const uint16_t capped_count = CappedAllocRecordCount(records->GetRecentAllocationSize());
    uint16_t count = capped_count;
    for (auto it = records->RBegin(), end = records->REnd();
         count > 0 && it != end; count--, it++) {
      const gc::AllocRecord* record = &it->second;
      std::string temp;
      class_names.Add(record->GetClassDescriptor(&temp));
      for (size_t i = 0, depth = record->GetDepth(); i < depth; i++) {
        ArtMethod* m = record->StackElement(i).GetMethod();
        class_names.Add(m->GetDeclaringClassDescriptor());
        method_names.Add(m->GetName());
        filenames.Add(GetMethodSourceFile(m));
      }
    }

    LOG(INFO) << "recent allocation records: " << capped_count;
    LOG(INFO) << "allocation records all objects: " << records->Size();

    //
    // Part 2: Generate the output and store it in the buffer.
    //

    // (1b) message header len (to allow future expansion); includes itself
    // (1b) entry header len
    // (1b) stack frame len
    const int kMessageHeaderLen = 15;
    const int kEntryHeaderLen = 9;
    const int kStackFrameLen = 8;
    JDWP::Append1BE(bytes, kMessageHeaderLen);
    JDWP::Append1BE(bytes, kEntryHeaderLen);
    JDWP::Append1BE(bytes, kStackFrameLen);

    // (2b) number of entries
    // (4b) offset to string table from start of message
    // (2b) number of class name strings
    // (2b) number of method name strings
    // (2b) number of source file name strings
    JDWP::Append2BE(bytes, capped_count);
    size_t string_table_offset = bytes.size();
    JDWP::Append4BE(bytes, 0);  // We'll patch this later...
    JDWP::Append2BE(bytes, class_names.Size());
    JDWP::Append2BE(bytes, method_names.Size());
    JDWP::Append2BE(bytes, filenames.Size());

    std::string temp;
    count = capped_count;
    // The last "count" allocation records in "records" are the most recent ones. Iterate in
    // reverse so that the most recent allocation is sent first.
    for (auto it = records->RBegin(), end = records->REnd();
         count > 0 && it != end; count--, it++) {
      // For each entry:
      // (4b) total allocation size
      // (2b) thread id
      // (2b) allocated object's class name index
      // (1b) stack depth
      const gc::AllocRecord* record = &it->second;
      size_t stack_depth = record->GetDepth();
      size_t allocated_object_class_name_index =
          class_names.IndexOf(record->GetClassDescriptor(&temp));
      JDWP::Append4BE(bytes, record->ByteCount());
      JDWP::Append2BE(bytes, static_cast<uint16_t>(record->GetTid()));
      JDWP::Append2BE(bytes, allocated_object_class_name_index);
      JDWP::Append1BE(bytes, stack_depth);

      for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
        // For each stack frame:
        // (2b) method's class name
        // (2b) method name
        // (2b) method source file
        // (2b) line number, clipped to 32767; -2 if native; -1 if no source
        ArtMethod* m = record->StackElement(stack_frame).GetMethod();
        size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
        size_t method_name_index = method_names.IndexOf(m->GetName());
        size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
        JDWP::Append2BE(bytes, class_name_index);
        JDWP::Append2BE(bytes, method_name_index);
        JDWP::Append2BE(bytes, file_name_index);
        JDWP::Append2BE(bytes, record->StackElement(stack_frame).ComputeLineNumber());
      }
    }

    // (xb) class name strings
    // (xb) method name strings
    // (xb) source file strings
    JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
    class_names.WriteTo(bytes);
    method_names.WriteTo(bytes);
    filenames.WriteTo(bytes);
  }
  JNIEnv* env = self->GetJniEnv();
  jbyteArray result = env->NewByteArray(bytes.size());
  if (result != nullptr) {
    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
  }
  return result;
}

ArtMethod* DeoptimizationRequest::Method() const {
  return jni::DecodeArtMethod(method_);
}

void DeoptimizationRequest::SetMethod(ArtMethod* m) {
  method_ = jni::EncodeArtMethod(m);
}

void Dbg::VisitRoots(RootVisitor* visitor) {
  // Visit breakpoint roots, used to prevent unloading of methods with breakpoints.
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  BufferedRootVisitor<128> root_visitor(visitor, RootInfo(kRootVMInternal));
  for (Breakpoint& breakpoint : gBreakpoints) {
    breakpoint.Method()->VisitRoots(root_visitor, kRuntimePointerSize);
  }
}

void Dbg::DbgThreadLifecycleCallback::ThreadStart(Thread* self) {
  Dbg::PostThreadStart(self);
}

void Dbg::DbgThreadLifecycleCallback::ThreadDeath(Thread* self) {
  Dbg::PostThreadDeath(self);
}

void Dbg::DbgClassLoadCallback::ClassLoad(Handle<mirror::Class> klass ATTRIBUTE_UNUSED) {
  // Ignore ClassLoad.
}

void Dbg::DbgClassLoadCallback::ClassPrepare(Handle<mirror::Class> temp_klass ATTRIBUTE_UNUSED,
                                             Handle<mirror::Class> klass) {
  Dbg::PostClassPrepare(klass.Get());
}

}  // namespace art