debugger.cc revision a98ffd745bbecb2e84a492194950c0b94966546b
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger.h"

#include <sys/uio.h>

#include <set>

#include "arch/context.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "field_helper.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope.h"
#include "jdwp/object_registry.h"
#include "method_helper.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "quick/inline_method_analyser.h"
#include "reflection.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "throw_location.h"
#include "utf.h"
#include "verifier/method_verifier-inl.h"
#include "well_known_classes.h"

#ifdef HAVE_ANDROID_OS
#include "cutils/properties.h"
#endif

namespace art {

static const size_t kMaxAllocRecordStackDepth = 16;  // Max 255.
static const size_t kDefaultNumAllocRecords = 64*1024;  // Must be a power of 2. 2BE can hold 64k-1.

// Limit alloc_record_count to the 2BE value that is the limit of the current protocol.
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
  if (alloc_record_count > 0xffff) {
    return 0xffff;
  }
  return alloc_record_count;
}
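// For example, a record count of 70000 is clamped to 0xffff (65535), while a count that already
// fits in two bytes, e.g. 1024, passes through unchanged.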

class AllocRecordStackTraceElement {
 public:
  AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {
  }

  int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* method = Method();
    DCHECK(method != nullptr);
    return method->GetLineNumFromDexPC(DexPc());
  }

  mirror::ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    return soa.DecodeMethod(method_);
  }

  void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(m);
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  void SetDexPc(uint32_t pc) {
    dex_pc_ = pc;
  }

 private:
  jmethodID method_;
  uint32_t dex_pc_;
};

jobject Dbg::TypeCache::Add(mirror::Class* t) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  int32_t hash_code = t->IdentityHashCode();
  auto range = objects_.equal_range(hash_code);
  for (auto it = range.first; it != range.second; ++it) {
    if (soa.Decode<mirror::Class*>(it->second) == t) {
      // Found a matching weak global, return it.
      return it->second;
    }
  }
  JNIEnv* env = soa.Env();
  const jobject local_ref = soa.AddLocalReference<jobject>(t);
  const jobject weak_global = env->NewWeakGlobalRef(local_ref);
  env->DeleteLocalRef(local_ref);
  objects_.insert(std::make_pair(hash_code, weak_global));
  return weak_global;
}
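// Note that Add() deduplicates by identity hash code: repeated calls with the same class return
// the one existing weak global rather than creating a new reference each time. The weak globals
// are released in Clear() below.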

void Dbg::TypeCache::Clear() {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  Thread* self = Thread::Current();
  for (const auto& p : objects_) {
    vm->DeleteWeakGlobalRef(self, p.second);
  }
  objects_.clear();
}

class AllocRecord {
 public:
  AllocRecord() : type_(nullptr), byte_count_(0), thin_lock_id_(0) {}

  mirror::Class* Type() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_));
  }

  void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
                                                       Locks::alloc_tracker_lock_) {
    type_ = Dbg::type_cache_.Add(t);
  }

  size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    size_t depth = 0;
    while (depth < kMaxAllocRecordStackDepth && stack_[depth].Method() != NULL) {
      ++depth;
    }
    return depth;
  }

  size_t ByteCount() const {
    return byte_count_;
  }

  void SetByteCount(size_t count) {
    byte_count_ = count;
  }

  uint16_t ThinLockId() const {
    return thin_lock_id_;
  }

  void SetThinLockId(uint16_t id) {
    thin_lock_id_ = id;
  }

  AllocRecordStackTraceElement* StackElement(size_t index) {
    DCHECK_LT(index, kMaxAllocRecordStackDepth);
    return &stack_[index];
  }

 private:
  jobject type_;  // This is a weak global.
  size_t byte_count_;
  uint16_t thin_lock_id_;
  AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];  // Unused entries have NULL method.
};

class Breakpoint {
 public:
  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(method);
  }

  Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    : method_(nullptr), dex_pc_(other.dex_pc_),
      need_full_deoptimization_(other.need_full_deoptimization_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(other.Method());
  }

  mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    return soa.DecodeMethod(method_);
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  bool NeedFullDeoptimization() const {
    return need_full_deoptimization_;
  }

 private:
  // The location of this breakpoint.
  jmethodID method_;
  uint32_t dex_pc_;

  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
  bool need_full_deoptimization_;
};

static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
  return os;
}
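// For example, a breakpoint at dex pc 2 in Object.wait() prints as something like
// "Breakpoint[void java.lang.Object.wait() @0x2]".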

class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
 public:
  DebugInstrumentationListener() {}
  virtual ~DebugInstrumentationListener() {}

  void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                     uint32_t dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point, but native method entry stubs aren't.
      return;
    }
    Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
  }

  void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc, const JValue& return_value)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point, but native method entry stubs aren't.
      return;
    }
    Dbg::UpdateDebugger(thread, this_object, method, dex_pc, Dbg::kMethodExit, &return_value);
  }

  void MethodUnwind(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // We're not registered to listen for this kind of event, so complain.
    LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
               << " " << dex_pc;
  }

  void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                  uint32_t new_dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, 0, nullptr);
  }

  void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                 uint32_t dex_pc, mirror::ArtField* field)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
  }

  void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
  }

  void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
                       mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
                       mirror::Throwable* exception_object)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;

// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;

// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
static bool gJdwpConfigured = false;

// Broken-down JDWP options. (Only valid if IsJdwpConfigured() is true.)
static JDWP::JdwpOptions gJdwpOptions;

// Runtime JDWP state.
static JDWP::JdwpState* gJdwpState = NULL;
static bool gDebuggerConnected;  // debugger or DDMS is connected.
static bool gDebuggerActive;     // debugger is making requests.
static bool gDisposed;           // debugger called VirtualMachine.Dispose, so we should drop the connection.

static bool gDdmThreadNotification = false;

// DDMS GC-related settings.
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;

ObjectRegistry* Dbg::gRegistry = nullptr;

// Recent allocation tracking.
AllocRecord* Dbg::recent_allocation_records_ = nullptr;  // TODO: CircularBuffer<AllocRecord>
size_t Dbg::alloc_record_max_ = 0;
size_t Dbg::alloc_record_head_ = 0;
size_t Dbg::alloc_record_count_ = 0;
Dbg::TypeCache Dbg::type_cache_;

// Deoptimization support.
std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
size_t Dbg::full_deoptimization_event_count_ = 0;
size_t Dbg::delayed_full_undeoptimization_count_ = 0;

// Instrumentation event reference counters.
size_t Dbg::dex_pc_change_event_ref_count_ = 0;
size_t Dbg::method_enter_event_ref_count_ = 0;
size_t Dbg::method_exit_event_ref_count_ = 0;
size_t Dbg::field_read_event_ref_count_ = 0;
size_t Dbg::field_write_event_ref_count_ = 0;
size_t Dbg::exception_catch_event_ref_count_ = 0;
uint32_t Dbg::instrumentation_events_ = 0;

// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);

void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
                                RootType root_type) {
  if (receiver != nullptr) {
    callback(&receiver, arg, tid, root_type);
  }
  if (thread != nullptr) {
    callback(&thread, arg, tid, root_type);
  }
  if (klass != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&klass), arg, tid, root_type);
  }
  if (method != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
  }
}

void DebugInvokeReq::Clear() {
  invoke_needed = false;
  receiver = nullptr;
  thread = nullptr;
  klass = nullptr;
  method = nullptr;
}

void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
                                   RootType root_type) {
  if (method != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
  }
}

bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
  return dex_pcs.find(dex_pc) != dex_pcs.end();
}

void SingleStepControl::Clear() {
  is_active = false;
  method = nullptr;
  dex_pcs.clear();
}

static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
    LOCKS_EXCLUDED(Locks::breakpoint_lock_)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
      return true;
    }
  }
  return false;
}

static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
  // A thread may be suspended for GC; in this code, we really want to know whether
  // there's a debugger suspension active.
  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
}

static mirror::Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    status = JDWP::ERR_INVALID_OBJECT;
    return NULL;
  }
  if (!o->IsArrayInstance()) {
    status = JDWP::ERR_INVALID_ARRAY;
    return NULL;
  }
  status = JDWP::ERR_NONE;
  return o->AsArray();
}

static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    status = JDWP::ERR_INVALID_OBJECT;
    return NULL;
  }
  if (!o->IsClass()) {
    status = JDWP::ERR_INVALID_CLASS;
    return NULL;
  }
  status = JDWP::ERR_NONE;
  return o->AsClass();
}

static JDWP::JdwpError DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id, Thread*& thread)
    EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id);
  if (thread_peer == NULL || thread_peer == ObjectRegistry::kInvalidObject) {
    // This isn't even an object.
    return JDWP::ERR_INVALID_OBJECT;
  }

  mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
    // This isn't a thread.
    return JDWP::ERR_INVALID_THREAD;
  }

  thread = Thread::FromManagedThread(soa, thread_peer);
  if (thread == NULL) {
    // This is a java.lang.Thread without a Thread*. Must be a zombie.
    return JDWP::ERR_THREAD_NOT_ALIVE;
  }
  return JDWP::ERR_NONE;
}

static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
  return static_cast<JDWP::JdwpTag>(descriptor[0]);
}
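// So, for example, "I" maps to JT_INT ('I'), "[I" maps to JT_ARRAY ('['), and any class
// descriptor such as "Ljava/lang/String;" maps to JT_OBJECT ('L').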

static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  std::string temp;
  const char* descriptor = klass->GetDescriptor(&temp);
  return BasicTagFromDescriptor(descriptor);
}

static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(c != NULL);
  if (c->IsArrayClass()) {
    return JDWP::JT_ARRAY;
  }
  if (c->IsStringClass()) {
    return JDWP::JT_STRING;
  }
  if (c->IsClassClass()) {
    return JDWP::JT_CLASS_OBJECT;
  }
  {
    mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
    if (thread_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD;
    }
  }
  {
    mirror::Class* thread_group_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
    if (thread_group_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD_GROUP;
    }
  }
  {
    mirror::Class* class_loader_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader);
    if (class_loader_class->IsAssignableFrom(c)) {
      return JDWP::JT_CLASS_LOADER;
    }
  }
  return JDWP::JT_OBJECT;
}

/*
 * Objects declared to hold Object might actually hold a more specific
 * type.  The debugger may take a special interest in these (e.g. it
 * wants to display the contents of Strings), so we want to return an
 * appropriate tag.
 *
 * Null objects are tagged JT_OBJECT.
 */
JDWP::JdwpTag Dbg::TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) {
  return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
}
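// For example, a field declared as java.lang.Object that currently holds a java.lang.String is
// tagged JT_STRING here, even though its static type would only justify JT_OBJECT.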

static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_BOOLEAN:
  case JDWP::JT_BYTE:
  case JDWP::JT_CHAR:
  case JDWP::JT_FLOAT:
  case JDWP::JT_DOUBLE:
  case JDWP::JT_INT:
  case JDWP::JT_LONG:
  case JDWP::JT_SHORT:
  case JDWP::JT_VOID:
    return true;
  default:
    return false;
  }
}

/*
 * Handle one of the JDWP name/value pairs.
 *
 * JDWP options are:
 *  help: if specified, show help message and bail
 *  transport: may be dt_socket or dt_shmem
 *  address: for dt_socket, "host:port", or just "port" when listening
 *  server: if "y", wait for debugger to attach; if "n", attach to debugger
 *  timeout: how long to wait for debugger to connect / listen
 *
 * Useful with server=n (these aren't supported yet):
 *  onthrow=<exception-name>: connect to debugger when exception thrown
 *  onuncaught=y|n: connect to debugger when uncaught exception thrown
 *  launch=<command-line>: launch the debugger itself
 *
 * The "transport" option is required, as is "address" if server=n.
 */
static bool ParseJdwpOption(const std::string& name, const std::string& value) {
  if (name == "transport") {
    if (value == "dt_socket") {
      gJdwpOptions.transport = JDWP::kJdwpTransportSocket;
    } else if (value == "dt_android_adb") {
      gJdwpOptions.transport = JDWP::kJdwpTransportAndroidAdb;
    } else {
      LOG(ERROR) << "JDWP transport not supported: " << value;
      return false;
    }
  } else if (name == "server") {
    if (value == "n") {
      gJdwpOptions.server = false;
    } else if (value == "y") {
      gJdwpOptions.server = true;
    } else {
      LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'";
      return false;
    }
  } else if (name == "suspend") {
    if (value == "n") {
      gJdwpOptions.suspend = false;
    } else if (value == "y") {
      gJdwpOptions.suspend = true;
    } else {
      LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'";
      return false;
    }
  } else if (name == "address") {
    /* this is either <port> or <host>:<port> */
    std::string port_string;
    gJdwpOptions.host.clear();
    std::string::size_type colon = value.find(':');
    if (colon != std::string::npos) {
      gJdwpOptions.host = value.substr(0, colon);
      port_string = value.substr(colon + 1);
    } else {
      port_string = value;
    }
    if (port_string.empty()) {
      LOG(ERROR) << "JDWP address missing port: " << value;
      return false;
    }
    char* end;
    uint64_t port = strtoul(port_string.c_str(), &end, 10);
    if (*end != '\0' || port > 0xffff) {
      LOG(ERROR) << "JDWP address has junk in port field: " << value;
      return false;
    }
    gJdwpOptions.port = port;
  } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") {
    /* valid but unsupported */
    LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'";
  } else {
    LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'";
  }

  return true;
}

/*
 * Parse the latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.:
 * "transport=dt_socket,address=8000,server=y,suspend=n"
 */
bool Dbg::ParseJdwpOptions(const std::string& options) {
  VLOG(jdwp) << "ParseJdwpOptions: " << options;

  std::vector<std::string> pairs;
  Split(options, ',', pairs);

  for (size_t i = 0; i < pairs.size(); ++i) {
    std::string::size_type equals = pairs[i].find('=');
    if (equals == std::string::npos) {
      LOG(ERROR) << "Can't parse JDWP option '" << pairs[i] << "' in '" << options << "'";
      return false;
    }
    ParseJdwpOption(pairs[i].substr(0, equals), pairs[i].substr(equals + 1));
  }

  if (gJdwpOptions.transport == JDWP::kJdwpTransportUnknown) {
    LOG(ERROR) << "Must specify JDWP transport: " << options;
  }
  if (!gJdwpOptions.server && (gJdwpOptions.host.empty() || gJdwpOptions.port == 0)) {
    LOG(ERROR) << "Must specify JDWP host and port when server=n: " << options;
    return false;
  }

  gJdwpConfigured = true;
  return true;
}

void Dbg::StartJdwp() {
  if (!gJdwpAllowed || !IsJdwpConfigured()) {
    // No JDWP for you!
    return;
  }

  CHECK(gRegistry == nullptr);
  gRegistry = new ObjectRegistry;

  // Init JDWP if the debugger is enabled. This may connect out to a
  // debugger, passively listen for a debugger, or block waiting for a
  // debugger.
  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
  if (gJdwpState == NULL) {
    // We probably failed because some other process has the port already, which means that
    // if we don't abort the user is likely to think they're talking to us when they're actually
    // talking to that other process.
    LOG(FATAL) << "Debugger thread failed to initialize";
  }

  // If a debugger has already attached, send the "welcome" message.
  // This may cause us to suspend all threads.
  if (gJdwpState->IsActive()) {
    ScopedObjectAccess soa(Thread::Current());
    if (!gJdwpState->PostVMStart()) {
      LOG(WARNING) << "Failed to post 'start' message to debugger";
    }
  }
}

void Dbg::StopJdwp() {
  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
  // destruction of gJdwpState).
  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
    gJdwpState->PostVMDeath();
  }
  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
  Disposed();
  delete gJdwpState;
  gJdwpState = nullptr;
  delete gRegistry;
  gRegistry = nullptr;
}

void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);
  }
}

void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

DebugInvokeReq* Dbg::GetInvokeReq() {
  return Thread::Current()->GetInvokeReq();
}

Thread* Dbg::GetDebugThread() {
  return (gJdwpState != NULL) ? gJdwpState->GetDebugThread() : NULL;
}

void Dbg::ClearWaitForEventThread() {
  gJdwpState->ClearWaitForEventThread();
}

void Dbg::Connected() {
  CHECK(!gDebuggerConnected);
  VLOG(jdwp) << "JDWP has attached";
  gDebuggerConnected = true;
  gDisposed = false;
}

void Dbg::Disposed() {
  gDisposed = true;
}

bool Dbg::IsDisposed() {
  return gDisposed;
}

void Dbg::GoActive() {
  // Enable all debugging features, including scans for breakpoints.
  // This is a no-op if we're already active.
  // Only called from the JDWP handler thread.
  if (gDebuggerActive) {
    return;
  }

  {
    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
    ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
    CHECK_EQ(gBreakpoints.size(), 0U);
  }

  {
    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
    CHECK_EQ(deoptimization_requests_.size(), 0U);
    CHECK_EQ(full_deoptimization_event_count_, 0U);
    CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
    CHECK_EQ(method_enter_event_ref_count_, 0U);
    CHECK_EQ(method_exit_event_ref_count_, 0U);
    CHECK_EQ(field_read_event_ref_count_, 0U);
    CHECK_EQ(field_write_event_ref_count_, 0U);
    CHECK_EQ(exception_catch_event_ref_count_, 0U);
  }

  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);
  CHECK_NE(old_state, kRunnable);
  runtime->GetInstrumentation()->EnableDeoptimization();
  instrumentation_events_ = 0;
  gDebuggerActive = true;
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();

  LOG(INFO) << "Debugger is active";
}

void Dbg::Disconnected() {
  CHECK(gDebuggerConnected);

  LOG(INFO) << "Debugger is no longer active";

  // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
  // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
  // and clear the object registry.
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);

  // Debugger may not be active at this point.
  if (gDebuggerActive) {
    {
      // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
      // This prevents us from having any pending deoptimization request when the debugger attaches
      // to us again while no event has been requested yet.
      MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
      deoptimization_requests_.clear();
      full_deoptimization_event_count_ = 0U;
      delayed_full_undeoptimization_count_ = 0U;
    }
    if (instrumentation_events_ != 0) {
      runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
                                                    instrumentation_events_);
      instrumentation_events_ = 0;
    }
    runtime->GetInstrumentation()->DisableDeoptimization();
    gDebuggerActive = false;
  }
  gRegistry->Clear();
  gDebuggerConnected = false;
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();
}

bool Dbg::IsDebuggerActive() {
  return gDebuggerActive;
}

bool Dbg::IsJdwpConfigured() {
  return gJdwpConfigured;
}

int64_t Dbg::LastDebuggerActivity() {
  return gJdwpState->LastDebuggerActivity();
}

void Dbg::UndoDebuggerSuspensions() {
  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
}

std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id);
  if (o == NULL) {
    return "NULL";
  }
  if (o == ObjectRegistry::kInvalidObject) {
    return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
  }
  if (!o->IsClass()) {
    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
  }
  return GetClassName(o->AsClass());
}

std::string Dbg::GetClassName(mirror::Class* klass) {
  if (klass == nullptr) {
    return "NULL";
  }
  std::string temp;
  return DescriptorToName(klass->GetDescriptor(&temp));
}

JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_object_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, status);
  if (c == NULL) {
    return status;
  }
  class_object_id = gRegistry->Add(c);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclass_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, status);
  if (c == NULL) {
    return status;
  }
  if (c->IsInterface()) {
    // http://code.google.com/p/android/issues/detail?id=20856
    superclass_id = 0;
  } else {
    superclass_id = gRegistry->Add(c->GetSuperClass());
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader()));
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, status);
  if (c == NULL) {
    return status;
  }

  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;

  // Set ACC_SUPER. Dex files don't contain this flag, and only classes (not interfaces) are
  // supposed to have it set. Class.getModifiers doesn't return it, but JDWP does, so we set it
  // here.
  if ((access_flags & kAccInterface) == 0) {
    access_flags |= kAccSuper;
  }

  expandBufAdd4BE(pReply, access_flags);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  // Ensure all threads are suspended while we read objects' lock words.
  Thread* self = Thread::Current();
  CHECK_EQ(self->GetState(), kRunnable);
  self->TransitionFromRunnableToSuspended(kSuspended);
  Runtime::Current()->GetThreadList()->SuspendAll();

  MonitorInfo monitor_info(o);

  Runtime::Current()->GetThreadList()->ResumeAll();
  self->TransitionFromSuspendedToRunnable();

  if (monitor_info.owner_ != NULL) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
  } else {
    expandBufAddObjectId(reply, gRegistry->Add(NULL));
  }
  expandBufAdd4BE(reply, monitor_info.entry_count_);
  expandBufAdd4BE(reply, monitor_info.waiters_.size());
  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
                                      std::vector<JDWP::ObjectId>& monitors,
                                      std::vector<uint32_t>& stack_depths) {
  struct OwnedMonitorVisitor : public StackVisitor {
    OwnedMonitorVisitor(Thread* thread, Context* context,
                        std::vector<JDWP::ObjectId>* monitor_vector,
                        std::vector<uint32_t>* stack_depth_vector)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), current_stack_depth(0),
        monitors(monitor_vector), stack_depths(stack_depth_vector) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
        ++current_stack_depth;
      }
      return true;
    }

    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
      visitor->stack_depths->push_back(visitor->current_stack_depth);
    }

    size_t current_stack_depth;
    std::vector<JDWP::ObjectId>* monitors;
    std::vector<uint32_t>* stack_depths;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  Thread* thread;
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
    if (error != JDWP::ERR_NONE) {
      return error;
    }
    if (!IsSuspendedForDebugger(soa, thread)) {
      return JDWP::ERR_THREAD_NOT_SUSPENDED;
    }
  }
  std::unique_ptr<Context> context(Context::Create());
  OwnedMonitorVisitor visitor(thread, context.get(), &monitors, &stack_depths);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
                                         JDWP::ObjectId& contended_monitor) {
  mirror::Object* contended_monitor_obj;
  ScopedObjectAccessUnchecked soa(Thread::Current());
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    Thread* thread;
    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
    if (error != JDWP::ERR_NONE) {
      return error;
    }
    if (!IsSuspendedForDebugger(soa, thread)) {
      return JDWP::ERR_THREAD_NOT_SUSPENDED;
    }
    contended_monitor_obj = Monitor::GetContendedMonitor(thread);
  }
  // Add() requires the thread_list_lock_ not to be held, to avoid a lock level violation.
  contended_monitor = gRegistry->Add(contended_monitor_obj);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                       std::vector<uint64_t>& counts)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  std::vector<mirror::Class*> classes;
  counts.clear();
  for (size_t i = 0; i < class_ids.size(); ++i) {
    JDWP::JdwpError status;
    mirror::Class* c = DecodeClass(class_ids[i], status);
    if (c == NULL) {
      return status;
    }
    classes.push_back(c);
    counts.push_back(0);
  }
  heap->CountInstances(classes, false, &counts[0]);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>& instances)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // We only want reachable instances, so do a GC.
  heap->CollectGarbage(false);
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == nullptr) {
    return status;
  }
  std::vector<mirror::Object*> raw_instances;
  Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    instances.push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                         std::vector<JDWP::ObjectId>& referring_objects)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  std::vector<mirror::Object*> raw_instances;
  heap->GetReferringObjects(o, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    referring_objects.push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->DisableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  // Unlike DisableCollection, the JDWP specs do not state that an invalid object causes an
  // error. The RI also ignores these cases and never returns an error. However, it's not obvious
  // why this command should behave differently from the DisableCollection and IsCollected
  // commands, so let's be more strict and return an error if this happens.
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->EnableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool& is_collected)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (object_id == 0) {
    // Null object id is invalid.
    return JDWP::ERR_INVALID_OBJECT;
  }
  // JDWP specs state an INVALID_OBJECT error is returned if the object ID is not valid. However,
  // the RI seems to ignore this and assumes the object has been collected.
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    is_collected = true;
  } else {
    is_collected = gRegistry->IsCollected(object_id);
  }
  return JDWP::ERR_NONE;
}

void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gRegistry->DisposeObject(object_id, reference_count);
}

JDWP::JdwpTypeTag Dbg::GetTypeTag(mirror::Class* klass) {
  DCHECK(klass != nullptr);
  if (klass->IsArrayClass()) {
    return JDWP::TT_ARRAY;
  } else if (klass->IsInterface()) {
    return JDWP::TT_INTERFACE;
  } else {
    return JDWP::TT_CLASS;
  }
}

JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, class_id);
  return JDWP::ERR_NONE;
}

void Dbg::GetClassList(std::vector<JDWP::RefTypeId>& classes) {
  // Get the complete list of reference classes (i.e. all classes except
  // the primitive types) and append their RefTypeIds to the given vector.
  struct ClassListCreator {
    explicit ClassListCreator(std::vector<JDWP::RefTypeId>& classes) : classes(classes) {
    }

    static bool Visit(mirror::Class* c, void* arg) {
      return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
    }

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS {
      if (!c->IsPrimitive()) {
        classes.push_back(gRegistry->AddRefType(c));
      }
      return true;
    }

    std::vector<JDWP::RefTypeId>& classes;
  };

  ClassListCreator clc(classes);
  Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(ClassListCreator::Visit,
                                                                       &clc);
}

JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
                                  uint32_t* pStatus, std::string* pDescriptor) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }

  if (c->IsArrayClass()) {
    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
    *pTypeTag = JDWP::TT_ARRAY;
  } else {
    if (c->IsErroneous()) {
      *pStatus = JDWP::CS_ERROR;
    } else {
      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
    }
    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
  }

  if (pDescriptor != NULL) {
    std::string temp;
    *pDescriptor = c->GetDescriptor(&temp);
  }
  return JDWP::ERR_NONE;
}

void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>& ids) {
  std::vector<mirror::Class*> classes;
  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
  ids.clear();
  for (size_t i = 0; i < classes.size(); ++i) {
    ids.push_back(gRegistry->Add(classes[i]));
  }
}

JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());

  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, type_id);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }
  std::string temp;
  *signature = c->GetDescriptor(&temp);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == nullptr) {
    return status;
  }
  const char* source_file = c->GetSourceFile();
  if (source_file == nullptr) {
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  result = source_file;
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t& tag) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  tag = TagFromObject(soa, o);
  return JDWP::ERR_NONE;
}

size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_VOID:
    return 0;
  case JDWP::JT_BYTE:
  case JDWP::JT_BOOLEAN:
    return 1;
  case JDWP::JT_CHAR:
  case JDWP::JT_SHORT:
    return 2;
  case JDWP::JT_FLOAT:
  case JDWP::JT_INT:
    return 4;
  case JDWP::JT_ARRAY:
  case JDWP::JT_OBJECT:
  case JDWP::JT_STRING:
  case JDWP::JT_THREAD:
  case JDWP::JT_THREAD_GROUP:
  case JDWP::JT_CLASS_LOADER:
  case JDWP::JT_CLASS_OBJECT:
    return sizeof(JDWP::ObjectId);
  case JDWP::JT_DOUBLE:
  case JDWP::JT_LONG:
    return 8;
  default:
    LOG(FATAL) << "Unknown tag " << tag;
    return -1;
  }
}

JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int& length) {
  JDWP::JdwpError status;
  mirror::Array* a = DecodeArray(array_id, status);
  if (a == NULL) {
    return status;
  }
  length = a->GetLength();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Array* a = DecodeArray(array_id, status);
  if (a == nullptr) {
    return status;
  }

  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
  expandBufAdd1(pReply, element_tag);
  expandBufAdd4BE(pReply, count);

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    uint8_t* dst = expandBufAddSpace(pReply, count * width);
    if (width == 8) {
      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
    } else if (width == 4) {
      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
    } else if (width == 2) {
      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
    } else {
      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
      memcpy(dst, &src[offset * width], count * width);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      mirror::Object* element = oa->Get(offset + i);
      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
                                                        : element_tag;
      expandBufAdd1(pReply, specific_tag);
      expandBufAddObjectId(pReply, gRegistry->Add(element));
    }
  }

  return JDWP::ERR_NONE;
}

template <typename T>
static void CopyArrayData(mirror::Array* a, JDWP::Request& src, int offset, int count)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: fix when annotalysis correctly handles non-member functions.
  DCHECK(a->GetClass()->IsPrimitiveArray());

  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
  for (int i = 0; i < count; ++i) {
    *dst++ = src.ReadValue(sizeof(T));
  }
}

JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
                                      JDWP::Request& request)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  JDWP::JdwpError status;
  mirror::Array* dst = DecodeArray(array_id, status);
  if (dst == NULL) {
    return status;
  }

  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    if (width == 8) {
      CopyArrayData<uint64_t>(dst, request, offset, count);
    } else if (width == 4) {
      CopyArrayData<uint32_t>(dst, request, offset, count);
    } else if (width == 2) {
      CopyArrayData<uint16_t>(dst, request, offset, count);
    } else {
      CopyArrayData<uint8_t>(dst, request, offset, count);
    }
  } else {
    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      JDWP::ObjectId id = request.ReadObjectId();
      mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
      if (o == ObjectRegistry::kInvalidObject) {
        return JDWP::ERR_INVALID_OBJECT;
      }
      oa->Set<false>(offset + i, o);
    }
  }

  return JDWP::ERR_NONE;
}

JDWP::ObjectId Dbg::CreateString(const std::string& str) {
  return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str()));
}

JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId& new_object) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }
  new_object = gRegistry->Add(c->AllocObject(Thread::Current()));
  return JDWP::ERR_NONE;
}

/*
 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
 */
JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
                                       JDWP::ObjectId& new_array) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(array_class_id, status);
  if (c == NULL) {
    return status;
  }
  new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length,
                                                        c->GetComponentSize(),
                                                        Runtime::Current()->GetHeap()->GetCurrentAllocator()));
  return JDWP::ERR_NONE;
}

JDWP::FieldId Dbg::ToFieldId(const mirror::ArtField* f) {
  CHECK(!kMovingFields);
  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
}

static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(!kMovingMethods);
  return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
}

static mirror::ArtField* FromFieldId(JDWP::FieldId fid)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(!kMovingFields);
  return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid));
}

static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(!kMovingMethods);
  return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid));
}

bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) {
  CHECK(event_thread != nullptr);
  mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(expected_thread_id);
  return expected_thread_peer == event_thread->GetPeer();
}

bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location,
                        const JDWP::EventLocation& event_location) {
  if (expected_location.dex_pc != event_location.dex_pc) {
    return false;
  }
  mirror::ArtMethod* m = FromMethodId(expected_location.method_id);
  return m == event_location.method;
}

bool Dbg::MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id) {
  if (event_class == nullptr) {
    return false;
  }
  JDWP::JdwpError status;
  mirror::Class* expected_class = DecodeClass(class_id, status);
  CHECK(expected_class != nullptr);
  return expected_class->IsAssignableFrom(event_class);
}

bool Dbg::MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
                     mirror::ArtField* event_field) {
  mirror::ArtField* expected_field = FromFieldId(expected_field_id);
  if (expected_field != event_field) {
    return false;
  }
  return Dbg::MatchType(event_field->GetDeclaringClass(), expected_type_id);
}

bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) {
  mirror::Object* modifier_instance = gRegistry->Get<mirror::Object*>(expected_instance_id);
  return modifier_instance == event_instance;
}

void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (m == nullptr) {
    memset(location, 0, sizeof(*location));
  } else {
    mirror::Class* c = m->GetDeclaringClass();
    location->type_tag = GetTypeTag(c);
    location->class_id = gRegistry->AddRefType(c);
    location->method_id = ToMethodId(m);
    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
  }
}

std::string Dbg::GetMethodName(JDWP::MethodId method_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::ArtMethod* m = FromMethodId(method_id);
  if (m == nullptr) {
    return "NULL";
  }
  return m->GetName();
}

std::string Dbg::GetFieldName(JDWP::FieldId field_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::ArtField* f = FromFieldId(field_id);
  if (f == nullptr) {
    return "NULL";
  }
  return f->GetName();
}

/*
 * Augment the access flags for synthetic methods and fields by setting
 * the (as described by the spec) "0xf0000000 bit".  Also, strip out any
 * flags not specified by the Java programming language.
 */
static uint32_t MangleAccessFlags(uint32_t accessFlags) {
  accessFlags &= kAccJavaFlagsMask;
  if ((accessFlags & kAccSynthetic) != 0) {
    accessFlags |= 0xf0000000;
  }
  return accessFlags;
}
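// For example, a synthetic bridge method with flags (kAccSynthetic | kAccBridge) == 0x1040 comes
// back as 0xf0001040, while an ordinary public method (0x0001) is returned unchanged.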
1525
1526/*
1527 * Circularly shifts registers so that arguments come first. Debuggers
1528 * expect slots to begin with arguments, but dex code places them at
1529 * the end.
1530 */
1531static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m)
1532    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1533  const DexFile::CodeItem* code_item = m->GetCodeItem();
1534  if (code_item == nullptr) {
1535    // We should not get here for a method without code (native, proxy or abstract). Log it and
1536    // return the slot as is since all registers are arguments.
1537    LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m);
1538    return slot;
1539  }
1540  uint16_t ins_size = code_item->ins_size_;
1541  uint16_t locals_size = code_item->registers_size_ - ins_size;
1542  if (slot >= locals_size) {
1543    return slot - locals_size;
1544  } else {
1545    return slot + ins_size;
1546  }
1547}
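// Worked example (illustrative): with registers_size == 5 and ins_size == 2,
// dex places locals in v0..v2 and arguments in v3..v4.  MangleSlot maps v3,v4
// to debugger slots 0,1 and v0..v2 to slots 2..4; DemangleSlot inverts this.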
1548
1549/*
1550 * Circularly shifts registers so that arguments come last. Reverts
1551 * slots to dex style argument placement.
1552 */
1553static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m)
1554    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1555  const DexFile::CodeItem* code_item = m->GetCodeItem();
1556  if (code_item == nullptr) {
1557    // We should not get here for a method without code (native, proxy or abstract). Log it and
1558    // return the slot as is since all registers are arguments.
1559    LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
1560    return slot;
1561  }
1562  uint16_t ins_size = code_item->ins_size_;
1563  uint16_t locals_size = code_item->registers_size_ - ins_size;
1564  if (slot < ins_size) {
1565    return slot + locals_size;
1566  } else {
1567    return slot - ins_size;
1568  }
1569}
1570
1571JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
1572  JDWP::JdwpError status;
1573  mirror::Class* c = DecodeClass(class_id, status);
1574  if (c == nullptr) {
1575    return status;
1576  }
1577
1578  size_t instance_field_count = c->NumInstanceFields();
1579  size_t static_field_count = c->NumStaticFields();
1580
1581  expandBufAdd4BE(pReply, instance_field_count + static_field_count);
1582
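  // Per JDWP ReferenceType.Fields(WithGeneric), each declared field is written as
  // fieldID, name, type descriptor, optional generic signature, then modifier bits;
  // OutputDeclaredMethods below follows the same shape for methods.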
1583  for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
1584    mirror::ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
1585    expandBufAddFieldId(pReply, ToFieldId(f));
1586    expandBufAddUtf8String(pReply, f->GetName());
1587    expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
1588    if (with_generic) {
1589      static const char genericSignature[1] = "";
1590      expandBufAddUtf8String(pReply, genericSignature);
1591    }
1592    expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
1593  }
1594  return JDWP::ERR_NONE;
1595}
1596
1597JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
1598                                           JDWP::ExpandBuf* pReply) {
1599  JDWP::JdwpError status;
1600  mirror::Class* c = DecodeClass(class_id, status);
1601  if (c == nullptr) {
1602    return status;
1603  }
1604
1605  size_t direct_method_count = c->NumDirectMethods();
1606  size_t virtual_method_count = c->NumVirtualMethods();
1607
1608  expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);
1609
1610  for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
1611    mirror::ArtMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count);
1612    expandBufAddMethodId(pReply, ToMethodId(m));
1613    expandBufAddUtf8String(pReply, m->GetName());
1614    expandBufAddUtf8String(pReply, m->GetSignature().ToString());
1615    if (with_generic) {
1616      static const char genericSignature[1] = "";
1617      expandBufAddUtf8String(pReply, genericSignature);
1618    }
1619    expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
1620  }
1621  return JDWP::ERR_NONE;
1622}
1623
1624JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1625  JDWP::JdwpError status;
1626  Thread* self = Thread::Current();
1627  StackHandleScope<1> hs(self);
1628  Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, status)));
1629  if (c.Get() == nullptr) {
1630    return status;
1631  }
1632  size_t interface_count = c->NumDirectInterfaces();
1633  expandBufAdd4BE(pReply, interface_count);
1634  for (size_t i = 0; i < interface_count; ++i) {
1635    expandBufAddRefTypeId(pReply,
1636                          gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i)));
1637  }
1638  return JDWP::ERR_NONE;
1639}
1640
1641void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply)
1642    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1643  struct DebugCallbackContext {
1644    int numItems;
1645    JDWP::ExpandBuf* pReply;
1646
1647    static bool Callback(void* context, uint32_t address, uint32_t line_number) {
1648      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1649      expandBufAdd8BE(pContext->pReply, address);
1650      expandBufAdd4BE(pContext->pReply, line_number);
1651      pContext->numItems++;
1652      return false;
1653    }
1654  };
1655  mirror::ArtMethod* m = FromMethodId(method_id);
1656  const DexFile::CodeItem* code_item = m->GetCodeItem();
1657  uint64_t start, end;
1658  if (code_item == nullptr) {
1659    DCHECK(m->IsNative() || m->IsProxyMethod());
1660    start = -1;
1661    end = -1;
1662  } else {
1663    start = 0;
1664    // Return the index of the last code unit.
1665    end = code_item->insns_size_in_code_units_ - 1;
1666  }
1667
1668  expandBufAdd8BE(pReply, start);
1669  expandBufAdd8BE(pReply, end);
1670
1671  // Add numLines later; it is back-patched at numLinesOffset once the debug info has been decoded.
1672  size_t numLinesOffset = expandBufGetLength(pReply);
1673  expandBufAdd4BE(pReply, 0);
1674
1675  DebugCallbackContext context;
1676  context.numItems = 0;
1677  context.pReply = pReply;
1678
1679  if (code_item != nullptr) {
1680    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
1681                                     DebugCallbackContext::Callback, nullptr, &context);
1682  }
1683
1684  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
1685}
1686
1687void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
1688                              JDWP::ExpandBuf* pReply) {
1689  struct DebugCallbackContext {
1690    mirror::ArtMethod* method;
1691    JDWP::ExpandBuf* pReply;
1692    size_t variable_count;
1693    bool with_generic;
1694
1695    static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
1696                         const char* name, const char* descriptor, const char* signature)
1697        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1698      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1699
1700      VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
1701                                 pContext->variable_count, startAddress, endAddress - startAddress,
1702                                 name, descriptor, signature, slot,
1703                                 MangleSlot(slot, pContext->method));
1704
1705      slot = MangleSlot(slot, pContext->method);
1706
1707      expandBufAdd8BE(pContext->pReply, startAddress);
1708      expandBufAddUtf8String(pContext->pReply, name);
1709      expandBufAddUtf8String(pContext->pReply, descriptor);
1710      if (pContext->with_generic) {
1711        expandBufAddUtf8String(pContext->pReply, signature);
1712      }
1713      expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
1714      expandBufAdd4BE(pContext->pReply, slot);
1715
1716      ++pContext->variable_count;
1717    }
1718  };
1719  mirror::ArtMethod* m = FromMethodId(method_id);
1720
1721  // arg_count considers doubles and longs to take 2 units.
1722  // variable_count considers everything to take 1 unit.
1723  std::string shorty(m->GetShorty());
1724  expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty));
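  // Illustrative example: shorty "DIJ" (returns double; takes an int and a long)
  // yields arg_count == 3 code units here, while each of those arguments still
  // contributes exactly one entry to the variable table written below.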
1725
1726  // We don't know the total number of variables yet, so leave a blank and update it later.
1727  size_t variable_count_offset = expandBufGetLength(pReply);
1728  expandBufAdd4BE(pReply, 0);
1729
1730  DebugCallbackContext context;
1731  context.method = m;
1732  context.pReply = pReply;
1733  context.variable_count = 0;
1734  context.with_generic = with_generic;
1735
1736  const DexFile::CodeItem* code_item = m->GetCodeItem();
1737  if (code_item != nullptr) {
1738    m->GetDexFile()->DecodeDebugInfo(
1739        code_item, m->IsStatic(), m->GetDexMethodIndex(), nullptr, DebugCallbackContext::Callback,
1740        &context);
1741  }
1742
1743  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
1744}
1745
1746void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
1747                                  JDWP::ExpandBuf* pReply) {
1748  mirror::ArtMethod* m = FromMethodId(method_id);
1749  JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
1750  OutputJValue(tag, return_value, pReply);
1751}
1752
1753void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
1754                           JDWP::ExpandBuf* pReply) {
1755  mirror::ArtField* f = FromFieldId(field_id);
1756  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1757  OutputJValue(tag, field_value, pReply);
1758}
1759
1760JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
1761                                  std::vector<uint8_t>& bytecodes)
1762    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1763  mirror::ArtMethod* m = FromMethodId(method_id);
1764  if (m == nullptr) {
1765    return JDWP::ERR_INVALID_METHODID;
1766  }
1767  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // Native, proxy and abstract methods have no code item. Assumed behavior:
    // report absent information rather than dereference null here.
    return JDWP::ERR_ABSENT_INFORMATION;
  }
1768  size_t byte_count = code_item->insns_size_in_code_units_ * 2;  // 16-bit code units.
1769  const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
1770  const uint8_t* end = begin + byte_count;
1771  for (const uint8_t* p = begin; p != end; ++p) {
1772    bytecodes.push_back(*p);
1773  }
1774  return JDWP::ERR_NONE;
1775}
1776
1777JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
1778  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1779}
1780
1781JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
1782  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1783}
1784
1785static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
1786                                         JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
1787                                         bool is_static)
1788    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1789  JDWP::JdwpError status;
1790  mirror::Class* c = DecodeClass(ref_type_id, status);
1791  if (ref_type_id != 0 && c == nullptr) {
1792    return status;
1793  }
1794
1795  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id);
1796  if ((!is_static && o == nullptr) || o == ObjectRegistry::kInvalidObject) {
1797    return JDWP::ERR_INVALID_OBJECT;
1798  }
1799  mirror::ArtField* f = FromFieldId(field_id);
1800
1801  mirror::Class* receiver_class = c;
1802  if (receiver_class == nullptr && o != nullptr) {
1803    receiver_class = o->GetClass();
1804  }
1805  // TODO: should we give up now if receiver_class is null?
1806  if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
1807    LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
1808    return JDWP::ERR_INVALID_FIELDID;
1809  }
1810
1811  // The RI only enforces the static/non-static mismatch in one direction.
1812  // TODO: should we change the tests and check both?
1813  if (is_static) {
1814    if (!f->IsStatic()) {
1815      return JDWP::ERR_INVALID_FIELDID;
1816    }
1817  } else {
1818    if (f->IsStatic()) {
1819      LOG(WARNING) << "Ignoring non-null receiver for ObjectReference.GetValues on static field " << PrettyField(f);
1820    }
1821  }
1822  if (f->IsStatic()) {
1823    o = f->GetDeclaringClass();
1824  }
1825
1826  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1827  JValue field_value;
1828  if (tag == JDWP::JT_VOID) {
1829    LOG(FATAL) << "Unknown tag: " << tag;
1830  } else if (!IsPrimitiveTag(tag)) {
1831    field_value.SetL(f->GetObject(o));
1832  } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1833    field_value.SetJ(f->Get64(o));
1834  } else {
1835    field_value.SetI(f->Get32(o));
1836  }
1837  Dbg::OutputJValue(tag, &field_value, pReply);
1838
1839  return JDWP::ERR_NONE;
1840}
1841
1842JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1843                                   JDWP::ExpandBuf* pReply) {
1844  return GetFieldValueImpl(0, object_id, field_id, pReply, false);
1845}
1846
1847JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) {
1848  return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
1849}
1850
1851static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1852                                         uint64_t value, int width, bool is_static)
1853    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1854  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id);
1855  if ((!is_static && o == nullptr) || o == ObjectRegistry::kInvalidObject) {
1856    return JDWP::ERR_INVALID_OBJECT;
1857  }
1858  mirror::ArtField* f = FromFieldId(field_id);
1859
1860  // The RI only enforces the static/non-static mismatch in one direction.
1861  // TODO: should we change the tests and check both?
1862  if (is_static) {
1863    if (!f->IsStatic()) {
1864      return JDWP::ERR_INVALID_FIELDID;
1865    }
1866  } else {
1867    if (f->IsStatic()) {
1868      LOG(WARNING) << "Ignoring non-null receiver for ObjectReference.SetValues on static field " << PrettyField(f);
1869    }
1870  }
1871  if (f->IsStatic()) {
1872    o = f->GetDeclaringClass();
1873  }
1874
1875  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1876
1877  if (IsPrimitiveTag(tag)) {
1878    if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1879      CHECK_EQ(width, 8);
1880      // Debugging can't use transactional mode (runtime only).
1881      f->Set64<false>(o, value);
1882    } else {
1883      CHECK_LE(width, 4);
1884      // Debugging can't use transactional mode (runtime only).
1885      f->Set32<false>(o, value);
1886    }
1887  } else {
1888    mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value);
1889    if (v == ObjectRegistry::kInvalidObject) {
1890      return JDWP::ERR_INVALID_OBJECT;
1891    }
1892    if (v != nullptr) {
1893      mirror::Class* field_type;
1894      {
1895        StackHandleScope<3> hs(Thread::Current());
1896        HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
1897        HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f));
1898        HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
1899        field_type = FieldHelper(h_f).GetType();
1900      }
1901      if (!field_type->IsAssignableFrom(v->GetClass())) {
1902        return JDWP::ERR_INVALID_OBJECT;
1903      }
1904    }
1905    // Debugging can't use transactional mode (runtime only).
1906    f->SetObject<false>(o, v);
1907  }
1908
1909  return JDWP::ERR_NONE;
1910}
1911
1912JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
1913                                   int width) {
1914  return SetFieldValueImpl(object_id, field_id, value, width, false);
1915}
1916
1917JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
1918  return SetFieldValueImpl(0, field_id, value, width, true);
1919}
1920
1921JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) {
1922  mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id);
1923  if (obj == nullptr || obj == ObjectRegistry::kInvalidObject) {
1924    return JDWP::ERR_INVALID_OBJECT;
1925  }
1926  {
1927    ScopedObjectAccessUnchecked soa(Thread::Current());
1928    mirror::Class* java_lang_String = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_String);
1929    if (!java_lang_String->IsAssignableFrom(obj->GetClass())) {
1930      // This isn't a string.
1931      return JDWP::ERR_INVALID_STRING;
1932    }
1933  }
1934  *str = obj->AsString()->ToModifiedUtf8();
1935  return JDWP::ERR_NONE;
1936}
1937
1938void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
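  // JDWP encodes a value as a one-byte tag followed by a big-endian payload whose
  // width the tag determines: 1, 2, 4 or 8 bytes, or an 8-byte objectID for references.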
1939  if (IsPrimitiveTag(tag)) {
1940    expandBufAdd1(pReply, tag);
1941    if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
1942      expandBufAdd1(pReply, return_value->GetI());
1943    } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
1944      expandBufAdd2BE(pReply, return_value->GetI());
1945    } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
1946      expandBufAdd4BE(pReply, return_value->GetI());
1947    } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1948      expandBufAdd8BE(pReply, return_value->GetJ());
1949    } else {
1950      CHECK_EQ(tag, JDWP::JT_VOID);
1951    }
1952  } else {
1953    ScopedObjectAccessUnchecked soa(Thread::Current());
1954    mirror::Object* value = return_value->GetL();
1955    expandBufAdd1(pReply, TagFromObject(soa, value));
1956    expandBufAddObjectId(pReply, gRegistry->Add(value));
1957  }
1958}
1959
1960JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string& name) {
1961  ScopedObjectAccessUnchecked soa(Thread::Current());
1962  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1963  Thread* thread;
1964  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1965  if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
1966    return error;
1967  }
1968
1969  // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
1970  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
1971  mirror::ArtField* java_lang_Thread_name_field =
1972      soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
1973  mirror::String* s =
1974      reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
1975  if (s != nullptr) {
1976    name = s->ToModifiedUtf8();
1977  }
1978  return JDWP::ERR_NONE;
1979}
1980
1981JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
1982  ScopedObjectAccess soa(Thread::Current());
1983  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
1984  if (thread_object == ObjectRegistry::kInvalidObject) {
1985    return JDWP::ERR_INVALID_OBJECT;
1986  }
1987  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroup");
1988  // Okay, so it's an object, but is it actually a thread?
1989  JDWP::JdwpError error;
1990  {
1991    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1992    Thread* thread;
1993    error = DecodeThread(soa, thread_id, thread);
1994  }
1995  if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
1996    // Zombie threads are in the null group.
1997    expandBufAddObjectId(pReply, JDWP::ObjectId(0));
1998    error = JDWP::ERR_NONE;
1999  } else if (error == JDWP::ERR_NONE) {
2000    mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
2001    CHECK(c != nullptr);
2002    mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
2003    CHECK(f != nullptr);
2004    mirror::Object* group = f->GetObject(thread_object);
2005    CHECK(group != nullptr);
2006    JDWP::ObjectId thread_group_id = gRegistry->Add(group);
2007    expandBufAddObjectId(pReply, thread_group_id);
2008  }
2009  soa.Self()->EndAssertNoThreadSuspension(old_cause);
2010  return error;
2011}
2012
2013std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) {
2014  ScopedObjectAccess soa(Thread::Current());
2015  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
2016  CHECK(thread_group != nullptr);
2017  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
2018  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
2019  CHECK(c != nullptr);
2020  mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
2021  CHECK(f != nullptr);
2022  mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
2023  soa.Self()->EndAssertNoThreadSuspension(old_cause);
2024  return s->ToModifiedUtf8();
2025}
2026
2027JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) {
2028  ScopedObjectAccessUnchecked soa(Thread::Current());
2029  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
2030  CHECK(thread_group != nullptr);
2031  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent");
2032  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
2033  CHECK(c != nullptr);
2034  mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
2035  CHECK(f != nullptr);
2036  mirror::Object* parent = f->GetObject(thread_group);
2037  soa.Self()->EndAssertNoThreadSuspension(old_cause);
2038  return gRegistry->Add(parent);
2039}
2040
2041JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
2042  ScopedObjectAccessUnchecked soa(Thread::Current());
2043  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
2044  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
2045  return gRegistry->Add(group);
2046}
2047
2048JDWP::ObjectId Dbg::GetMainThreadGroupId() {
2049  ScopedObjectAccess soa(Thread::Current());
2050  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup);
2051  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
2052  return gRegistry->Add(group);
2053}
2054
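// Maps the runtime's thread states onto the coarser JDWP set.  Note that
// kSuspended still reports TS_RUNNING: JDWP communicates suspension through a
// separate suspend status, as GetThreadStatus below shows.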
2055JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
2056  switch (state) {
2057    case kBlocked:
2058      return JDWP::TS_MONITOR;
2059    case kNative:
2060    case kRunnable:
2061    case kSuspended:
2062      return JDWP::TS_RUNNING;
2063    case kSleeping:
2064      return JDWP::TS_SLEEPING;
2065    case kStarting:
2066    case kTerminated:
2067      return JDWP::TS_ZOMBIE;
2068    case kTimedWaiting:
2069    case kWaitingForCheckPointsToRun:
2070    case kWaitingForDebuggerSend:
2071    case kWaitingForDebuggerSuspension:
2072    case kWaitingForDebuggerToAttach:
2073    case kWaitingForDeoptimization:
2074    case kWaitingForGcToComplete:
2075    case kWaitingForJniOnLoad:
2076    case kWaitingForMethodTracingStart:
2077    case kWaitingForSignalCatcherOutput:
2078    case kWaitingInMainDebuggerLoop:
2079    case kWaitingInMainSignalCatcherLoop:
2080    case kWaitingPerformingGc:
2081    case kWaiting:
2082      return JDWP::TS_WAIT;
2083      // Don't add a 'default' here so the compiler can spot incompatible enum changes.
2084  }
2085  LOG(FATAL) << "Unknown thread state: " << state;
2086  return JDWP::TS_ZOMBIE;
2087}
2088
2089JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
2090                                     JDWP::JdwpSuspendStatus* pSuspendStatus) {
2091  ScopedObjectAccess soa(Thread::Current());
2092
2093  *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
2094
2095  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2096  Thread* thread;
2097  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2098  if (error != JDWP::ERR_NONE) {
2099    if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
2100      *pThreadStatus = JDWP::TS_ZOMBIE;
2101      return JDWP::ERR_NONE;
2102    }
2103    return error;
2104  }
2105
2106  if (IsSuspendedForDebugger(soa, thread)) {
2107    *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
2108  }
2109
2110  *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
2111  return JDWP::ERR_NONE;
2112}
2113
2114JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
2115  ScopedObjectAccess soa(Thread::Current());
2116  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2117  Thread* thread;
2118  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2119  if (error != JDWP::ERR_NONE) {
2120    return error;
2121  }
2122  MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2123  expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
2124  return JDWP::ERR_NONE;
2125}
2126
2127JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
2128  ScopedObjectAccess soa(Thread::Current());
2129  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2130  Thread* thread;
2131  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2132  if (error != JDWP::ERR_NONE) {
2133    return error;
2134  }
2135  thread->Interrupt(soa.Self());
2136  return JDWP::ERR_NONE;
2137}
2138
2139static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
2140                                   mirror::Object* desired_thread_group, mirror::Object* peer)
2141    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2142  // Do we want threads from all thread groups?
2143  if (desired_thread_group == nullptr) {
2144    return true;
2145  }
2146  mirror::ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
2147  DCHECK(thread_group_field != nullptr);
2148  mirror::Object* group = thread_group_field->GetObject(peer);
2149  return (group == desired_thread_group);
2150}
2151
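// Collects the peers of live, fully started threads whose immediate group matches
// thread_group_id; JDWP's ThreadGroupReference.Threads reports direct members only,
// with child groups enumerated separately via GetChildThreadGroups.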
2152void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& thread_ids) {
2153  ScopedObjectAccessUnchecked soa(Thread::Current());
2154  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
2155  std::list<Thread*> all_threads_list;
2156  {
2157    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
2158    all_threads_list = Runtime::Current()->GetThreadList()->GetList();
2159  }
2160  for (Thread* t : all_threads_list) {
2161    if (t == Dbg::GetDebugThread()) {
2162      // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
2163      // query all threads, so it's easier if we just don't tell them about this thread.
2164      continue;
2165    }
2166    if (t->IsStillStarting()) {
2167      // This thread is being started (and has been registered in the thread list). However, it is
2168      // not completely started yet so we must ignore it.
2169      continue;
2170    }
2171    mirror::Object* peer = t->GetPeer();
2172    if (peer == nullptr) {
2173      // peer might be NULL if the thread is still starting up. We can't tell the debugger about
2174      // this thread yet.
2175      // TODO: if we identified threads to the debugger by their Thread*
2176      // rather than their peer's mirror::Object*, we could fix this.
2177      // Doing so might help us report ZOMBIE threads too.
2178      continue;
2179    }
2180    if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
2181      thread_ids.push_back(gRegistry->Add(peer));
2182    }
2183  }
2184}
2185
2186void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& child_thread_group_ids) {
2187  ScopedObjectAccess soa(Thread::Current());
2188  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
2189
2190  // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
2191  mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
2192  mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
  if (groups_array_list == nullptr) {
    // Assumption: the "groups" list can be absent, in which case this thread
    // group simply has no children to report.
    return;
  }
2193
2194  // Get the array and size out of the ArrayList<ThreadGroup>...
2195  mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
2196  mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
2197  mirror::ObjectArray<mirror::Object>* groups_array =
2198      array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
2199  const int32_t size = size_field->GetInt(groups_array_list);
2200
2201  // Copy the first 'size' elements out of the array into the result.
2202  for (int32_t i = 0; i < size; ++i) {
2203    child_thread_group_ids.push_back(gRegistry->Add(groups_array->Get(i)));
2204  }
2205}
2206
2207static int GetStackDepth(Thread* thread)
2208    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2209  struct CountStackDepthVisitor : public StackVisitor {
2210    explicit CountStackDepthVisitor(Thread* thread)
2211        : StackVisitor(thread, nullptr), depth(0) {}
2212
2213    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2214    // annotalysis.
2215    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2216      if (!GetMethod()->IsRuntimeMethod()) {
2217        ++depth;
2218      }
2219      return true;
2220    }
2221    size_t depth;
2222  };
2223
2224  CountStackDepthVisitor visitor(thread);
2225  visitor.WalkStack();
2226  return visitor.depth;
2227}
2228
2229JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t& result) {
2230  ScopedObjectAccess soa(Thread::Current());
2231  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2232  Thread* thread;
2233  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2234  if (error != JDWP::ERR_NONE) {
2235    return error;
2236  }
2237  if (!IsSuspendedForDebugger(soa, thread)) {
2238    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2239  }
2240  result = GetStackDepth(thread);
2241  return JDWP::ERR_NONE;
2242}
2243
2244JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
2245                                     size_t frame_count, JDWP::ExpandBuf* buf) {
2246  class GetFrameVisitor : public StackVisitor {
2247   public:
2248    GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf)
2249        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2250        : StackVisitor(thread, nullptr), depth_(0),
2251          start_frame_(start_frame), frame_count_(frame_count), buf_(buf) {
2252      expandBufAdd4BE(buf_, frame_count_);
2253    }
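    // Note: the reply's frame count is written up front, so callers are expected
    // to request at most the number of frames actually on the stack.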
2254
2255    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2256    // annotalysis.
2257    virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2258      if (GetMethod()->IsRuntimeMethod()) {
2259        return true;  // The debugger can't do anything useful with a frame that has no Method*.
2260      }
2261      if (depth_ >= start_frame_ + frame_count_) {
2262        return false;
2263      }
2264      if (depth_ >= start_frame_) {
2265        JDWP::FrameId frame_id(GetFrameId());
2266        JDWP::JdwpLocation location;
2267        SetJdwpLocation(&location, GetMethod(), GetDexPc());
2268        VLOG(jdwp) << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
2269        expandBufAdd8BE(buf_, frame_id);
2270        expandBufAddLocation(buf_, location);
2271      }
2272      ++depth_;
2273      return true;
2274    }
2275
2276   private:
2277    size_t depth_;
2278    const size_t start_frame_;
2279    const size_t frame_count_;
2280    JDWP::ExpandBuf* buf_;
2281  };
2282
2283  ScopedObjectAccessUnchecked soa(Thread::Current());
2284  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2285  Thread* thread;
2286  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2287  if (error != JDWP::ERR_NONE) {
2288    return error;
2289  }
2290  if (!IsSuspendedForDebugger(soa, thread)) {
2291    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2292  }
2293  GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
2294  visitor.WalkStack();
2295  return JDWP::ERR_NONE;
2296}
2297
2298JDWP::ObjectId Dbg::GetThreadSelfId() {
2299  return GetThreadId(Thread::Current());
2300}
2301
2302JDWP::ObjectId Dbg::GetThreadId(Thread* thread) {
2303  ScopedObjectAccessUnchecked soa(Thread::Current());
2304  return gRegistry->Add(thread->GetPeer());
2305}
2306
2307void Dbg::SuspendVM() {
2308  Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
2309}
2310
2311void Dbg::ResumeVM() {
2312  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
2313}
2314
2315JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
2316  Thread* self = Thread::Current();
2317  ScopedLocalRef<jobject> peer(self->GetJniEnv(), nullptr);
2318  {
2319    ScopedObjectAccess soa(self);
2320    peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id)));
2321  }
2322  if (peer.get() == nullptr) {
2323    return JDWP::ERR_THREAD_NOT_ALIVE;
2324  }
2325  // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads
2326  // trying to suspend this one.
2327  MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
2328  bool timed_out;
2329  ThreadList* thread_list = Runtime::Current()->GetThreadList();
2330  Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
2331                                                    &timed_out);
2332  if (thread != nullptr) {
2333    return JDWP::ERR_NONE;
2334  } else if (timed_out) {
2335    return JDWP::ERR_INTERNAL;
2336  } else {
2337    return JDWP::ERR_THREAD_NOT_ALIVE;
2338  }
2339}
2340
2341void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
2342  ScopedObjectAccessUnchecked soa(Thread::Current());
2343  mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id);
2344  Thread* thread;
2345  {
2346    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2347    thread = Thread::FromManagedThread(soa, peer);
2348  }
2349  if (thread == nullptr) {
2350    LOG(WARNING) << "No such thread for resume: " << peer;
2351    return;
2352  }
2353  bool needs_resume;
2354  {
2355    MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2356    needs_resume = thread->GetSuspendCount() > 0;
2357  }
2358  if (needs_resume) {
2359    Runtime::Current()->GetThreadList()->Resume(thread, true);
2360  }
2361}
2362
2363void Dbg::SuspendSelf() {
2364  Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
2365}
2366
2367struct GetThisVisitor : public StackVisitor {
2368  GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
2369      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2370      : StackVisitor(thread, context), this_object(nullptr), frame_id(frame_id) {}
2371
2372  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2373  // annotalysis.
2374  virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2375    if (frame_id != GetFrameId()) {
2376      return true;  // continue
2377    } else {
2378      this_object = GetThisObject();
2379      return false;
2380    }
2381  }
2382
2383  mirror::Object* this_object;
2384  JDWP::FrameId frame_id;
2385};
2386
2387JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
2388                                   JDWP::ObjectId* result) {
2389  ScopedObjectAccessUnchecked soa(Thread::Current());
2390  Thread* thread;
2391  {
2392    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2393    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2394    if (error != JDWP::ERR_NONE) {
2395      return error;
2396    }
2397    if (!IsSuspendedForDebugger(soa, thread)) {
2398      return JDWP::ERR_THREAD_NOT_SUSPENDED;
2399    }
2400  }
2401  std::unique_ptr<Context> context(Context::Create());
2402  GetThisVisitor visitor(thread, context.get(), frame_id);
2403  visitor.WalkStack();
2404  *result = gRegistry->Add(visitor.this_object);
2405  return JDWP::ERR_NONE;
2406}
2407
2408JDWP::JdwpError Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
2409                                   JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
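  // The caller supplies a buffer of 1 + width bytes: each case below writes the
  // payload at buf + 1, and the (possibly refined) tag is prepended at the end.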
2410  struct GetLocalVisitor : public StackVisitor {
2411    GetLocalVisitor(const ScopedObjectAccessUnchecked& soa, Thread* thread, Context* context,
2412                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width)
2413        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2414        : StackVisitor(thread, context), soa_(soa), frame_id_(frame_id), slot_(slot), tag_(tag),
2415          buf_(buf), width_(width), error_(JDWP::ERR_NONE) {}
2416
2417    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2418    // annotalysis.
2419    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2420      if (GetFrameId() != frame_id_) {
2421        return true;  // Not our frame, carry on.
2422      }
2423      // TODO: check that the tag is compatible with the actual type of the slot!
2424      // TODO: check slot is valid for this method or return INVALID_SLOT error.
2425      mirror::ArtMethod* m = GetMethod();
2426      if (m->IsNative()) {
2427        // We can't read local values from a native method.
2428        error_ = JDWP::ERR_OPAQUE_FRAME;
2429        return false;
2430      }
2431      uint16_t reg = DemangleSlot(slot_, m);
2432      constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
2433      switch (tag_) {
2434        case JDWP::JT_BOOLEAN: {
2435          CHECK_EQ(width_, 1U);
2436          uint32_t intVal;
2437          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2438            VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
2439            JDWP::Set1(buf_+1, intVal != 0);
2440          } else {
2441            VLOG(jdwp) << "failed to get boolean local " << reg;
2442            error_ = kFailureErrorCode;
2443          }
2444          break;
2445        }
2446        case JDWP::JT_BYTE: {
2447          CHECK_EQ(width_, 1U);
2448          uint32_t intVal;
2449          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2450            VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
2451            JDWP::Set1(buf_+1, intVal);
2452          } else {
2453            VLOG(jdwp) << "failed to get byte local " << reg;
2454            error_ = kFailureErrorCode;
2455          }
2456          break;
2457        }
2458        case JDWP::JT_SHORT:
2459        case JDWP::JT_CHAR: {
2460          CHECK_EQ(width_, 2U);
2461          uint32_t intVal;
2462          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2463            VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
2464            JDWP::Set2BE(buf_+1, intVal);
2465          } else {
2466            VLOG(jdwp) << "failed to get short/char local " << reg;
2467            error_ = kFailureErrorCode;
2468          }
2469          break;
2470        }
2471        case JDWP::JT_INT: {
2472          CHECK_EQ(width_, 4U);
2473          uint32_t intVal;
2474          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2475            VLOG(jdwp) << "get int local " << reg << " = " << intVal;
2476            JDWP::Set4BE(buf_+1, intVal);
2477          } else {
2478            VLOG(jdwp) << "failed to get int local " << reg;
2479            error_ = kFailureErrorCode;
2480          }
2481          break;
2482        }
2483        case JDWP::JT_FLOAT: {
2484          CHECK_EQ(width_, 4U);
2485          uint32_t intVal;
2486          if (GetVReg(m, reg, kFloatVReg, &intVal)) {
2487            VLOG(jdwp) << "get float local " << reg << " = " << intVal;
2488            JDWP::Set4BE(buf_+1, intVal);
2489          } else {
2490            VLOG(jdwp) << "failed to get float local " << reg;
2491            error_ = kFailureErrorCode;
2492          }
2493          break;
2494        }
2495        case JDWP::JT_ARRAY:
2496        case JDWP::JT_CLASS_LOADER:
2497        case JDWP::JT_CLASS_OBJECT:
2498        case JDWP::JT_OBJECT:
2499        case JDWP::JT_STRING:
2500        case JDWP::JT_THREAD:
2501        case JDWP::JT_THREAD_GROUP: {
2502          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
2503          uint32_t intVal;
2504          if (GetVReg(m, reg, kReferenceVReg, &intVal)) {
2505            mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
2506            VLOG(jdwp) << "get " << tag_ << " object local " << reg << " = " << o;
2507            if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
2508              LOG(FATAL) << "Register " << reg << " expected to hold " << tag_ << " object: " << o;
2509            }
2510            tag_ = TagFromObject(soa_, o);
2511            JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
2512          } else {
2513            VLOG(jdwp) << "failed to get " << tag_ << " object local " << reg;
2514            error_ = kFailureErrorCode;
2515          }
2516          break;
2517        }
2518        case JDWP::JT_DOUBLE: {
2519          CHECK_EQ(width_, 8U);
2520          uint64_t longVal;
2521          if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
2522            VLOG(jdwp) << "get double local " << reg << " = " << longVal;
2523            JDWP::Set8BE(buf_+1, longVal);
2524          } else {
2525            VLOG(jdwp) << "failed to get double local " << reg;
2526            error_ = kFailureErrorCode;
2527          }
2528          break;
2529        }
2530        case JDWP::JT_LONG: {
2531          CHECK_EQ(width_, 8U);
2532          uint64_t longVal;
2533          if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) {
2534            VLOG(jdwp) << "get long local " << reg << " = " << longVal;
2535            JDWP::Set8BE(buf_+1, longVal);
2536          } else {
2537            VLOG(jdwp) << "failed to get long local " << reg;
2538            error_ = kFailureErrorCode;
2539          }
2540          break;
2541        }
2542        default:
2543          LOG(FATAL) << "Unknown tag " << tag_;
2544          break;
2545      }
2546
2547      // Prepend tag, which may have been updated.
2548      JDWP::Set1(buf_, tag_);
2549      return false;
2550    }
2551    const ScopedObjectAccessUnchecked& soa_;
2552    const JDWP::FrameId frame_id_;
2553    const int slot_;
2554    JDWP::JdwpTag tag_;
2555    uint8_t* const buf_;
2556    const size_t width_;
2557    JDWP::JdwpError error_;
2558  };
2559
2560  ScopedObjectAccessUnchecked soa(Thread::Current());
2561  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2562  Thread* thread;
2563  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2564  if (error != JDWP::ERR_NONE) {
2565    return error;
2566  }
2567  // TODO check thread is suspended by the debugger ?
2568  std::unique_ptr<Context> context(Context::Create());
2569  GetLocalVisitor visitor(soa, thread, context.get(), frame_id, slot, tag, buf, width);
2570  visitor.WalkStack();
2571  return visitor.error_;
2572}
2573
2574JDWP::JdwpError Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
2575                                   JDWP::JdwpTag tag, uint64_t value, size_t width) {
2576  struct SetLocalVisitor : public StackVisitor {
2577    SetLocalVisitor(Thread* thread, Context* context,
2578                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value,
2579                    size_t width)
2580        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2581        : StackVisitor(thread, context),
2582          frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width),
2583          error_(JDWP::ERR_NONE) {}
2584
2585    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2586    // annotalysis.
2587    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2588      if (GetFrameId() != frame_id_) {
2589        return true;  // Not our frame, carry on.
2590      }
2591      // TODO: check that the tag is compatible with the actual type of the slot!
2592      // TODO: check slot is valid for this method or return INVALID_SLOT error.
2593      mirror::ArtMethod* m = GetMethod();
2594      if (m->IsNative()) {
2595        // We can't write local values into a native method.
2596        error_ = JDWP::ERR_OPAQUE_FRAME;
2597        return false;
2598      }
2599      uint16_t reg = DemangleSlot(slot_, m);
2600      constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
2601      switch (tag_) {
2602        case JDWP::JT_BOOLEAN:
2603        case JDWP::JT_BYTE:
2604          CHECK_EQ(width_, 1U);
2605          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2606            VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = "
2607                       << static_cast<uint32_t>(value_);
2608            error_ = kFailureErrorCode;
2609          }
2610          break;
2611        case JDWP::JT_SHORT:
2612        case JDWP::JT_CHAR:
2613          CHECK_EQ(width_, 2U);
2614          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2615            VLOG(jdwp) << "failed to set short/char local " << reg << " = "
2616                       << static_cast<uint32_t>(value_);
2617            error_ = kFailureErrorCode;
2618          }
2619          break;
2620        case JDWP::JT_INT:
2621          CHECK_EQ(width_, 4U);
2622          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2623            VLOG(jdwp) << "failed to set int local " << reg << " = "
2624                       << static_cast<uint32_t>(value_);
2625            error_ = kFailureErrorCode;
2626          }
2627          break;
2628        case JDWP::JT_FLOAT:
2629          CHECK_EQ(width_, 4U);
2630          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kFloatVReg)) {
2631            VLOG(jdwp) << "failed to set float local " << reg << " = "
2632                       << static_cast<uint32_t>(value_);
2633            error_ = kFailureErrorCode;
2634          }
2635          break;
2636        case JDWP::JT_ARRAY:
2637        case JDWP::JT_CLASS_LOADER:
2638        case JDWP::JT_CLASS_OBJECT:
2639        case JDWP::JT_OBJECT:
2640        case JDWP::JT_STRING:
2641        case JDWP::JT_THREAD:
2642        case JDWP::JT_THREAD_GROUP: {
2643          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
2644          mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value_));
2645          if (o == ObjectRegistry::kInvalidObject) {
2646            VLOG(jdwp) << tag_ << " object " << o << " is an invalid object";
2647            error_ = JDWP::ERR_INVALID_OBJECT;
2648          } else if (!SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
2649                              kReferenceVReg)) {
2650            VLOG(jdwp) << "failed to set " << tag_ << " object local " << reg << " = " << o;
2651            error_ = kFailureErrorCode;
2652          }
2653          break;
2654        }
2655        case JDWP::JT_DOUBLE: {
2656          CHECK_EQ(width_, 8U);
2657          bool success = SetVRegPair(m, reg, value_, kDoubleLoVReg, kDoubleHiVReg);
2658          if (!success) {
2659            VLOG(jdwp) << "failed to set double local " << reg << " = " << value_;
2660            error_ = kFailureErrorCode;
2661          }
2662          break;
2663        }
2664        case JDWP::JT_LONG: {
2665          CHECK_EQ(width_, 8U);
2666          bool success = SetVRegPair(m, reg, value_, kLongLoVReg, kLongHiVReg);
2667          if (!success) {
2668            VLOG(jdwp) << "failed to set long local " << reg << " = " << value_;
2669            error_ = kFailureErrorCode;
2670          }
2671          break;
2672        }
2673        default:
2674          LOG(FATAL) << "Unknown tag " << tag_;
2675          break;
2676      }
2677      return false;
2678    }
2679
2680    const JDWP::FrameId frame_id_;
2681    const int slot_;
2682    const JDWP::JdwpTag tag_;
2683    const uint64_t value_;
2684    const size_t width_;
2685    JDWP::JdwpError error_;
2686  };
2687
2688  ScopedObjectAccessUnchecked soa(Thread::Current());
2689  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2690  Thread* thread;
2691  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2692  if (error != JDWP::ERR_NONE) {
2693    return error;
2694  }
2695  // TODO check thread is suspended by the debugger ?
2696  std::unique_ptr<Context> context(Context::Create());
2697  SetLocalVisitor visitor(thread, context.get(), frame_id, slot, tag, value, width);
2698  visitor.WalkStack();
2699  return visitor.error_;
2700}
2701
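// Native and proxy methods have no dex pc, so locations in them are reported with
// the conventional "unknown" value of -1 (all ones), matching SetJdwpLocation above.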
2702static void SetEventLocation(JDWP::EventLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
2703    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2704  DCHECK(location != nullptr);
2705  if (m == nullptr) {
2706    memset(location, 0, sizeof(*location));
2707  } else {
2708    location->method = m;
2709    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint32_t>(-1) : dex_pc;
2710  }
2711}
2712
2713void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
2714                            int event_flags, const JValue* return_value) {
2715  if (!IsDebuggerActive()) {
2716    return;
2717  }
2718  DCHECK(m != nullptr);
2719  DCHECK_EQ(m->IsStatic(), this_object == nullptr);
2720  JDWP::EventLocation location;
2721  SetEventLocation(&location, m, dex_pc);
2722
2723  gJdwpState->PostLocationEvent(&location, this_object, event_flags, return_value);
2724}
2725
2726void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc,
2727                               mirror::Object* this_object, mirror::ArtField* f) {
2728  if (!IsDebuggerActive()) {
2729    return;
2730  }
2731  DCHECK(m != nullptr);
2732  DCHECK(f != nullptr);
2733  JDWP::EventLocation location;
2734  SetEventLocation(&location, m, dex_pc);
2735
2736  gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false);
2737}
2738
2739void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
2740                                     mirror::Object* this_object, mirror::ArtField* f,
2741                                     const JValue* field_value) {
2742  if (!IsDebuggerActive()) {
2743    return;
2744  }
2745  DCHECK(m != nullptr);
2746  DCHECK(f != nullptr);
2747  DCHECK(field_value != nullptr);
2748  JDWP::EventLocation location;
2749  SetEventLocation(&location, m, dex_pc);
2750
2751  gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
2752}
2753
2754void Dbg::PostException(const ThrowLocation& throw_location,
2755                        mirror::ArtMethod* catch_method,
2756                        uint32_t catch_dex_pc, mirror::Throwable* exception_object) {
2757  if (!IsDebuggerActive()) {
2758    return;
2759  }
2760  JDWP::EventLocation exception_throw_location;
2761  SetEventLocation(&exception_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
2762  JDWP::EventLocation exception_catch_location;
2763  SetEventLocation(&exception_catch_location, catch_method, catch_dex_pc);
2764
2765  gJdwpState->PostException(&exception_throw_location, exception_object, &exception_catch_location,
2766                            throw_location.GetThis());
2767}
2768
2769void Dbg::PostClassPrepare(mirror::Class* c) {
2770  if (!IsDebuggerActive()) {
2771    return;
2772  }
2773  gJdwpState->PostClassPrepare(c);
2774}
2775
2776void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
2777                         mirror::ArtMethod* m, uint32_t dex_pc,
2778                         int event_flags, const JValue* return_value) {
2779  if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
2780    return;
2781  }
2782
2783  if (IsBreakpoint(m, dex_pc)) {
2784    event_flags |= kBreakpoint;
2785  }
2786
2787  // If the debugger is single-stepping one of our threads, check to
2788  // see if we're that thread and we've reached a step point.
2789  const SingleStepControl* single_step_control = thread->GetSingleStepControl();
2790  DCHECK(single_step_control != nullptr);
2791  if (single_step_control->is_active) {
2792    CHECK(!m->IsNative());
2793    if (single_step_control->step_depth == JDWP::SD_INTO) {
2794      // Step into method calls.  We break when the line number
2795      // or method pointer changes.  If we're in SS_MIN mode, we
2796      // always stop.
2797      if (single_step_control->method != m) {
2798        event_flags |= kSingleStep;
2799        VLOG(jdwp) << "SS new method";
2800      } else if (single_step_control->step_size == JDWP::SS_MIN) {
2801        event_flags |= kSingleStep;
2802        VLOG(jdwp) << "SS new instruction";
2803      } else if (single_step_control->ContainsDexPc(dex_pc)) {
2804        event_flags |= kSingleStep;
2805        VLOG(jdwp) << "SS new line";
2806      }
2807    } else if (single_step_control->step_depth == JDWP::SD_OVER) {
2808      // Step over method calls.  We break when the line number is
2809      // different and the frame depth is <= the original frame
2810      // depth.  (We can't just compare on the method, because we
2811      // might get unrolled past it by an exception, and it's tricky
2812      // to identify recursion.)
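      // Illustrative: stepping over at depth 3, a call that pushes to depth 4 does
      // not trigger; returning to depth 3 on a new line does, and an exception that
      // unwinds to depth 2 triggers through the frame-pop check below.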
2813
2814      int stack_depth = GetStackDepth(thread);
2815
2816      if (stack_depth < single_step_control->stack_depth) {
2817        // Popped up one or more frames, always trigger.
2818        event_flags |= kSingleStep;
2819        VLOG(jdwp) << "SS method pop";
2820      } else if (stack_depth == single_step_control->stack_depth) {
2821        // Same depth, see if we moved.
2822        if (single_step_control->step_size == JDWP::SS_MIN) {
2823          event_flags |= kSingleStep;
2824          VLOG(jdwp) << "SS new instruction";
2825        } else if (single_step_control->ContainsDexPc(dex_pc)) {
2826          event_flags |= kSingleStep;
2827          VLOG(jdwp) << "SS new line";
2828        }
2829      }
2830    } else {
2831      CHECK_EQ(single_step_control->step_depth, JDWP::SD_OUT);
2832      // Return from the current method.  We break when the frame
2833      // depth pops up.
2834
2835      // This differs from the "method exit" break in that it stops
2836      // with the PC at the next instruction in the returned-to
2837      // function, rather than the end of the returning function.
2838
2839      int stack_depth = GetStackDepth(thread);
2840      if (stack_depth < single_step_control->stack_depth) {
2841        event_flags |= kSingleStep;
2842        VLOG(jdwp) << "SS method pop";
2843      }
2844    }
2845  }
2846
2847  // If there's something interesting going on, see if it matches one
2848  // of the debugger filters.
2849  if (event_flags != 0) {
2850    Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
2851  }
2852}
2853
2854size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
2855  switch (instrumentation_event) {
2856    case instrumentation::Instrumentation::kMethodEntered:
2857      return &method_enter_event_ref_count_;
2858    case instrumentation::Instrumentation::kMethodExited:
2859      return &method_exit_event_ref_count_;
2860    case instrumentation::Instrumentation::kDexPcMoved:
2861      return &dex_pc_change_event_ref_count_;
2862    case instrumentation::Instrumentation::kFieldRead:
2863      return &field_read_event_ref_count_;
2864    case instrumentation::Instrumentation::kFieldWritten:
2865      return &field_write_event_ref_count_;
2866    case instrumentation::Instrumentation::kExceptionCaught:
2867      return &exception_catch_event_ref_count_;
2868    default:
2869      return nullptr;
2870  }
2871}
2872
2873// Process request while all mutator threads are suspended.
2874void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
2875  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
2876  switch (request.GetKind()) {
2877    case DeoptimizationRequest::kNothing:
2878      LOG(WARNING) << "Ignoring empty deoptimization request.";
2879      break;
2880    case DeoptimizationRequest::kRegisterForEvent:
2881      VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
2882                                 request.InstrumentationEvent());
2883      instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
2884      instrumentation_events_ |= request.InstrumentationEvent();
2885      break;
2886    case DeoptimizationRequest::kUnregisterForEvent:
2887      VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
2888                                 request.InstrumentationEvent());
2889      instrumentation->RemoveListener(&gDebugInstrumentationListener,
2890                                      request.InstrumentationEvent());
2891      instrumentation_events_ &= ~request.InstrumentationEvent();
2892      break;
2893    case DeoptimizationRequest::kFullDeoptimization:
2894      VLOG(jdwp) << "Deoptimize the world ...";
2895      instrumentation->DeoptimizeEverything();
2896      VLOG(jdwp) << "Deoptimize the world DONE";
2897      break;
2898    case DeoptimizationRequest::kFullUndeoptimization:
2899      VLOG(jdwp) << "Undeoptimize the world ...";
2900      instrumentation->UndeoptimizeEverything();
2901      VLOG(jdwp) << "Undeoptimize the world DONE";
2902      break;
2903    case DeoptimizationRequest::kSelectiveDeoptimization:
2904      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
2905      instrumentation->Deoptimize(request.Method());
2906      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
2907      break;
2908    case DeoptimizationRequest::kSelectiveUndeoptimization:
2909      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
2910      instrumentation->Undeoptimize(request.Method());
2911      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
2912      break;
2913    default:
2914      LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
2915      break;
2916  }
2917}
2918
2919void Dbg::DelayFullUndeoptimization() {
2920  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2921  ++delayed_full_undeoptimization_count_;
2922  DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
2923}
2924
2925void Dbg::ProcessDelayedFullUndeoptimizations() {
2926  // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization).
2927  {
2928    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2929    while (delayed_full_undeoptimization_count_ > 0) {
2930      DeoptimizationRequest req;
2931      req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
2932      req.SetMethod(nullptr);
2933      RequestDeoptimizationLocked(req);
2934      --delayed_full_undeoptimization_count_;
2935    }
2936  }
2937  ManageDeoptimization();
2938}
2939
2940void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
2941  if (req.GetKind() == DeoptimizationRequest::kNothing) {
2942    // Nothing to do.
2943    return;
2944  }
2945  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2946  RequestDeoptimizationLocked(req);
2947}
2948
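// Note on the bookkeeping below: only a 0 -> 1 transition of an event reference count (or of
// the full-deoptimization count) queues a request with the instrumentation, and only the
// matching 1 -> 0 transition queues the undo; intermediate increments and decrements just
// update the counters.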
2949void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
2950  switch (req.GetKind()) {
2951    case DeoptimizationRequest::kRegisterForEvent: {
2952      DCHECK_NE(req.InstrumentationEvent(), 0u);
2953      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
2954      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
2955                                                req.InstrumentationEvent());
2956      if (*counter == 0) {
2957        VLOG(jdwp) << StringPrintf("Queue request #%zu to start listening to instrumentation event 0x%x",
2958                                   deoptimization_requests_.size(), req.InstrumentationEvent());
2959        deoptimization_requests_.push_back(req);
2960      }
2961      *counter = *counter + 1;
2962      break;
2963    }
2964    case DeoptimizationRequest::kUnregisterForEvent: {
2965      DCHECK_NE(req.InstrumentationEvent(), 0u);
2966      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
2967      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
2968                                                req.InstrumentationEvent());
2969      *counter = *counter - 1;
2970      if (*counter == 0) {
2971        VLOG(jdwp) << StringPrintf("Queue request #%zu to stop listening to instrumentation event 0x%x",
2972                                   deoptimization_requests_.size(), req.InstrumentationEvent());
2973        deoptimization_requests_.push_back(req);
2974      }
2975      break;
2976    }
2977    case DeoptimizationRequest::kFullDeoptimization: {
2978      DCHECK(req.Method() == nullptr);
2979      if (full_deoptimization_event_count_ == 0) {
2980        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2981                   << " for full deoptimization";
2982        deoptimization_requests_.push_back(req);
2983      }
2984      ++full_deoptimization_event_count_;
2985      break;
2986    }
2987    case DeoptimizationRequest::kFullUndeoptimization: {
2988      DCHECK(req.Method() == nullptr);
2989      DCHECK_GT(full_deoptimization_event_count_, 0U);
2990      --full_deoptimization_event_count_;
2991      if (full_deoptimization_event_count_ == 0) {
2992        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2993                   << " for full undeoptimization";
2994        deoptimization_requests_.push_back(req);
2995      }
2996      break;
2997    }
2998    case DeoptimizationRequest::kSelectiveDeoptimization: {
2999      DCHECK(req.Method() != nullptr);
3000      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3001                 << " for deoptimization of " << PrettyMethod(req.Method());
3002      deoptimization_requests_.push_back(req);
3003      break;
3004    }
3005    case DeoptimizationRequest::kSelectiveUndeoptimization: {
3006      DCHECK(req.Method() != nullptr);
3007      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3008                 << " for undeoptimization of " << PrettyMethod(req.Method());
3009      deoptimization_requests_.push_back(req);
3010      break;
3011    }
3012    default: {
3013      LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
3014      break;
3015    }
3016  }
3017}
3018
3019void Dbg::ManageDeoptimization() {
3020  Thread* const self = Thread::Current();
3021  {
3022    // Avoid suspend/resume if there is no pending request.
3023    MutexLock mu(self, *Locks::deoptimization_lock_);
3024    if (deoptimization_requests_.empty()) {
3025      return;
3026    }
3027  }
3028  CHECK_EQ(self->GetState(), kRunnable);
3029  self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
3030  // We need to suspend mutator threads first.
3031  Runtime* const runtime = Runtime::Current();
3032  runtime->GetThreadList()->SuspendAll();
3033  const ThreadState old_state = self->SetStateUnsafe(kRunnable);
3034  {
3035    MutexLock mu(self, *Locks::deoptimization_lock_);
3036    size_t req_index = 0;
3037    for (DeoptimizationRequest& request : deoptimization_requests_) {
3038      VLOG(jdwp) << "Process deoptimization request #" << req_index++;
3039      ProcessDeoptimizationRequest(request);
3040    }
3041    deoptimization_requests_.clear();
3042  }
3043  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
3044  runtime->GetThreadList()->ResumeAll();
3045  self->TransitionFromSuspendedToRunnable();
3046}
3047
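// Conservatively reports whether the compiler may have inlined this method at its call sites.
// If so, a breakpoint in it requires deoptimizing everything rather than just this method
// (see Dbg::WatchLocation below).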
3048static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
3049    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3050  const DexFile::CodeItem* code_item = m->GetCodeItem();
3051  if (code_item == nullptr) {
3052    // TODO: We should not be asked to watch a location in a native or abstract method, so the
3053    // code item should never be null. We could simply check that we never encounter this case.
3054    return false;
3055  }
3056  // Note: method verifier may cause thread suspension.
3057  self->AssertThreadSuspensionIsAllowable();
3058  StackHandleScope<2> hs(self);
3059  mirror::Class* declaring_class = m->GetDeclaringClass();
3060  Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
3061  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
3062  verifier::MethodVerifier verifier(dex_cache->GetDexFile(), &dex_cache, &class_loader,
3063                                    &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
3064                                    m->GetAccessFlags(), false, true, false);
3065  // Note: we don't need to verify the method.
3066  return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
3067}
3068
3069static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
3070    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3071  for (Breakpoint& breakpoint : gBreakpoints) {
3072    if (breakpoint.Method() == m) {
3073      return &breakpoint;
3074    }
3075  }
3076  return nullptr;
3077}
3078
3079// Sanity checks all existing breakpoints on the same method.
3080static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
3081    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3082  for (const Breakpoint& breakpoint : gBreakpoints) {
3083    CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
3084  }
3085  if (need_full_deoptimization) {
3086    // We should have deoptimized everything but not "selectively" deoptimized this method.
3087    CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized());
3088    CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3089  } else {
3090    // We should have "selectively" deoptimized this method.
3091    // Note: while we have not deoptimized everything for this method, we may have done it for
3092    // another event.
3093    CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3094  }
3095}
3096
3097// Installs a breakpoint at the specified location. Also indicates through the deoptimization
3098// request if we need to deoptimize.
3099void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3100  Thread* const self = Thread::Current();
3101  mirror::ArtMethod* m = FromMethodId(location->method_id);
3102  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3103
3104  const Breakpoint* existing_breakpoint;
3105  {
3106    ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
3107    existing_breakpoint = FindFirstBreakpointForMethod(m);
3108  }
3109  bool need_full_deoptimization;
3110  if (existing_breakpoint == nullptr) {
3111    // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
3112    // inlined, we deoptimize everything; otherwise we deoptimize only this method.
3113    // Note: IsMethodPossiblyInlined goes into the method verifier and may cause thread suspension.
3114    // Therefore we must not hold any lock when we call it.
3115    need_full_deoptimization = IsMethodPossiblyInlined(self, m);
3116    if (need_full_deoptimization) {
3117      req->SetKind(DeoptimizationRequest::kFullDeoptimization);
3118      req->SetMethod(nullptr);
3119    } else {
3120      req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization);
3121      req->SetMethod(m);
3122    }
3123  } else {
3124    // There is at least one breakpoint for this method: we don't need to deoptimize.
3125    req->SetKind(DeoptimizationRequest::kNothing);
3126    req->SetMethod(nullptr);
3127
3128    need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization();
3129    if (kIsDebugBuild) {
3130      ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
3131      SanityCheckExistingBreakpoints(m, need_full_deoptimization);
3132    }
3133  }
3134
3135  {
3136    WriterMutexLock mu(self, *Locks::breakpoint_lock_);
3137    gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization));
3138    VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
3139               << gBreakpoints[gBreakpoints.size() - 1];
3140  }
3141}
3142
3143// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
3144// request if we need to undeoptimize.
3145void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3146  WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
3147  mirror::ArtMethod* m = FromMethodId(location->method_id);
3148  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3149  bool need_full_deoptimization = false;
3150  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
3151    if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
3152      VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
3153      need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization();
3154      DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3155      gBreakpoints.erase(gBreakpoints.begin() + i);
3156      break;
3157    }
3158  }
3159  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3160  if (existing_breakpoint == nullptr) {
3161    // There is no more breakpoint on this method: we need to undeoptimize.
3162    if (need_full_deoptimization) {
3163      // This method required full deoptimization: we need to undeoptimize everything.
3164      req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
3165      req->SetMethod(nullptr);
3166    } else {
3167      // This method required selective deoptimization: we need to undeoptimize only that method.
3168      req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
3169      req->SetMethod(m);
3170    }
3171  } else {
3172    // There is at least one breakpoint for this method: we don't need to undeoptimize.
3173    req->SetKind(DeoptimizationRequest::kNothing);
3174    req->SetMethod(nullptr);
3175    if (kIsDebugBuild) {
3176      SanityCheckExistingBreakpoints(m, need_full_deoptimization);
3177    }
3178  }
3179}
3180
3181// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
3182// cause suspension if the thread is the current thread.
3183class ScopedThreadSuspension {
3184 public:
3185  ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
3186      LOCKS_EXCLUDED(Locks::thread_list_lock_)
3187      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
3188      thread_(nullptr),
3189      error_(JDWP::ERR_NONE),
3190      self_suspend_(false),
3191      other_suspend_(false) {
3192    ScopedObjectAccessUnchecked soa(self);
3193    {
3194      MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3195      error_ = DecodeThread(soa, thread_id, thread_);
3196    }
3197    if (error_ == JDWP::ERR_NONE) {
3198      if (thread_ == soa.Self()) {
3199        self_suspend_ = true;
3200      } else {
3201        soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3202        jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
3203        bool timed_out;
3204        Thread* suspended_thread;
3205        {
3206          // Take suspend thread lock to avoid races with threads trying to suspend this one.
3207          MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
3208          ThreadList* thread_list = Runtime::Current()->GetThreadList();
3209          suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
3210        }
3211        CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
3212        if (suspended_thread == nullptr) {
3213          // Thread terminated from under us while suspending.
3214          error_ = JDWP::ERR_INVALID_THREAD;
3215        } else {
3216          CHECK_EQ(suspended_thread, thread_);
3217          other_suspend_ = true;
3218        }
3219      }
3220    }
3221  }
3222
3223  Thread* GetThread() const {
3224    return thread_;
3225  }
3226
3227  JDWP::JdwpError GetError() const {
3228    return error_;
3229  }
3230
3231  ~ScopedThreadSuspension() {
3232    if (other_suspend_) {
3233      Runtime::Current()->GetThreadList()->Resume(thread_, true);
3234    }
3235  }
3236
3237 private:
3238  Thread* thread_;
3239  JDWP::JdwpError error_;
3240  bool self_suspend_;
3241  bool other_suspend_;
3242};
3243
3244JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
3245                                   JDWP::JdwpStepDepth step_depth) {
3246  Thread* self = Thread::Current();
3247  ScopedThreadSuspension sts(self, thread_id);
3248  if (sts.GetError() != JDWP::ERR_NONE) {
3249    return sts.GetError();
3250  }
3251
3252  //
3253  // Work out what Method* we're in, the current line number, and how deep the stack currently
3254  // is for step-out.
3255  //
3256
3257  struct SingleStepStackVisitor : public StackVisitor {
3258    explicit SingleStepStackVisitor(Thread* thread, SingleStepControl* single_step_control,
3259                                    int32_t* line_number)
3260        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
3261        : StackVisitor(thread, NULL), single_step_control_(single_step_control),
3262          line_number_(line_number) {
3263      DCHECK_EQ(single_step_control_, thread->GetSingleStepControl());
3264      single_step_control_->method = NULL;
3265      single_step_control_->stack_depth = 0;
3266    }
3267
3268    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
3269    // annotalysis.
3270    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
3271      mirror::ArtMethod* m = GetMethod();
3272      if (!m->IsRuntimeMethod()) {
3273        ++single_step_control_->stack_depth;
3274        if (single_step_control_->method == NULL) {
3275          mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
3276          single_step_control_->method = m;
3277          *line_number_ = -1;
3278          if (dex_cache != NULL) {
3279            const DexFile& dex_file = *dex_cache->GetDexFile();
3280            *line_number_ = dex_file.GetLineNumFromPC(m, GetDexPc());
3281          }
3282        }
3283      }
3284      return true;
3285    }
3286
3287    SingleStepControl* const single_step_control_;
3288    int32_t* const line_number_;
3289  };
3290
3291  Thread* const thread = sts.GetThread();
3292  SingleStepControl* const single_step_control = thread->GetSingleStepControl();
3293  DCHECK(single_step_control != nullptr);
3294  int32_t line_number = -1;
3295  SingleStepStackVisitor visitor(thread, single_step_control, &line_number);
3296  visitor.WalkStack();
3297
3298  //
3299  // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
3300  //
3301
3302  struct DebugCallbackContext {
3303    explicit DebugCallbackContext(SingleStepControl* single_step_control, int32_t line_number,
3304                                  const DexFile::CodeItem* code_item)
3305      : single_step_control_(single_step_control), line_number_(line_number), code_item_(code_item),
3306        last_pc_valid(false), last_pc(0) {
3307    }
3308
3309    static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) {
3310      DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
3311      if (static_cast<int32_t>(line_number) == context->line_number_) {
3312        if (!context->last_pc_valid) {
3313          // Everything from this address until the next line change is ours.
3314          context->last_pc = address;
3315          context->last_pc_valid = true;
3316        }
3317        // Otherwise, if we're already in a valid range for this line,
3318        // just keep going (shouldn't really happen)...
3319      } else if (context->last_pc_valid) {  // and the line number is new
3320        // Add everything from the last entry up until here to the set
3321        for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) {
3322          context->single_step_control_->dex_pcs.insert(dex_pc);
3323        }
3324        context->last_pc_valid = false;
3325      }
3326      return false;  // There may be multiple entries for any given line.
3327    }
3328
3329    ~DebugCallbackContext() {
3330      // If the line number was the last in the position table...
3331      if (last_pc_valid) {
3332        size_t end = code_item_->insns_size_in_code_units_;
3333        for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
3334          single_step_control_->dex_pcs.insert(dex_pc);
3335        }
3336      }
3337    }
3338
3339    SingleStepControl* const single_step_control_;
3340    const int32_t line_number_;
3341    const DexFile::CodeItem* const code_item_;
3342    bool last_pc_valid;
3343    uint32_t last_pc;
3344  };
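  // Example: with a positions table of {(0x00, line 5), (0x0a, line 6), (0x12, line 5)}, stepping
  // over line 5 collects the dex pc ranges [0x00, 0x0a) and [0x12, end of the insns array).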
3345  single_step_control->dex_pcs.clear();
3346  mirror::ArtMethod* m = single_step_control->method;
3347  if (!m->IsNative()) {
3348    const DexFile::CodeItem* const code_item = m->GetCodeItem();
3349    DebugCallbackContext context(single_step_control, line_number, code_item);
3350    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
3351                                     DebugCallbackContext::Callback, NULL, &context);
3352  }
3353
3354  //
3355  // Everything else...
3356  //
3357
3358  single_step_control->step_size = step_size;
3359  single_step_control->step_depth = step_depth;
3360  single_step_control->is_active = true;
3361
3362  if (VLOG_IS_ON(jdwp)) {
3363    VLOG(jdwp) << "Single-step thread: " << *thread;
3364    VLOG(jdwp) << "Single-step step size: " << single_step_control->step_size;
3365    VLOG(jdwp) << "Single-step step depth: " << single_step_control->step_depth;
3366    VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->method);
3367    VLOG(jdwp) << "Single-step current line: " << line_number;
3368    VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->stack_depth;
3369    VLOG(jdwp) << "Single-step dex_pc values:";
3370    for (uint32_t dex_pc : single_step_control->dex_pcs) {
3371      VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
3372    }
3373  }
3374
3375  return JDWP::ERR_NONE;
3376}
3377
3378void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
3379  ScopedObjectAccessUnchecked soa(Thread::Current());
3380  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3381  Thread* thread;
3382  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
3383  if (error == JDWP::ERR_NONE) {
3384    SingleStepControl* single_step_control = thread->GetSingleStepControl();
3385    DCHECK(single_step_control != nullptr);
3386    single_step_control->Clear();
3387  }
3388}
3389
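// Maps a JDWP tag to the corresponding character of a method shorty, e.g. JT_INT -> 'I'.
// All reference tags collapse to 'L'.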
3390static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
3391  switch (tag) {
3392    default:
3393      LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
3394
3395    // Primitives.
3396    case JDWP::JT_BYTE:    return 'B';
3397    case JDWP::JT_CHAR:    return 'C';
3398    case JDWP::JT_FLOAT:   return 'F';
3399    case JDWP::JT_DOUBLE:  return 'D';
3400    case JDWP::JT_INT:     return 'I';
3401    case JDWP::JT_LONG:    return 'J';
3402    case JDWP::JT_SHORT:   return 'S';
3403    case JDWP::JT_VOID:    return 'V';
3404    case JDWP::JT_BOOLEAN: return 'Z';
3405
3406    // Reference types.
3407    case JDWP::JT_ARRAY:
3408    case JDWP::JT_OBJECT:
3409    case JDWP::JT_STRING:
3410    case JDWP::JT_THREAD:
3411    case JDWP::JT_THREAD_GROUP:
3412    case JDWP::JT_CLASS_LOADER:
3413    case JDWP::JT_CLASS_OBJECT:
3414      return 'L';
3415  }
3416}
3417
3418JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
3419                                  JDWP::RefTypeId class_id, JDWP::MethodId method_id,
3420                                  uint32_t arg_count, uint64_t* arg_values,
3421                                  JDWP::JdwpTag* arg_types, uint32_t options,
3422                                  JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
3423                                  JDWP::ObjectId* pExceptionId) {
3424  ThreadList* thread_list = Runtime::Current()->GetThreadList();
3425
3426  Thread* targetThread = NULL;
3427  DebugInvokeReq* req = NULL;
3428  Thread* self = Thread::Current();
3429  {
3430    ScopedObjectAccessUnchecked soa(self);
3431    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3432    JDWP::JdwpError error = DecodeThread(soa, thread_id, targetThread);
3433    if (error != JDWP::ERR_NONE) {
3434      LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
3435      return error;
3436    }
3437    req = targetThread->GetInvokeReq();
3438    if (!req->ready) {
3439      LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
3440      return JDWP::ERR_INVALID_THREAD;
3441    }
3442
3443    /*
3444     * We currently have a bug where we don't successfully resume the
3445     * target thread if the suspend count is too deep.  We're expected to
3446     * require one "resume" for each "suspend", but when asked to execute
3447     * a method we have to resume fully and then re-suspend it back to the
3448     * same level.  (The easiest way to cause this is to type "suspend"
3449     * multiple times in jdb.)
3450     *
3451     * It's unclear what this means when the event specifies "resume all"
3452     * and some threads are suspended more deeply than others.  This is
3453     * a rare problem, so for now we just prevent it from hanging forever
3454     * by rejecting the method invocation request.  Without this, we will
3455     * be stuck waiting on a suspended thread.
3456     */
3457    int suspend_count;
3458    {
3459      MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
3460      suspend_count = targetThread->GetSuspendCount();
3461    }
3462    if (suspend_count > 1) {
3463      LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count;
3464      return JDWP::ERR_THREAD_SUSPENDED;  // Probably not expected here.
3465    }
3466
3467    JDWP::JdwpError status;
3468    mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id);
3469    if (receiver == ObjectRegistry::kInvalidObject) {
3470      return JDWP::ERR_INVALID_OBJECT;
3471    }
3472
3473    mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id);
3474    if (thread == ObjectRegistry::kInvalidObject) {
3475      return JDWP::ERR_INVALID_OBJECT;
3476    }
3477    // TODO: check that 'thread' is actually a java.lang.Thread!
3478
3479    mirror::Class* c = DecodeClass(class_id, status);
3480    if (c == NULL) {
3481      return status;
3482    }
3483
3484    mirror::ArtMethod* m = FromMethodId(method_id);
3485    if (m->IsStatic() != (receiver == NULL)) {
3486      return JDWP::ERR_INVALID_METHODID;
3487    }
3488    if (m->IsStatic()) {
3489      if (m->GetDeclaringClass() != c) {
3490        return JDWP::ERR_INVALID_METHODID;
3491      }
3492    } else {
3493      if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
3494        return JDWP::ERR_INVALID_METHODID;
3495      }
3496    }
3497
3498    // Check the argument list matches the method.
3499    uint32_t shorty_len = 0;
3500    const char* shorty = m->GetShorty(&shorty_len);
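    // The first shorty character is the return type, so the method takes shorty_len - 1 arguments.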
3501    if (shorty_len - 1 != arg_count) {
3502      return JDWP::ERR_ILLEGAL_ARGUMENT;
3503    }
3504
3505    {
3506      StackHandleScope<3> hs(soa.Self());
3507      MethodHelper mh(hs.NewHandle(m));
3508      HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
3509      HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
3510      const DexFile::TypeList* types = m->GetParameterTypeList();
3511      for (size_t i = 0; i < arg_count; ++i) {
3512        if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
3513          return JDWP::ERR_ILLEGAL_ARGUMENT;
3514        }
3515
3516        if (shorty[i + 1] == 'L') {
3517          // Did we really get an argument of an appropriate reference type?
3518          mirror::Class* parameter_type = mh.GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_);
3519          mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i]);
3520          if (argument == ObjectRegistry::kInvalidObject) {
3521            return JDWP::ERR_INVALID_OBJECT;
3522          }
3523          if (argument != NULL && !argument->InstanceOf(parameter_type)) {
3524            return JDWP::ERR_ILLEGAL_ARGUMENT;
3525          }
3526
3527          // Turn the on-the-wire ObjectId into a jobject.
3528          jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
3529          v.l = gRegistry->GetJObject(arg_values[i]);
3530        }
3531      }
3532      // Update in case it moved.
3533      m = mh.GetMethod();
3534    }
3535
3536    req->receiver = receiver;
3537    req->thread = thread;
3538    req->klass = c;
3539    req->method = m;
3540    req->arg_count = arg_count;
3541    req->arg_values = arg_values;
3542    req->options = options;
3543    req->invoke_needed = true;
3544  }
3545
3546  // Releasing the thread list lock is a bit risky: if the thread goes away we're out of luck.
3547  // But we must release it before resuming the threads below, and it's unwise to hold it while
3548  // waiting for the invoke to complete.
3549
3550  {
3551    /*
3552     * We change our (JDWP thread) status, which should be THREAD_RUNNING,
3553     * so we can suspend for a GC if the invoke request causes us to
3554     * run out of memory.  It's also a good idea to change it before locking
3555     * the invokeReq mutex, although that should never be held for long.
3556     */
3557    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
3558
3559    VLOG(jdwp) << "    Transferring control to event thread";
3560    {
3561      MutexLock mu(self, req->lock);
3562
3563      if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3564        VLOG(jdwp) << "      Resuming all threads";
3565        thread_list->UndoDebuggerSuspensions();
3566      } else {
3567        VLOG(jdwp) << "      Resuming event thread only";
3568        thread_list->Resume(targetThread, true);
3569      }
3570
3571      // Wait for the request to finish executing.
3572      while (req->invoke_needed) {
3573        req->cond.Wait(self);
3574      }
3575    }
3576    VLOG(jdwp) << "    Control has returned from event thread";
3577
3578    /* wait for thread to re-suspend itself */
3579    SuspendThread(thread_id, false /* request_suspension */);
3580    self->TransitionFromSuspendedToRunnable();
3581  }
3582
3583  /*
3584   * Suspend the threads.  We waited for the target thread to suspend
3585   * itself, so all we need to do is suspend the others.
3586   *
3587   * The suspendAllThreads() call will double-suspend the event thread,
3588   * so we want to resume the target thread once to keep the books straight.
3589   */
3590  if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3591    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3592    VLOG(jdwp) << "      Suspending all threads";
3593    thread_list->SuspendAllForDebugger();
3594    self->TransitionFromSuspendedToRunnable();
3595    VLOG(jdwp) << "      Resuming event thread to balance the count";
3596    thread_list->Resume(targetThread, true);
3597  }
3598
3599  // Copy the result.
3600  *pResultTag = req->result_tag;
3601  if (IsPrimitiveTag(req->result_tag)) {
3602    *pResultValue = req->result_value.GetJ();
3603  } else {
3604    *pResultValue = gRegistry->Add(req->result_value.GetL());
3605  }
3606  *pExceptionId = req->exception;
3607  return req->error;
3608}
3609
3610void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
3611  ScopedObjectAccess soa(Thread::Current());
3612
3613  // We can be called while an exception is pending. We need
3614  // to preserve that across the method invocation.
3615  StackHandleScope<4> hs(soa.Self());
3616  auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr);
3617  auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
3618  auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr);
3619  uint32_t old_throw_dex_pc;
3620  bool old_exception_report_flag;
3621  {
3622    ThrowLocation old_throw_location;
3623    mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
3624    old_throw_this_object.Assign(old_throw_location.GetThis());
3625    old_throw_method.Assign(old_throw_location.GetMethod());
3626    old_exception.Assign(old_exception_obj);
3627    old_throw_dex_pc = old_throw_location.GetDexPc();
3628    old_exception_report_flag = soa.Self()->IsExceptionReportedToInstrumentation();
3629    soa.Self()->ClearException();
3630  }
3631
3632  // Translate the method through the vtable, unless the debugger wants to suppress it.
3633  Handle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
3634  if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != NULL) {
3635    mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get());
3636    if (actual_method != m.Get()) {
3637      VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method);
3638      m.Assign(actual_method);
3639    }
3640  }
3641  VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get())
3642             << " receiver=" << pReq->receiver
3643             << " arg_count=" << pReq->arg_count;
3644  CHECK(m.Get() != nullptr);
3645
3646  CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
3647
3648  pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()),
3649                                         reinterpret_cast<jvalue*>(pReq->arg_values));
3650
3651  mirror::Throwable* exception = soa.Self()->GetException(NULL);
3652  soa.Self()->ClearException();
3653  pReq->exception = gRegistry->Add(exception);
3654  pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty());
3655  if (pReq->exception != 0) {
3656    VLOG(jdwp) << "  JDWP invocation returning with exception=" << exception
3657        << " " << exception->Dump();
3658    pReq->result_value.SetJ(0);
3659  } else if (pReq->result_tag == JDWP::JT_OBJECT) {
3660    /* if no exception thrown, examine object result more closely */
3661    JDWP::JdwpTag new_tag = TagFromObject(soa, pReq->result_value.GetL());
3662    if (new_tag != pReq->result_tag) {
3663      VLOG(jdwp) << "  JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
3664      pReq->result_tag = new_tag;
3665    }
3666
3667    /*
3668     * Register the object.  We don't actually need an ObjectId yet,
3669     * but we do need to be sure that the GC won't move or discard the
3670     * object when we switch out of RUNNING.  The ObjectId conversion
3671     * will add the object to the "do not touch" list.
3672     *
3673     * We can't use the "tracked allocation" mechanism here because
3674     * the object is going to be handed off to a different thread.
3675     */
3676    gRegistry->Add(pReq->result_value.GetL());
3677  }
3678
3679  if (old_exception.Get() != NULL) {
3680    ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
3681                                         old_throw_dex_pc);
3682    soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
3683    soa.Self()->SetExceptionReportedToInstrumentation(old_exception_report_flag);
3684  }
3685}
3686
3687/*
3688 * "request" contains a full JDWP packet, possibly with multiple chunks.  We
3689 * need to process each, accumulate the replies, and ship the whole thing
3690 * back.
3691 *
3692 * Returns "true" if we have a reply.  The reply buffer is newly allocated,
3693 * and includes the chunk type/length, followed by the data.
3694 *
3695 * OLD-TODO: we currently assume that the request and reply include a single
3696 * chunk.  If this becomes inconvenient we will need to adapt.
3697 */
3698bool Dbg::DdmHandlePacket(JDWP::Request& request, uint8_t** pReplyBuf, int* pReplyLen) {
3699  Thread* self = Thread::Current();
3700  JNIEnv* env = self->GetJniEnv();
3701
3702  uint32_t type = request.ReadUnsigned32("type");
3703  uint32_t length = request.ReadUnsigned32("length");
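  // A chunk starts with a [u4] type (a four-character code such as "THCR"), followed by a [u4]
  // payload length; the payload bytes come next.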
3704
3705  // Create a byte[] corresponding to 'request'.
3706  size_t request_length = request.size();
3707  ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
3708  if (dataArray.get() == NULL) {
3709    LOG(WARNING) << "byte[] allocation failed: " << request_length;
3710    env->ExceptionClear();
3711    return false;
3712  }
3713  env->SetByteArrayRegion(dataArray.get(), 0, request_length, reinterpret_cast<const jbyte*>(request.data()));
3714  request.Skip(request_length);
3715
3716  // Run through and find all chunks.  [Currently just find the first.]
3717  ScopedByteArrayRO contents(env, dataArray.get());
3718  if (length != request_length) {
3719    LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zu)", length, request_length);
3720    return false;
3721  }
3722
3723  // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
3724  ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3725                                                                 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
3726                                                                 type, dataArray.get(), 0, length));
3727  if (env->ExceptionCheck()) {
3728    LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
3729    env->ExceptionDescribe();
3730    env->ExceptionClear();
3731    return false;
3732  }
3733
3734  if (chunk.get() == NULL) {
3735    return false;
3736  }
3737
3738  /*
3739   * Pull the pieces out of the chunk.  We copy the results into a
3740   * newly-allocated buffer that the caller can free.  We don't want to
3741   * continue using the Chunk object because nothing has a reference to it.
3742   *
3743   * We could avoid this by returning type/data/offset/length and having
3744   * the caller be aware of the object lifetime issues, but that
3745   * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
3746   * if we have responses for multiple chunks.
3747   *
3748   * So we're pretty much stuck with copying data around multiple times.
3749   */
3750  ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
3751  jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
3752  length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
3753  type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);
3754
3755  VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
3756  if (length == 0 || replyData.get() == NULL) {
3757    return false;
3758  }
3759
3760  const int kChunkHdrLen = 8;
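  // The reply uses the same chunk framing: [u4] type, [u4] length, then the data.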
3761  uint8_t* reply = new (std::nothrow) uint8_t[length + kChunkHdrLen];  // Plain new would throw, not return NULL.
3762  if (reply == NULL) {
3763    LOG(WARNING) << "reply allocation failed: " << (length + kChunkHdrLen);
3764    return false;
3765  }
3766  JDWP::Set4BE(reply + 0, type);
3767  JDWP::Set4BE(reply + 4, length);
3768  env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
3769
3770  *pReplyBuf = reply;
3771  *pReplyLen = length + kChunkHdrLen;
3772
3773  VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
3774  return true;
3775}
3776
3777void Dbg::DdmBroadcast(bool connect) {
3778  VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
3779
3780  Thread* self = Thread::Current();
3781  if (self->GetState() != kRunnable) {
3782    LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
3783    /* try anyway? */
3784  }
3785
3786  JNIEnv* env = self->GetJniEnv();
3787  jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
3788  env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3789                            WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
3790                            event);
3791  if (env->ExceptionCheck()) {
3792    LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
3793    env->ExceptionDescribe();
3794    env->ExceptionClear();
3795  }
3796}
3797
3798void Dbg::DdmConnected() {
3799  Dbg::DdmBroadcast(true);
3800}
3801
3802void Dbg::DdmDisconnected() {
3803  Dbg::DdmBroadcast(false);
3804  gDdmThreadNotification = false;
3805}
3806
3807/*
3808 * Send a notification when a thread starts, stops, or changes its name.
3809 *
3810 * Because we broadcast the full set of threads when the notifications are
3811 * first enabled, it's possible for "thread" to be actively executing.
3812 */
3813void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
3814  if (!gDdmThreadNotification) {
3815    return;
3816  }
3817
3818  if (type == CHUNK_TYPE("THDE")) {
3819    uint8_t buf[4];
3820    JDWP::Set4BE(&buf[0], t->GetThreadId());
3821    Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
3822  } else {
3823    CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
3824    ScopedObjectAccessUnchecked soa(Thread::Current());
3825    StackHandleScope<1> hs(soa.Self());
3826    Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
3827    size_t char_count = (name.Get() != NULL) ? name->GetLength() : 0;
3828    const jchar* chars = (name.Get() != NULL) ? name->GetCharArray()->GetData() : NULL;
3829
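    // THCR/THNM payload: [u4] thread id, then the thread name as a [u4] character count followed
    // by UTF-16BE characters (see the size check below).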
3830    std::vector<uint8_t> bytes;
3831    JDWP::Append4BE(bytes, t->GetThreadId());
3832    JDWP::AppendUtf16BE(bytes, chars, char_count);
3833    CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
3834    Dbg::DdmSendChunk(type, bytes);
3835  }
3836}
3837
3838void Dbg::DdmSetThreadNotification(bool enable) {
3839  // Enable/disable thread notifications.
3840  gDdmThreadNotification = enable;
3841  if (enable) {
3842    // Suspend the VM then post thread start notifications for all threads. Threads attaching will
3843    // see a suspension in progress and block until that ends. They then post their own start
3844    // notification.
3845    SuspendVM();
3846    std::list<Thread*> threads;
3847    Thread* self = Thread::Current();
3848    {
3849      MutexLock mu(self, *Locks::thread_list_lock_);
3850      threads = Runtime::Current()->GetThreadList()->GetList();
3851    }
3852    {
3853      ScopedObjectAccess soa(self);
3854      for (Thread* thread : threads) {
3855        Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
3856      }
3857    }
3858    ResumeVM();
3859  }
3860}
3861
3862void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
3863  if (IsDebuggerActive()) {
3864    gJdwpState->PostThreadChange(t, type == CHUNK_TYPE("THCR"));
3865  }
3866  Dbg::DdmSendThreadNotification(t, type);
3867}
3868
3869void Dbg::PostThreadStart(Thread* t) {
3870  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
3871}
3872
3873void Dbg::PostThreadDeath(Thread* t) {
3874  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
3875}
3876
3877void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
3878  CHECK(buf != NULL);
3879  iovec vec[1];
3880  vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
3881  vec[0].iov_len = byte_count;
3882  Dbg::DdmSendChunkV(type, vec, 1);
3883}
3884
3885void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
3886  DdmSendChunk(type, bytes.size(), &bytes[0]);
3887}
3888
3889void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
3890  if (gJdwpState == NULL) {
3891    VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
3892  } else {
3893    gJdwpState->DdmSendChunkV(type, iov, iov_count);
3894  }
3895}
3896
3897int Dbg::DdmHandleHpifChunk(HpifWhen when) {
3898  if (when == HPIF_WHEN_NOW) {
3899    DdmSendHeapInfo(when);
3900    return true;
3901  }
3902
3903  if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
3904    LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
3905    return false;
3906  }
3907
3908  gDdmHpifWhen = when;
3909  return true;
3910}
3911
3912bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
3913  if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
3914    LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
3915    return false;
3916  }
3917
3918  if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
3919    LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
3920    return false;
3921  }
3922
3923  if (native) {
3924    gDdmNhsgWhen = when;
3925    gDdmNhsgWhat = what;
3926  } else {
3927    gDdmHpsgWhen = when;
3928    gDdmHpsgWhat = what;
3929  }
3930  return true;
3931}
3932
3933void Dbg::DdmSendHeapInfo(HpifWhen reason) {
3934  // If there's a one-shot 'when', reset it.
3935  if (reason == gDdmHpifWhen) {
3936    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
3937      gDdmHpifWhen = HPIF_WHEN_NEVER;
3938    }
3939  }
3940
3941  /*
3942   * Chunk HPIF (client --> server)
3943   *
3944   * Heap Info. General information about the heap,
3945   * suitable for a summary display.
3946   *
3947   *   [u4]: number of heaps
3948   *
3949   *   For each heap:
3950   *     [u4]: heap ID
3951   *     [u8]: timestamp in ms since Unix epoch
3952   *     [u1]: capture reason (same as 'when' value from server)
3953   *     [u4]: max heap size in bytes (-Xmx)
3954   *     [u4]: current heap size in bytes
3955   *     [u4]: current number of bytes allocated
3956   *     [u4]: current number of objects allocated
3957   */
3958  uint8_t heap_count = 1;
3959  gc::Heap* heap = Runtime::Current()->GetHeap();
3960  std::vector<uint8_t> bytes;
3961  JDWP::Append4BE(bytes, heap_count);
3962  JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
3963  JDWP::Append8BE(bytes, MilliTime());
3964  JDWP::Append1BE(bytes, reason);
3965  JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
3966  JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
3967  JDWP::Append4BE(bytes, heap->GetBytesAllocated());
3968  JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
3969  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
3970  Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
3971}
3972
3973enum HpsgSolidity {
3974  SOLIDITY_FREE = 0,
3975  SOLIDITY_HARD = 1,
3976  SOLIDITY_SOFT = 2,
3977  SOLIDITY_WEAK = 3,
3978  SOLIDITY_PHANTOM = 4,
3979  SOLIDITY_FINALIZABLE = 5,
3980  SOLIDITY_SWEEP = 6,
3981};
3982
3983enum HpsgKind {
3984  KIND_OBJECT = 0,
3985  KIND_CLASS_OBJECT = 1,
3986  KIND_ARRAY_1 = 2,
3987  KIND_ARRAY_2 = 3,
3988  KIND_ARRAY_4 = 4,
3989  KIND_ARRAY_8 = 5,
3990  KIND_UNKNOWN = 6,
3991  KIND_NATIVE = 7,
3992};
3993
3994#define HPSG_PARTIAL (1<<7)
3995#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
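// For example, HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) encodes as ((4 & 0x7) << 3) | (1 & 0x7) = 0x21.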
3996
3997class HeapChunkContext {
3998 public:
3999  // Maximum chunk size, obtained from the formula:
4000  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
4001  HeapChunkContext(bool merge, bool native)
4002      : buf_(16384 - 16),
4003        type_(0),
4004        merge_(merge),
4005        chunk_overhead_(0) {
4006    Reset();
4007    if (native) {
4008      type_ = CHUNK_TYPE("NHSG");
4009    } else {
4010      type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
4011    }
4012  }
4013
4014  ~HeapChunkContext() {
4015    if (p_ > &buf_[0]) {
4016      Flush();
4017    }
4018  }
4019
4020  void SetChunkOverhead(size_t chunk_overhead) {
4021    chunk_overhead_ = chunk_overhead;
4022  }
4023
4024  void ResetStartOfNextChunk() {
4025    startOfNextMemoryChunk_ = nullptr;
4026  }
4027
4028  void EnsureHeader(const void* chunk_ptr) {
4029    if (!needHeader_) {
4030      return;
4031    }
4032
4033    // Start a new HPSx chunk.
4034    JDWP::Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
4035    JDWP::Write1BE(&p_, 8);  // Size of allocation unit, in bytes.
4036
4037    JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
4038    JDWP::Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
4039    // [u4]: length of piece, in allocation units
4040    // We won't know this until we're done, so remember the position and write a placeholder.
4041    pieceLenField_ = p_;
4042    JDWP::Write4BE(&p_, 0x55555555);
4043    needHeader_ = false;
4044  }
4045
4046  void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4047    if (pieceLenField_ == NULL) {
4048      // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
4049      CHECK(needHeader_);
4050      return;
4051    }
4052    // Patch the "length of piece" field.
4053    CHECK_LE(&buf_[0], pieceLenField_);
4054    CHECK_LE(pieceLenField_, p_);
4055    JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
4056
4057    Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
4058    Reset();
4059  }
4060
4061  static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg)
4062      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4063                            Locks::mutator_lock_) {
4064    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(start, end, used_bytes);
4065  }
4066
4067 private:
4068  enum { ALLOCATION_UNIT_SIZE = 8 };
4069
4070  void Reset() {
4071    p_ = &buf_[0];
4072    ResetStartOfNextChunk();
4073    totalAllocationUnits_ = 0;
4074    needHeader_ = true;
4075    pieceLenField_ = NULL;
4076  }
4077
4078  void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes)
4079      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4080                            Locks::mutator_lock_) {
4081    // Note: heap callbacks cannot manipulate the heap upon which they are crawling. Care is taken
4082    // in the following code not to allocate memory, by ensuring buf_ is of the correct size.
4083    if (used_bytes == 0) {
4084        if (start == NULL) {
4085            // Reset for start of new heap.
4086            startOfNextMemoryChunk_ = NULL;
4087            Flush();
4088        }
4089        // Only process in-use memory so that free region information
4090        // also includes dlmalloc bookkeeping.
4091        return;
4092    }
4093
4094    /* If we're looking at the native heap, we'll just return
4095     * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
4096     */
4097    bool native = type_ == CHUNK_TYPE("NHSG");
4098
4099    // TODO: I'm not sure using start of next chunk works well with multiple spaces. We shouldn't
4100    // count gaps in between spaces as free memory.
4101    if (startOfNextMemoryChunk_ != NULL) {
4102        // Transmit any pending free memory. Native free memory of more
4103        // than kMaxFreeLen is likely due to the use of mmaps, so don't
4104        // report it. If there is no free memory, start a new segment.
4105        bool flush = true;
4106        if (start > startOfNextMemoryChunk_) {
4107            const size_t kMaxFreeLen = 2 * kPageSize;
4108            void* freeStart = startOfNextMemoryChunk_;
4109            void* freeEnd = start;
4110            size_t freeLen = reinterpret_cast<char*>(freeEnd) - reinterpret_cast<char*>(freeStart);
4111            if (!native || freeLen < kMaxFreeLen) {
4112                AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen);
4113                flush = false;
4114            }
4115        }
4116        if (flush) {
4117            startOfNextMemoryChunk_ = NULL;
4118            Flush();
4119        }
4120    }
4121    mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
4122
4123    // Determine the type of this chunk.
4124    // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
4125    // If it's the same, we should combine them.
4126    uint8_t state = ExamineObject(obj, native);
4127    AppendChunk(state, start, used_bytes + chunk_overhead_);
4128    startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4129  }
4130
4131  void AppendChunk(uint8_t state, void* ptr, size_t length)
4132      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4133    // Make sure there's enough room left in the buffer.
4134    // We need to use two bytes for every fractional 256 allocation units used by the chunk plus
4135    // 17 bytes for any header.
4136    size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
4137    size_t bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4138    if (bytesLeft < needed) {
4139      Flush();
4140    }
4141
4142    bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4143    if (bytesLeft < needed) {
4144      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", needed="
4145          << needed << " bytes)";
4146      return;
4147    }
4148    EnsureHeader(ptr);
4149    // Write out the chunk description.
4150    length /= ALLOCATION_UNIT_SIZE;   // Convert to allocation units.
4151    totalAllocationUnits_ += length;
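    // Runs longer than 256 units are split into "partial" records. For example, 600 units in
    // state s are emitted as (s | HPSG_PARTIAL, 255), (s | HPSG_PARTIAL, 255), (s, 87), covering
    // 256 + 256 + 88 = 600 allocation units.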
4152    while (length > 256) {
4153      *p_++ = state | HPSG_PARTIAL;
4154      *p_++ = 255;     // length - 1
4155      length -= 256;
4156    }
4157    *p_++ = state;
4158    *p_++ = length - 1;
4159  }
4160
4161  uint8_t ExamineObject(mirror::Object* o, bool is_native_heap)
4162      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
4163    if (o == NULL) {
4164      return HPSG_STATE(SOLIDITY_FREE, 0);
4165    }
4166
4167    // It's an allocated chunk. Figure out what it is.
4168
4169    // If we're looking at the native heap, we'll just return
4170    // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
4171    if (is_native_heap) {
4172      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4173    }
4174
4175    if (!Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) {
4176      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4177    }
4178
4179    mirror::Class* c = o->GetClass();
4180    if (c == NULL) {
4181      // The object was probably just created but hasn't been initialized yet.
4182      return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4183    }
4184
4185    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
4186      LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
4187      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4188    }
4189
4190    if (c->IsClassClass()) {
4191      return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
4192    }
4193
4194    if (c->IsArrayClass()) {
4195      if (o->IsObjectArray()) {
4196        return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4197      }
4198      switch (c->GetComponentSize()) {
4199      case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
4200      case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
4201      case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4202      case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
4203      }
4204    }
4205
4206    return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4207  }
4208
4209  std::vector<uint8_t> buf_;
4210  uint8_t* p_;
4211  uint8_t* pieceLenField_;
4212  void* startOfNextMemoryChunk_;
4213  size_t totalAllocationUnits_;
4214  uint32_t type_;
4215  bool merge_;
4216  bool needHeader_;
4217  size_t chunk_overhead_;
4218
4219  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
4220};
4221
4222static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
4223    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
4224  const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
4225  HeapChunkContext::HeapChunkCallback(
4226      obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
4227}

void Dbg::DdmSendHeapSegments(bool native) {
  Dbg::HpsgWhen when;
  Dbg::HpsgWhat what;
  if (!native) {
    when = gDdmHpsgWhen;
    what = gDdmHpsgWhat;
  } else {
    when = gDdmNhsgWhen;
    what = gDdmNhsgWhat;
  }
  if (when == HPSG_WHEN_NEVER) {
    return;
  }

  // Figure out what kind of chunks we'll be sending.
  CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS)
      << static_cast<int>(what);

  // First, send a heap start chunk.
  uint8_t heap_id[4];
  JDWP::Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);

  Thread* self = Thread::Current();

  // To allow the Walk/InspectAll() below to exclusively-lock the
  // mutator lock, temporarily release the shared access to the
  // mutator lock here by transitioning to the suspended state.
  Locks::mutator_lock_->AssertSharedHeld(self);
  self->TransitionFromRunnableToSuspended(kSuspended);

  // Send a series of heap segment chunks.
  HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
  if (native) {
#ifdef USE_DLMALLOC
    dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
#else
    UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
#endif
  } else {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    for (const auto& space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
        // allocation then the first sizeof(size_t) may belong to it.
        context.SetChunkOverhead(sizeof(size_t));
        space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
      } else if (space->IsRosAllocSpace()) {
        context.SetChunkOverhead(0);
        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
      } else if (space->IsBumpPointerSpace()) {
        context.SetChunkOverhead(0);
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
        space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
      } else {
        UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
      }
      context.ResetStartOfNextChunk();
    }
    // Walk the large objects; these are not in the AllocSpace.
    context.SetChunkOverhead(0);
    heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
  }

  // Reacquire shared access to the mutator lock.
  self->TransitionFromSuspendedToRunnable();
  Locks::mutator_lock_->AssertSharedHeld(self);

  // Finally, send a heap end chunk.
  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
}

static size_t GetAllocTrackerMax() {
#ifdef HAVE_ANDROID_OS
  // Check whether there's a system property overriding the number of records.
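  // For example, "adb shell setprop dalvik.vm.allocTrackerMax 16384" raises
  // the limit to 16384 records; the value must parse as a decimal number and
  // be a power of two, or it is rejected below.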
  const char* propertyName = "dalvik.vm.allocTrackerMax";
  char allocRecordMaxString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, allocRecordMaxString, "") > 0) {
    char* end;
    size_t value = strtoul(allocRecordMaxString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
                 << "' --- invalid";
      return kDefaultNumAllocRecords;
    }
    if (!IsPowerOfTwo(value)) {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
                 << "' --- not a power of two";
      return kDefaultNumAllocRecords;
    }
    return value;
  }
#endif
  return kDefaultNumAllocRecords;
}

void Dbg::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (recent_allocation_records_ != nullptr) {
        return;  // Already enabled, bail.
      }
      alloc_record_max_ = GetAllocTrackerMax();
      LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
                << kMaxAllocRecordStackDepth << " frames, taking "
                << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
      DCHECK_EQ(alloc_record_head_, 0U);
      DCHECK_EQ(alloc_record_count_, 0U);
      recent_allocation_records_ = new AllocRecord[alloc_record_max_];
      CHECK(recent_allocation_records_ != nullptr);
    }
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  } else {
    {
      ScopedObjectAccess soa(self);  // For type_cache_.Clear();
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (recent_allocation_records_ == nullptr) {
        return;  // Already disabled, bail.
      }
      LOG(INFO) << "Disabling alloc tracker";
      delete[] recent_allocation_records_;
      recent_allocation_records_ = nullptr;
      alloc_record_head_ = 0;
      alloc_record_count_ = 0;
      type_cache_.Clear();
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}

struct AllocRecordStackVisitor : public StackVisitor {
  AllocRecordStackVisitor(Thread* thread, AllocRecord* record)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr), record(record), depth(0) {}

  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
  // annotalysis.
  bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
    if (depth >= kMaxAllocRecordStackDepth) {
      return false;
    }
    mirror::ArtMethod* m = GetMethod();
    if (!m->IsRuntimeMethod()) {
      record->StackElement(depth)->SetMethod(m);
      record->StackElement(depth)->SetDexPc(GetDexPc());
      ++depth;
    }
    return true;
  }

  ~AllocRecordStackVisitor() {
    // Clear out any unused stack trace elements.
    for (; depth < kMaxAllocRecordStackDepth; ++depth) {
      record->StackElement(depth)->SetMethod(nullptr);
      record->StackElement(depth)->SetDexPc(0);
    }
  }

  AllocRecord* record;
  size_t depth;
};

void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) {
  Thread* self = Thread::Current();
  CHECK(self != nullptr);

  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  if (recent_allocation_records_ == nullptr) {
    // In the process of shutting down recording, bail.
    return;
  }

  // Advance and clip.
  if (++alloc_record_head_ == alloc_record_max_) {
    alloc_record_head_ = 0;
  }
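  // (alloc_record_max_ is always a power of two (see GetAllocTrackerMax()), so
  // this wrap could equally be written with the same mask HeadIndex() uses
  // below; the explicit compare is simply easier to read.)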

  // Fill in the basics.
  AllocRecord* record = &recent_allocation_records_[alloc_record_head_];
  record->SetType(type);
  record->SetByteCount(byte_count);
  record->SetThinLockId(self->GetThreadId());

  // Fill in the stack trace.
  AllocRecordStackVisitor visitor(self, record);
  visitor.WalkStack();

  if (alloc_record_count_ < alloc_record_max_) {
    ++alloc_record_count_;
  }
}

// Returns the index of the "head" element, i.e. the oldest record still in
// the buffer, which is where iteration starts.
//
// We point at the most-recently-written record, so if alloc_record_count_ is 1
// we want to use the current element.  Take "head+1" and subtract count
// from it.
//
// We need to handle underflow in our circular buffer, so we add
// alloc_record_max_ and then mask it back down.
size_t Dbg::HeadIndex() {
  return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) &
      (Dbg::alloc_record_max_ - 1);
}
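// Worked example: with alloc_record_max_ = 8, alloc_record_head_ = 2 (the most
// recently written slot) and alloc_record_count_ = 3, this yields
// (2 + 1 + 8 - 3) & 7 == 0, so iteration visits slots 0, 1 and 2 in write order.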

void Dbg::DumpRecentAllocations() {
  ScopedObjectAccess soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
  if (recent_allocation_records_ == nullptr) {
    LOG(INFO) << "Not recording tracked allocations";
    return;
  }

  // "i" starts at the oldest record in the circular buffer and advances
  // toward the most recently written one.
  size_t i = HeadIndex();
  const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
  uint16_t count = capped_count;

  LOG(INFO) << "Tracked allocations (head=" << alloc_record_head_ << " count=" << count << ")";
  while (count--) {
    AllocRecord* record = &recent_allocation_records_[i];

    LOG(INFO) << StringPrintf(" Thread %-2d %6zu bytes ", record->ThinLockId(), record->ByteCount())
              << PrettyClass(record->Type());

    for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
      AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame);
      mirror::ArtMethod* m = stack_element->Method();
      if (m == nullptr) {
        break;
      }
      LOG(INFO) << "    " << PrettyMethod(m) << " line " << stack_element->LineNumber();
    }

    // Pause periodically to help logcat catch up.
    if ((count % 5) == 0) {
      usleep(40000);
    }

    i = (i + 1) & (alloc_record_max_ - 1);
  }
}

class StringTable {
 public:
  StringTable() {
  }

  void Add(const std::string& str) {
    table_.insert(str);
  }

  void Add(const char* str) {
    table_.insert(str);
  }

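  // An index is a string's position in the sorted set, so indexes are only
  // stable once every string has been added. WriteTo() walks the set in the
  // same sorted order, which keeps IndexOf() results and the emitted string
  // table consistent.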
  size_t IndexOf(const char* s) const {
    auto it = table_.find(s);
    if (it == table_.end()) {
      LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
    }
    return std::distance(table_.begin(), it);
  }

  size_t Size() const {
    return table_.size();
  }

  void WriteTo(std::vector<uint8_t>& bytes) const {
    for (const std::string& str : table_) {
      const char* s = str.c_str();
      size_t s_len = CountModifiedUtf8Chars(s);
      // Use the array form of unique_ptr so the buffer is released with delete[].
      std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
      ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
      JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
    }
  }

 private:
  std::set<std::string> table_;
  DISALLOW_COPY_AND_ASSIGN(StringTable);
};

static const char* GetMethodSourceFile(mirror::ArtMethod* method)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(method != nullptr);
  const char* source_file = method->GetDeclaringClassSourceFile();
  return (source_file != nullptr) ? source_file : "";
}

/*
 * The data we send to DDMS contains everything we have recorded.
 *
 * Message header (all values big-endian):
 * (1b) message header len (to allow future expansion); includes itself
 * (1b) entry header len
 * (1b) stack frame len
 * (2b) number of entries
 * (4b) offset to string table from start of message
 * (2b) number of class name strings
 * (2b) number of method name strings
 * (2b) number of source file name strings
 * For each entry:
 *   (4b) total allocation size
 *   (2b) thread id
 *   (2b) allocated object's class name index
 *   (1b) stack depth
 *   For each stack frame:
 *     (2b) method's class name
 *     (2b) method name
 *     (2b) method source file
 *     (2b) line number, clipped to 32767; -2 if native; -1 if no source
 * (xb) class name strings
 * (xb) method name strings
 * (xb) source file strings
 *
 * As with other DDM traffic, strings are sent as a 4-byte length
 * followed by UTF-16 data.
 *
 * We send up 16-bit unsigned indexes into string tables.  In theory there
 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
 * each table, but in practice there should be far fewer.
 *
 * The chief reason for using a string table here is to keep the size of
 * the DDMS message to a minimum.  This is partly to make the protocol
 * efficient, but also because we have to form the whole thing up all at
 * once in a memory buffer.
 *
 * We use separate string tables for class names, method names, and source
 * files to keep the indexes small.  There will generally be no overlap
 * between the contents of these tables.
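 *
 * Sanity check on the fixed sizes used below: the message header is
 * 1+1+1+2+4+2+2+2 = 15 bytes, each entry header is 4+2+2+1 = 9 bytes, and
 * each stack frame is 2+2+2+2 = 8 bytes, matching kMessageHeaderLen,
 * kEntryHeaderLen and kStackFrameLen.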
 */
jbyteArray Dbg::GetRecentAllocations() {
  if (false) {  // Flip to true to also dump the records to the log.
    DumpRecentAllocations();
  }

  Thread* self = Thread::Current();
  std::vector<uint8_t> bytes;
  {
    MutexLock mu(self, *Locks::alloc_tracker_lock_);
    //
    // Part 1: generate string tables.
    //
    StringTable class_names;
    StringTable method_names;
    StringTable filenames;

    const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
    uint16_t count = capped_count;
    size_t idx = HeadIndex();
    while (count--) {
      AllocRecord* record = &recent_allocation_records_[idx];
      std::string temp;
      class_names.Add(record->Type()->GetDescriptor(&temp));
      for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
        mirror::ArtMethod* m = record->StackElement(i)->Method();
        if (m != nullptr) {
          class_names.Add(m->GetDeclaringClassDescriptor());
          method_names.Add(m->GetName());
          filenames.Add(GetMethodSourceFile(m));
        }
      }

      idx = (idx + 1) & (alloc_record_max_ - 1);
    }

    LOG(INFO) << "allocation records: " << capped_count;

    //
    // Part 2: Generate the output and store it in the buffer.
    //

    // (1b) message header len (to allow future expansion); includes itself
    // (1b) entry header len
    // (1b) stack frame len
    const int kMessageHeaderLen = 15;
    const int kEntryHeaderLen = 9;
    const int kStackFrameLen = 8;
    JDWP::Append1BE(bytes, kMessageHeaderLen);
    JDWP::Append1BE(bytes, kEntryHeaderLen);
    JDWP::Append1BE(bytes, kStackFrameLen);

    // (2b) number of entries
    // (4b) offset to string table from start of message
    // (2b) number of class name strings
    // (2b) number of method name strings
    // (2b) number of source file name strings
    JDWP::Append2BE(bytes, capped_count);
    size_t string_table_offset = bytes.size();
    JDWP::Append4BE(bytes, 0);  // We'll patch this later...
    JDWP::Append2BE(bytes, class_names.Size());
    JDWP::Append2BE(bytes, method_names.Size());
    JDWP::Append2BE(bytes, filenames.Size());

    idx = HeadIndex();
    std::string temp;
    for (count = capped_count; count != 0; --count) {
      // For each entry:
      // (4b) total allocation size
      // (2b) thread id
      // (2b) allocated object's class name index
      // (1b) stack depth
      AllocRecord* record = &recent_allocation_records_[idx];
      size_t stack_depth = record->GetDepth();
      size_t allocated_object_class_name_index =
          class_names.IndexOf(record->Type()->GetDescriptor(&temp));
      JDWP::Append4BE(bytes, record->ByteCount());
      JDWP::Append2BE(bytes, record->ThinLockId());
      JDWP::Append2BE(bytes, allocated_object_class_name_index);
      JDWP::Append1BE(bytes, stack_depth);

      for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
        // For each stack frame:
        // (2b) method's class name
        // (2b) method name
        // (2b) method source file
        // (2b) line number, clipped to 32767; -2 if native; -1 if no source
        mirror::ArtMethod* m = record->StackElement(stack_frame)->Method();
        size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
        size_t method_name_index = method_names.IndexOf(m->GetName());
        size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
        JDWP::Append2BE(bytes, class_name_index);
        JDWP::Append2BE(bytes, method_name_index);
        JDWP::Append2BE(bytes, file_name_index);
        JDWP::Append2BE(bytes, record->StackElement(stack_frame)->LineNumber());
      }
      idx = (idx + 1) & (alloc_record_max_ - 1);
    }

    // (xb) class name strings
    // (xb) method name strings
    // (xb) source file strings
    // Patch the string table offset now that we know where the tables begin.
    JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
    class_names.WriteTo(bytes);
    method_names.WriteTo(bytes);
    filenames.WriteTo(bytes);
  }
  JNIEnv* env = self->GetJniEnv();
  jbyteArray result = env->NewByteArray(bytes.size());
  if (result != nullptr) {
    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
  }
  return result;
}

mirror::ArtMethod* DeoptimizationRequest::Method() const {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  return soa.DecodeMethod(method_);
}

void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  method_ = soa.EncodeMethod(m);
}

}  // namespace art
