debugger.cc revision 1d6ee090fddd4bfd35c304d6ceb929d5c529dfcc
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "debugger.h"
18
19#include <sys/uio.h>
20
21#include <set>
22
23#include "arch/context.h"
24#include "class_linker.h"
25#include "class_linker-inl.h"
26#include "dex_file-inl.h"
27#include "dex_instruction.h"
28#include "field_helper.h"
29#include "gc/accounting/card_table-inl.h"
30#include "gc/space/large_object_space.h"
31#include "gc/space/space-inl.h"
32#include "handle_scope.h"
33#include "jdwp/object_registry.h"
34#include "method_helper.h"
35#include "mirror/art_field-inl.h"
36#include "mirror/art_method-inl.h"
37#include "mirror/class.h"
38#include "mirror/class-inl.h"
39#include "mirror/class_loader.h"
40#include "mirror/object-inl.h"
41#include "mirror/object_array-inl.h"
42#include "mirror/string-inl.h"
43#include "mirror/throwable.h"
44#include "quick/inline_method_analyser.h"
45#include "reflection.h"
46#include "safe_map.h"
47#include "scoped_thread_state_change.h"
48#include "ScopedLocalRef.h"
49#include "ScopedPrimitiveArray.h"
50#include "handle_scope-inl.h"
51#include "thread_list.h"
52#include "throw_location.h"
53#include "utf.h"
54#include "verifier/method_verifier-inl.h"
55#include "well_known_classes.h"
56
57#ifdef HAVE_ANDROID_OS
58#include "cutils/properties.h"
59#endif
60
61namespace art {
62
63static const size_t kMaxAllocRecordStackDepth = 16;  // Max 255.
64static const size_t kDefaultNumAllocRecords = 64*1024;  // Must be a power of 2. 2BE can hold 64k-1.
65
66// Limit alloc_record_count to the 2BE value that is the limit of the current protocol.
67static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
68  if (alloc_record_count > 0xffff) {
69    return 0xffff;
70  }
71  return alloc_record_count;
72}
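// For example (illustrative): the cap simply clamps to the 2-byte big-endian maximum, so
//   CappedAllocRecordCount(1000)  == 1000
//   CappedAllocRecordCount(70000) == 0xffff (65535)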
73
74class AllocRecordStackTraceElement {
75 public:
76  AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {
77  }
78
79  int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
80    mirror::ArtMethod* method = Method();
81    DCHECK(method != nullptr);
82    return method->GetLineNumFromDexPC(DexPc());
83  }
84
85  mirror::ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
86    ScopedObjectAccessUnchecked soa(Thread::Current());
87    return soa.DecodeMethod(method_);
88  }
89
90  void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
91    ScopedObjectAccessUnchecked soa(Thread::Current());
92    method_ = soa.EncodeMethod(m);
93  }
94
95  uint32_t DexPc() const {
96    return dex_pc_;
97  }
98
99  void SetDexPc(uint32_t pc) {
100    dex_pc_ = pc;
101  }
102
103 private:
104  jmethodID method_;
105  uint32_t dex_pc_;
106};
107
108jobject Dbg::TypeCache::Add(mirror::Class* t) {
109  ScopedObjectAccessUnchecked soa(Thread::Current());
110  int32_t hash_code = t->IdentityHashCode();
111  auto range = objects_.equal_range(hash_code);
112  for (auto it = range.first; it != range.second; ++it) {
113    if (soa.Decode<mirror::Class*>(it->second) == t) {
114      // Found a matching weak global, return it.
115      return it->second;
116    }
117  }
118  JNIEnv* env = soa.Env();
119  const jobject local_ref = soa.AddLocalReference<jobject>(t);
120  const jobject weak_global = env->NewWeakGlobalRef(local_ref);
121  env->DeleteLocalRef(local_ref);
122  objects_.insert(std::make_pair(hash_code, weak_global));
123  return weak_global;
124}
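// Illustrative note: identity hash codes are not unique, so two distinct classes may land in the
// same bucket; the equal_range() scan above disambiguates by comparing the decoded class pointers
// before falling back to creating a new weak global reference.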
125
126void Dbg::TypeCache::Clear() {
127  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
128  Thread* self = Thread::Current();
129  for (const auto& p : objects_) {
130    vm->DeleteWeakGlobalRef(self, p.second);
131  }
132  objects_.clear();
133}
134
135class AllocRecord {
136 public:
137  AllocRecord() : type_(nullptr), byte_count_(0), thin_lock_id_(0) {}
138
139  mirror::Class* Type() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
140    return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_));
141  }
142
143  void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
144                                                       Locks::alloc_tracker_lock_) {
145    type_ = Dbg::type_cache_.Add(t);
146  }
147
148  size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
149    size_t depth = 0;
150    while (depth < kMaxAllocRecordStackDepth && stack_[depth].Method() != NULL) {
151      ++depth;
152    }
153    return depth;
154  }
155
156  size_t ByteCount() const {
157    return byte_count_;
158  }
159
160  void SetByteCount(size_t count) {
161    byte_count_ = count;
162  }
163
164  uint16_t ThinLockId() const {
165    return thin_lock_id_;
166  }
167
168  void SetThinLockId(uint16_t id) {
169    thin_lock_id_ = id;
170  }
171
172  AllocRecordStackTraceElement* StackElement(size_t index) {
173    DCHECK_LT(index, kMaxAllocRecordStackDepth);
174    return &stack_[index];
175  }
176
177 private:
178  jobject type_;  // This is a weak global.
179  size_t byte_count_;
180  uint16_t thin_lock_id_;
181  AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];  // Unused entries have NULL method.
182};
183
184class Breakpoint {
185 public:
186  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
187    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
188    : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) {
189    ScopedObjectAccessUnchecked soa(Thread::Current());
190    method_ = soa.EncodeMethod(method);
191  }
192
193  Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
194    : method_(nullptr), dex_pc_(other.dex_pc_),
195      need_full_deoptimization_(other.need_full_deoptimization_) {
196    ScopedObjectAccessUnchecked soa(Thread::Current());
197    method_ = soa.EncodeMethod(other.Method());
198  }
199
200  mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
201    ScopedObjectAccessUnchecked soa(Thread::Current());
202    return soa.DecodeMethod(method_);
203  }
204
205  uint32_t DexPc() const {
206    return dex_pc_;
207  }
208
209  bool NeedFullDeoptimization() const {
210    return need_full_deoptimization_;
211  }
212
213 private:
214  // The location of this breakpoint.
215  jmethodID method_;
216  uint32_t dex_pc_;
217
218  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
219  bool need_full_deoptimization_;
220};
221
222static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
223    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
224  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
225  return os;
226}
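// Illustrative output, assuming a breakpoint on java.lang.Object.wait(long, int) at dex pc 0x2:
//   "Breakpoint[void java.lang.Object.wait(long, int) @0x2]"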
227
228class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
229 public:
230  DebugInstrumentationListener() {}
231  virtual ~DebugInstrumentationListener() {}
232
233  void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
234                     uint32_t dex_pc)
235      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
236    if (method->IsNative()) {
237      // TODO: posting location events is a suspension point and native method entry stubs aren't.
238      return;
239    }
240    Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
241  }
242
243  void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
244                    uint32_t dex_pc, const JValue& return_value)
245      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
246    if (method->IsNative()) {
247      // TODO: posting location events is a suspension point and native method entry stubs aren't.
248      return;
249    }
250    Dbg::UpdateDebugger(thread, this_object, method, dex_pc, Dbg::kMethodExit, &return_value);
251  }
252
253  void MethodUnwind(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
254                    uint32_t dex_pc)
255      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
256    // We're not registered to listen to this kind of event, so complain.
257    LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
258               << " " << dex_pc;
259  }
260
261  void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
262                  uint32_t new_dex_pc)
263      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
264    Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, 0, nullptr);
265  }
266
267  void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
268                 uint32_t dex_pc, mirror::ArtField* field)
269      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
270    Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
271  }
272
273  void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
274                    uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value)
275      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
276    Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
277  }
278
279  void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
280                       mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
281                       mirror::Throwable* exception_object)
282      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
283    Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
284  }
285
286 private:
287  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
288} gDebugInstrumentationListener;
289
290// JDWP is allowed unless the Zygote forbids it.
291static bool gJdwpAllowed = true;
292
293// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
294static bool gJdwpConfigured = false;
295
296// Broken-down JDWP options. (Only valid if IsJdwpConfigured() is true.)
297static JDWP::JdwpOptions gJdwpOptions;
298
299// Runtime JDWP state.
300static JDWP::JdwpState* gJdwpState = NULL;
301static bool gDebuggerConnected;  // debugger or DDMS is connected.
302static bool gDebuggerActive;     // debugger is making requests.
303static bool gDisposed;           // debugger called VirtualMachine.Dispose, so we should drop the connection.
304
305static bool gDdmThreadNotification = false;
306
307// DDMS GC-related settings.
308static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
309static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
310static Dbg::HpsgWhat gDdmHpsgWhat;
311static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
312static Dbg::HpsgWhat gDdmNhsgWhat;
313
314static ObjectRegistry* gRegistry = nullptr;
315
316// Recent allocation tracking.
317AllocRecord* Dbg::recent_allocation_records_ = nullptr;  // TODO: CircularBuffer<AllocRecord>
318size_t Dbg::alloc_record_max_ = 0;
319size_t Dbg::alloc_record_head_ = 0;
320size_t Dbg::alloc_record_count_ = 0;
321Dbg::TypeCache Dbg::type_cache_;
322
323// Deoptimization support.
324std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
325size_t Dbg::full_deoptimization_event_count_ = 0;
326size_t Dbg::delayed_full_undeoptimization_count_ = 0;
327
328// Instrumentation event reference counters.
329size_t Dbg::dex_pc_change_event_ref_count_ = 0;
330size_t Dbg::method_enter_event_ref_count_ = 0;
331size_t Dbg::method_exit_event_ref_count_ = 0;
332size_t Dbg::field_read_event_ref_count_ = 0;
333size_t Dbg::field_write_event_ref_count_ = 0;
334size_t Dbg::exception_catch_event_ref_count_ = 0;
335uint32_t Dbg::instrumentation_events_ = 0;
336
337// Breakpoints.
338static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
339
340void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
341                                RootType root_type) {
342  if (receiver != nullptr) {
343    callback(&receiver, arg, tid, root_type);
344  }
345  if (thread != nullptr) {
346    callback(&thread, arg, tid, root_type);
347  }
348  if (klass != nullptr) {
349    callback(reinterpret_cast<mirror::Object**>(&klass), arg, tid, root_type);
350  }
351  if (method != nullptr) {
352    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
353  }
354}
355
356void DebugInvokeReq::Clear() {
357  invoke_needed = false;
358  receiver = nullptr;
359  thread = nullptr;
360  klass = nullptr;
361  method = nullptr;
362}
363
364void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
365                                   RootType root_type) {
366  if (method != nullptr) {
367    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
368  }
369}
370
371bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
372  return dex_pcs.find(dex_pc) != dex_pcs.end();
373}
374
375void SingleStepControl::Clear() {
376  is_active = false;
377  method = nullptr;
378  dex_pcs.clear();
379}
380
381static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
382    LOCKS_EXCLUDED(Locks::breakpoint_lock_)
383    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
384  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
385  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
386    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
387      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
388      return true;
389    }
390  }
391  return false;
392}
393
394static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
395    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
396  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
397  // A thread may be suspended for GC; in this code, we really want to know whether
398  // there's a debugger suspension active.
399  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
400}
401
402static mirror::Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status)
403    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
404  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
405  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
406    status = JDWP::ERR_INVALID_OBJECT;
407    return NULL;
408  }
409  if (!o->IsArrayInstance()) {
410    status = JDWP::ERR_INVALID_ARRAY;
411    return NULL;
412  }
413  status = JDWP::ERR_NONE;
414  return o->AsArray();
415}
416
417static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status)
418    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
419  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
420  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
421    status = JDWP::ERR_INVALID_OBJECT;
422    return NULL;
423  }
424  if (!o->IsClass()) {
425    status = JDWP::ERR_INVALID_CLASS;
426    return NULL;
427  }
428  status = JDWP::ERR_NONE;
429  return o->AsClass();
430}
431
432static JDWP::JdwpError DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id, Thread*& thread)
433    EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
434    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
435    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
436  mirror::Object* thread_peer = gRegistry->Get<mirror::Object*>(thread_id);
437  if (thread_peer == NULL || thread_peer == ObjectRegistry::kInvalidObject) {
438    // This isn't even an object.
439    return JDWP::ERR_INVALID_OBJECT;
440  }
441
442  mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
443  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
444    // This isn't a thread.
445    return JDWP::ERR_INVALID_THREAD;
446  }
447
448  thread = Thread::FromManagedThread(soa, thread_peer);
449  if (thread == NULL) {
450    // This is a java.lang.Thread without a Thread*. Must be a zombie.
451    return JDWP::ERR_THREAD_NOT_ALIVE;
452  }
453  return JDWP::ERR_NONE;
454}
455
456static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
457  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
458  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
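  // For example (illustrative): "I" yields JT_INT ('I'), "[I" yields JT_ARRAY ('['), and both
  // "Ljava/lang/String;" and "Ljava/lang/Thread;" yield JT_OBJECT ('L'); refinement to
  // JT_STRING/JT_THREAD is left to TagFromClass below.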
459  return static_cast<JDWP::JdwpTag>(descriptor[0]);
460}
461
462static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
463    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
464  std::string temp;
465  const char* descriptor = klass->GetDescriptor(&temp);
466  return BasicTagFromDescriptor(descriptor);
467}
468
469static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
470    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
471  CHECK(c != NULL);
472  if (c->IsArrayClass()) {
473    return JDWP::JT_ARRAY;
474  }
475  if (c->IsStringClass()) {
476    return JDWP::JT_STRING;
477  }
478  if (c->IsClassClass()) {
479    return JDWP::JT_CLASS_OBJECT;
480  }
481  {
482    mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
483    if (thread_class->IsAssignableFrom(c)) {
484      return JDWP::JT_THREAD;
485    }
486  }
487  {
488    mirror::Class* thread_group_class =
489        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
490    if (thread_group_class->IsAssignableFrom(c)) {
491      return JDWP::JT_THREAD_GROUP;
492    }
493  }
494  {
495    mirror::Class* class_loader_class =
496        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader);
497    if (class_loader_class->IsAssignableFrom(c)) {
498      return JDWP::JT_CLASS_LOADER;
499    }
500  }
501  return JDWP::JT_OBJECT;
502}
503
504/*
505 * Objects declared to hold Object might actually hold a more specific
506 * type.  The debugger may take a special interest in these (e.g. it
507 * wants to display the contents of Strings), so we want to return an
508 * appropriate tag.
509 *
510 * Null objects are tagged JT_OBJECT.
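 * For example (illustrative): a local declared as java.lang.Object that currently holds a
 * java.lang.String is tagged JT_STRING rather than JT_OBJECT, so the debugger can render its
 * contents as a string.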
511 */
512static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o)
513    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
514  return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
515}
516
517static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
518  switch (tag) {
519  case JDWP::JT_BOOLEAN:
520  case JDWP::JT_BYTE:
521  case JDWP::JT_CHAR:
522  case JDWP::JT_FLOAT:
523  case JDWP::JT_DOUBLE:
524  case JDWP::JT_INT:
525  case JDWP::JT_LONG:
526  case JDWP::JT_SHORT:
527  case JDWP::JT_VOID:
528    return true;
529  default:
530    return false;
531  }
532}
533
534/*
535 * Handle one of the JDWP name/value pairs.
536 *
537 * JDWP options are:
538 *  help: if specified, show help message and bail
539 *  transport: may be dt_socket or dt_shmem
540 *  address: for dt_socket, "host:port", or just "port" when listening
541 *  server: if "y", wait for debugger to attach; if "n", attach to debugger
542 *  timeout: how long to wait for debugger to connect / listen
543 *
544 * Useful with server=n (these aren't supported yet):
545 *  onthrow=<exception-name>: connect to debugger when exception thrown
546 *  onuncaught=y|n: connect to debugger when uncaught exception thrown
547 *  launch=<command-line>: launch the debugger itself
548 *
549 * The "transport" option is required, as is "address" if server=n.
550 * The "transport" option is required, as is "address" if server=n.
 */
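// Illustrative example: a typical set of name/value pairs handled here comes from a command line
// such as
//   -agentlib:jdwp=transport=dt_socket,address=8000,server=y,suspend=n
// which listens for a debugger on TCP port 8000 without suspending the VM at startup.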
551static bool ParseJdwpOption(const std::string& name, const std::string& value) {
552  if (name == "transport") {
553    if (value == "dt_socket") {
554      gJdwpOptions.transport = JDWP::kJdwpTransportSocket;
555    } else if (value == "dt_android_adb") {
556      gJdwpOptions.transport = JDWP::kJdwpTransportAndroidAdb;
557    } else {
558      LOG(ERROR) << "JDWP transport not supported: " << value;
559      return false;
560    }
561  } else if (name == "server") {
562    if (value == "n") {
563      gJdwpOptions.server = false;
564    } else if (value == "y") {
565      gJdwpOptions.server = true;
566    } else {
567      LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'";
568      return false;
569    }
570  } else if (name == "suspend") {
571    if (value == "n") {
572      gJdwpOptions.suspend = false;
573    } else if (value == "y") {
574      gJdwpOptions.suspend = true;
575    } else {
576      LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'";
577      return false;
578    }
579  } else if (name == "address") {
580    /* this is either <port> or <host>:<port> */
581    std::string port_string;
582    gJdwpOptions.host.clear();
583    std::string::size_type colon = value.find(':');
584    if (colon != std::string::npos) {
585      gJdwpOptions.host = value.substr(0, colon);
586      port_string = value.substr(colon + 1);
587    } else {
588      port_string = value;
589    }
590    if (port_string.empty()) {
591      LOG(ERROR) << "JDWP address missing port: " << value;
592      return false;
593    }
594    char* end;
595    uint64_t port = strtoul(port_string.c_str(), &end, 10);
596    if (*end != '\0' || port > 0xffff) {
597      LOG(ERROR) << "JDWP address has junk in port field: " << value;
598      return false;
599    }
600    gJdwpOptions.port = port;
601  } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") {
602    /* valid but unsupported */
603    LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'";
604  } else {
605    LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'";
606  }
607
608  return true;
609}
610
611/*
612 * Parse the latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.:
613 * "transport=dt_socket,address=8000,server=y,suspend=n"
614 */
615bool Dbg::ParseJdwpOptions(const std::string& options) {
616  VLOG(jdwp) << "ParseJdwpOptions: " << options;
617
618  std::vector<std::string> pairs;
619  Split(options, ',', pairs);
620
621  for (size_t i = 0; i < pairs.size(); ++i) {
622    std::string::size_type equals = pairs[i].find('=');
623    if (equals == std::string::npos) {
624      LOG(ERROR) << "Can't parse JDWP option '" << pairs[i] << "' in '" << options << "'";
625      return false;
626    }
627    ParseJdwpOption(pairs[i].substr(0, equals), pairs[i].substr(equals + 1));
628  }
629
630  if (gJdwpOptions.transport == JDWP::kJdwpTransportUnknown) {
631    LOG(ERROR) << "Must specify JDWP transport: " << options;
632  }
633  if (!gJdwpOptions.server && (gJdwpOptions.host.empty() || gJdwpOptions.port == 0)) {
634    LOG(ERROR) << "Must specify JDWP host and port when server=n: " << options;
635    return false;
636  }
637
638  gJdwpConfigured = true;
639  return true;
640}
641
642void Dbg::StartJdwp() {
643  if (!gJdwpAllowed || !IsJdwpConfigured()) {
644    // No JDWP for you!
645    return;
646  }
647
648  CHECK(gRegistry == nullptr);
649  gRegistry = new ObjectRegistry;
650
651  // Init JDWP if the debugger is enabled. This may connect out to a
652  // debugger, passively listen for a debugger, or block waiting for a
653  // debugger.
654  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
655  if (gJdwpState == NULL) {
656    // We probably failed because some other process has the port already, which means that
657    // if we don't abort, the user is likely to think they're talking to us when they're actually
658    // talking to that other process.
659    LOG(FATAL) << "Debugger thread failed to initialize";
660  }
661
662  // If a debugger has already attached, send the "welcome" message.
663  // This may cause us to suspend all threads.
664  if (gJdwpState->IsActive()) {
665    ScopedObjectAccess soa(Thread::Current());
666    if (!gJdwpState->PostVMStart()) {
667      LOG(WARNING) << "Failed to post 'start' message to debugger";
668    }
669  }
670}
671
672void Dbg::StopJdwp() {
673  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
674  // destruction of gJdwpState).
675  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
676    gJdwpState->PostVMDeath();
677  }
678  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
679  // Prevent the JDWP thread from processing incoming JDWP packets after we close the connection.
680  delete gJdwpState;
681  gJdwpState = nullptr;
682  delete gRegistry;
683  gRegistry = nullptr;
684}
685
686void Dbg::GcDidFinish() {
687  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
688    ScopedObjectAccess soa(Thread::Current());
689    VLOG(jdwp) << "Sending heap info to DDM";
690    DdmSendHeapInfo(gDdmHpifWhen);
691  }
692  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
693    ScopedObjectAccess soa(Thread::Current());
694    VLOG(jdwp) << "Dumping heap to DDM";
695    DdmSendHeapSegments(false);
696  }
697  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
698    ScopedObjectAccess soa(Thread::Current());
699    VLOG(jdwp) << "Dumping native heap to DDM";
700    DdmSendHeapSegments(true);
701  }
702}
703
704void Dbg::SetJdwpAllowed(bool allowed) {
705  gJdwpAllowed = allowed;
706}
707
708DebugInvokeReq* Dbg::GetInvokeReq() {
709  return Thread::Current()->GetInvokeReq();
710}
711
712Thread* Dbg::GetDebugThread() {
713  return (gJdwpState != NULL) ? gJdwpState->GetDebugThread() : NULL;
714}
715
716void Dbg::ClearWaitForEventThread() {
717  gJdwpState->ClearWaitForEventThread();
718}
719
720void Dbg::Connected() {
721  CHECK(!gDebuggerConnected);
722  VLOG(jdwp) << "JDWP has attached";
723  gDebuggerConnected = true;
724  gDisposed = false;
725}
726
727void Dbg::Disposed() {
728  gDisposed = true;
729}
730
731bool Dbg::IsDisposed() {
732  return gDisposed;
733}
734
735void Dbg::GoActive() {
736  // Enable all debugging features, including scans for breakpoints.
737  // This is a no-op if we're already active.
738  // Only called from the JDWP handler thread.
739  if (gDebuggerActive) {
740    return;
741  }
742
743  {
744    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
745    ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
746    CHECK_EQ(gBreakpoints.size(), 0U);
747  }
748
749  {
750    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
751    CHECK_EQ(deoptimization_requests_.size(), 0U);
752    CHECK_EQ(full_deoptimization_event_count_, 0U);
753    CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
754    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
755    CHECK_EQ(method_enter_event_ref_count_, 0U);
756    CHECK_EQ(method_exit_event_ref_count_, 0U);
757    CHECK_EQ(field_read_event_ref_count_, 0U);
758    CHECK_EQ(field_write_event_ref_count_, 0U);
759    CHECK_EQ(exception_catch_event_ref_count_, 0U);
760  }
761
762  Runtime* runtime = Runtime::Current();
763  runtime->GetThreadList()->SuspendAll();
764  Thread* self = Thread::Current();
765  ThreadState old_state = self->SetStateUnsafe(kRunnable);
766  CHECK_NE(old_state, kRunnable);
767  runtime->GetInstrumentation()->EnableDeoptimization();
768  instrumentation_events_ = 0;
769  gDebuggerActive = true;
770  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
771  runtime->GetThreadList()->ResumeAll();
772
773  LOG(INFO) << "Debugger is active";
774}
775
776void Dbg::Disconnected() {
777  CHECK(gDebuggerConnected);
778
779  LOG(INFO) << "Debugger is no longer active";
780
781  // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
782  // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
783  // and clear the object registry.
784  Runtime* runtime = Runtime::Current();
785  runtime->GetThreadList()->SuspendAll();
786  Thread* self = Thread::Current();
787  ThreadState old_state = self->SetStateUnsafe(kRunnable);
788
789  // Debugger may not be active at this point.
790  if (gDebuggerActive) {
791    {
792      // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
793      // This prevents us from having any pending deoptimization request when the debugger attaches
794      // to us again while no event has been requested yet.
795      MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
796      deoptimization_requests_.clear();
797      full_deoptimization_event_count_ = 0U;
798      delayed_full_undeoptimization_count_ = 0U;
799    }
800    if (instrumentation_events_ != 0) {
801      runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
802                                                    instrumentation_events_);
803      instrumentation_events_ = 0;
804    }
805    runtime->GetInstrumentation()->DisableDeoptimization();
806    gDebuggerActive = false;
807  }
808  gRegistry->Clear();
809  gDebuggerConnected = false;
810  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
811  runtime->GetThreadList()->ResumeAll();
812}
813
814bool Dbg::IsDebuggerActive() {
815  return gDebuggerActive;
816}
817
818bool Dbg::IsJdwpConfigured() {
819  return gJdwpConfigured;
820}
821
822int64_t Dbg::LastDebuggerActivity() {
823  return gJdwpState->LastDebuggerActivity();
824}
825
826void Dbg::UndoDebuggerSuspensions() {
827  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
828}
829
830std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
831  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id);
832  if (o == NULL) {
833    return "NULL";
834  }
835  if (o == ObjectRegistry::kInvalidObject) {
836    return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
837  }
838  if (!o->IsClass()) {
839    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
840  }
841  std::string temp;
842  return DescriptorToName(o->AsClass()->GetDescriptor(&temp));
843}
844
845JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_object_id) {
846  JDWP::JdwpError status;
847  mirror::Class* c = DecodeClass(id, status);
848  if (c == NULL) {
849    return status;
850  }
851  class_object_id = gRegistry->Add(c);
852  return JDWP::ERR_NONE;
853}
854
855JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclass_id) {
856  JDWP::JdwpError status;
857  mirror::Class* c = DecodeClass(id, status);
858  if (c == NULL) {
859    return status;
860  }
861  if (c->IsInterface()) {
862    // http://code.google.com/p/android/issues/detail?id=20856
863    superclass_id = 0;
864  } else {
865    superclass_id = gRegistry->Add(c->GetSuperClass());
866  }
867  return JDWP::ERR_NONE;
868}
869
870JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
871  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
872  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
873    return JDWP::ERR_INVALID_OBJECT;
874  }
875  expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader()));
876  return JDWP::ERR_NONE;
877}
878
879JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
880  JDWP::JdwpError status;
881  mirror::Class* c = DecodeClass(id, status);
882  if (c == NULL) {
883    return status;
884  }
885
886  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;
887
888  // Set ACC_SUPER. Dex files don't contain this flag, and only classes (not interfaces) are
889  // supposed to have it set.
890  // Class.getModifiers doesn't return it, but JDWP does, so we set it here.
891  if ((access_flags & kAccInterface) == 0) {
892    access_flags |= kAccSuper;
893  }
894
895  expandBufAdd4BE(pReply, access_flags);
896
897  return JDWP::ERR_NONE;
898}
899
900JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
901    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
902  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
903  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
904    return JDWP::ERR_INVALID_OBJECT;
905  }
906
907  // Ensure all threads are suspended while we read objects' lock words.
908  Thread* self = Thread::Current();
909  CHECK_EQ(self->GetState(), kRunnable);
910  self->TransitionFromRunnableToSuspended(kSuspended);
911  Runtime::Current()->GetThreadList()->SuspendAll();
912
913  MonitorInfo monitor_info(o);
914
915  Runtime::Current()->GetThreadList()->ResumeAll();
916  self->TransitionFromSuspendedToRunnable();
917
918  if (monitor_info.owner_ != NULL) {
919    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
920  } else {
921    expandBufAddObjectId(reply, gRegistry->Add(NULL));
922  }
923  expandBufAdd4BE(reply, monitor_info.entry_count_);
924  expandBufAdd4BE(reply, monitor_info.waiters_.size());
925  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
926    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
927  }
928  return JDWP::ERR_NONE;
929}
930
931JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
932                                      std::vector<JDWP::ObjectId>& monitors,
933                                      std::vector<uint32_t>& stack_depths) {
934  struct OwnedMonitorVisitor : public StackVisitor {
935    OwnedMonitorVisitor(Thread* thread, Context* context,
936                        std::vector<JDWP::ObjectId>* monitor_vector,
937                        std::vector<uint32_t>* stack_depth_vector)
938        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
939      : StackVisitor(thread, context), current_stack_depth(0),
940        monitors(monitor_vector), stack_depths(stack_depth_vector) {}
941
942    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
943    // annotalysis.
944    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
945      if (!GetMethod()->IsRuntimeMethod()) {
946        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
947        ++current_stack_depth;
948      }
949      return true;
950    }
951
952    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
953        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
954      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
955      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
956      visitor->stack_depths->push_back(visitor->current_stack_depth);
957    }
958
959    size_t current_stack_depth;
960    std::vector<JDWP::ObjectId>* monitors;
961    std::vector<uint32_t>* stack_depths;
962  };
963
964  ScopedObjectAccessUnchecked soa(Thread::Current());
965  Thread* thread;
966  {
967    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
968    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
969    if (error != JDWP::ERR_NONE) {
970      return error;
971    }
972    if (!IsSuspendedForDebugger(soa, thread)) {
973      return JDWP::ERR_THREAD_NOT_SUSPENDED;
974    }
975  }
976  std::unique_ptr<Context> context(Context::Create());
977  OwnedMonitorVisitor visitor(thread, context.get(), &monitors, &stack_depths);
978  visitor.WalkStack();
979  return JDWP::ERR_NONE;
980}
981
982JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
983                                         JDWP::ObjectId& contended_monitor) {
984  mirror::Object* contended_monitor_obj;
985  ScopedObjectAccessUnchecked soa(Thread::Current());
986  {
987    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
988    Thread* thread;
989    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
990    if (error != JDWP::ERR_NONE) {
991      return error;
992    }
993    if (!IsSuspendedForDebugger(soa, thread)) {
994      return JDWP::ERR_THREAD_NOT_SUSPENDED;
995    }
996    contended_monitor_obj = Monitor::GetContendedMonitor(thread);
997  }
998  // Add() requires that the thread_list_lock_ is not held, in order to
999  // avoid a lock level violation.
1000  contended_monitor = gRegistry->Add(contended_monitor_obj);
1001  return JDWP::ERR_NONE;
1002}
1003
1004JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
1005                                       std::vector<uint64_t>& counts)
1006    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1007  gc::Heap* heap = Runtime::Current()->GetHeap();
1008  heap->CollectGarbage(false);
1009  std::vector<mirror::Class*> classes;
1010  counts.clear();
1011  for (size_t i = 0; i < class_ids.size(); ++i) {
1012    JDWP::JdwpError status;
1013    mirror::Class* c = DecodeClass(class_ids[i], status);
1014    if (c == NULL) {
1015      return status;
1016    }
1017    classes.push_back(c);
1018    counts.push_back(0);
1019  }
1020  heap->CountInstances(classes, false, &counts[0]);
1021  return JDWP::ERR_NONE;
1022}
1023
1024JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>& instances)
1025    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1026  gc::Heap* heap = Runtime::Current()->GetHeap();
1027  // We only want reachable instances, so do a GC.
1028  heap->CollectGarbage(false);
1029  JDWP::JdwpError status;
1030  mirror::Class* c = DecodeClass(class_id, status);
1031  if (c == nullptr) {
1032    return status;
1033  }
1034  std::vector<mirror::Object*> raw_instances;
1035  Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
1036  for (size_t i = 0; i < raw_instances.size(); ++i) {
1037    instances.push_back(gRegistry->Add(raw_instances[i]));
1038  }
1039  return JDWP::ERR_NONE;
1040}
1041
1042JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
1043                                         std::vector<JDWP::ObjectId>& referring_objects)
1044    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1045  gc::Heap* heap = Runtime::Current()->GetHeap();
1046  heap->CollectGarbage(false);
1047  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1048  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1049    return JDWP::ERR_INVALID_OBJECT;
1050  }
1051  std::vector<mirror::Object*> raw_instances;
1052  heap->GetReferringObjects(o, max_count, raw_instances);
1053  for (size_t i = 0; i < raw_instances.size(); ++i) {
1054    referring_objects.push_back(gRegistry->Add(raw_instances[i]));
1055  }
1056  return JDWP::ERR_NONE;
1057}
1058
1059JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id)
1060    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1061  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1062  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1063    return JDWP::ERR_INVALID_OBJECT;
1064  }
1065  gRegistry->DisableCollection(object_id);
1066  return JDWP::ERR_NONE;
1067}
1068
1069JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id)
1070    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1071  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1072  // Unlike DisableCollection, JDWP specs do not state an invalid object causes an error. The RI
1073  // also ignores these cases and never returns an error. However, it's not obvious why this command
1074  // should behave differently from DisableCollection and IsCollected commands. So let's be more
1075  // strict and return an error if this happens.
1076  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1077    return JDWP::ERR_INVALID_OBJECT;
1078  }
1079  gRegistry->EnableCollection(object_id);
1080  return JDWP::ERR_NONE;
1081}
1082
1083JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool& is_collected)
1084    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1085  if (object_id == 0) {
1086    // Null object id is invalid.
1087    return JDWP::ERR_INVALID_OBJECT;
1088  }
1089  // JDWP specs state an INVALID_OBJECT error is returned if the object ID is not valid. However
1090  // the RI seems to ignore this and assumes the object has been collected.
1091  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1092  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1093    is_collected = true;
1094  } else {
1095    is_collected = gRegistry->IsCollected(object_id);
1096  }
1097  return JDWP::ERR_NONE;
1098}
1099
1100void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
1101    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1102  gRegistry->DisposeObject(object_id, reference_count);
1103}
1104
1105static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass)
1106    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1107  DCHECK(klass != nullptr);
1108  if (klass->IsArrayClass()) {
1109    return JDWP::TT_ARRAY;
1110  } else if (klass->IsInterface()) {
1111    return JDWP::TT_INTERFACE;
1112  } else {
1113    return JDWP::TT_CLASS;
1114  }
1115}
1116
1117JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1118  JDWP::JdwpError status;
1119  mirror::Class* c = DecodeClass(class_id, status);
1120  if (c == NULL) {
1121    return status;
1122  }
1123
1124  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
1125  expandBufAdd1(pReply, type_tag);
1126  expandBufAddRefTypeId(pReply, class_id);
1127  return JDWP::ERR_NONE;
1128}
1129
1130void Dbg::GetClassList(std::vector<JDWP::RefTypeId>& classes) {
1131  // Get the complete list of reference classes (i.e. all classes except
1132  // the primitive types).
1133  // Fills the supplied vector with the RefTypeId of each such class.
1134  struct ClassListCreator {
1135    explicit ClassListCreator(std::vector<JDWP::RefTypeId>& classes) : classes(classes) {
1136    }
1137
1138    static bool Visit(mirror::Class* c, void* arg) {
1139      return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
1140    }
1141
1142    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
1143    // annotalysis.
1144    bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS {
1145      if (!c->IsPrimitive()) {
1146        classes.push_back(gRegistry->AddRefType(c));
1147      }
1148      return true;
1149    }
1150
1151    std::vector<JDWP::RefTypeId>& classes;
1152  };
1153
1154  ClassListCreator clc(classes);
1155  Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(ClassListCreator::Visit,
1156                                                                       &clc);
1157}
1158
1159JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
1160                                  uint32_t* pStatus, std::string* pDescriptor) {
1161  JDWP::JdwpError status;
1162  mirror::Class* c = DecodeClass(class_id, status);
1163  if (c == NULL) {
1164    return status;
1165  }
1166
1167  if (c->IsArrayClass()) {
1168    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
1169    *pTypeTag = JDWP::TT_ARRAY;
1170  } else {
1171    if (c->IsErroneous()) {
1172      *pStatus = JDWP::CS_ERROR;
1173    } else {
1174      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
1175    }
1176    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
1177  }
1178
1179  if (pDescriptor != NULL) {
1180    std::string temp;
1181    *pDescriptor = c->GetDescriptor(&temp);
1182  }
1183  return JDWP::ERR_NONE;
1184}
1185
1186void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>& ids) {
1187  std::vector<mirror::Class*> classes;
1188  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
1189  ids.clear();
1190  for (size_t i = 0; i < classes.size(); ++i) {
1191    ids.push_back(gRegistry->Add(classes[i]));
1192  }
1193}
1194
1195JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
1196    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1197  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1198  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1199    return JDWP::ERR_INVALID_OBJECT;
1200  }
1201
1202  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
1203  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());
1204
1205  expandBufAdd1(pReply, type_tag);
1206  expandBufAddRefTypeId(pReply, type_id);
1207
1208  return JDWP::ERR_NONE;
1209}
1210
1211JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
1212  JDWP::JdwpError status;
1213  mirror::Class* c = DecodeClass(class_id, status);
1214  if (c == NULL) {
1215    return status;
1216  }
1217  std::string temp;
1218  *signature = c->GetDescriptor(&temp);
1219  return JDWP::ERR_NONE;
1220}
1221
1222JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result) {
1223  JDWP::JdwpError status;
1224  mirror::Class* c = DecodeClass(class_id, status);
1225  if (c == nullptr) {
1226    return status;
1227  }
1228  const char* source_file = c->GetSourceFile();
1229  if (source_file == nullptr) {
1230    return JDWP::ERR_ABSENT_INFORMATION;
1231  }
1232  result = source_file;
1233  return JDWP::ERR_NONE;
1234}
1235
1236JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t& tag) {
1237  ScopedObjectAccessUnchecked soa(Thread::Current());
1238  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1239  if (o == ObjectRegistry::kInvalidObject) {
1240    return JDWP::ERR_INVALID_OBJECT;
1241  }
1242  tag = TagFromObject(soa, o);
1243  return JDWP::ERR_NONE;
1244}
1245
1246size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
1247  switch (tag) {
1248  case JDWP::JT_VOID:
1249    return 0;
1250  case JDWP::JT_BYTE:
1251  case JDWP::JT_BOOLEAN:
1252    return 1;
1253  case JDWP::JT_CHAR:
1254  case JDWP::JT_SHORT:
1255    return 2;
1256  case JDWP::JT_FLOAT:
1257  case JDWP::JT_INT:
1258    return 4;
1259  case JDWP::JT_ARRAY:
1260  case JDWP::JT_OBJECT:
1261  case JDWP::JT_STRING:
1262  case JDWP::JT_THREAD:
1263  case JDWP::JT_THREAD_GROUP:
1264  case JDWP::JT_CLASS_LOADER:
1265  case JDWP::JT_CLASS_OBJECT:
1266    return sizeof(JDWP::ObjectId);
1267  case JDWP::JT_DOUBLE:
1268  case JDWP::JT_LONG:
1269    return 8;
1270  default:
1271    LOG(FATAL) << "Unknown tag " << tag;
1272    return -1;
1273  }
1274}
1275
1276JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int& length) {
1277  JDWP::JdwpError status;
1278  mirror::Array* a = DecodeArray(array_id, status);
1279  if (a == NULL) {
1280    return status;
1281  }
1282  length = a->GetLength();
1283  return JDWP::ERR_NONE;
1284}
1285
1286JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
1287  JDWP::JdwpError status;
1288  mirror::Array* a = DecodeArray(array_id, status);
1289  if (a == nullptr) {
1290    return status;
1291  }
1292
1293  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
1294    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
1295    return JDWP::ERR_INVALID_LENGTH;
1296  }
1297  JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
1298  expandBufAdd1(pReply, element_tag);
1299  expandBufAdd4BE(pReply, count);
1300
1301  if (IsPrimitiveTag(element_tag)) {
1302    size_t width = GetTagWidth(element_tag);
1303    uint8_t* dst = expandBufAddSpace(pReply, count * width);
1304    if (width == 8) {
1305      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
1306      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
1307    } else if (width == 4) {
1308      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
1309      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
1310    } else if (width == 2) {
1311      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
1312      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
1313    } else {
1314      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
1315      memcpy(dst, &src[offset * width], count * width);
1316    }
1317  } else {
1318    ScopedObjectAccessUnchecked soa(Thread::Current());
1319    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
1320    for (int i = 0; i < count; ++i) {
1321      mirror::Object* element = oa->Get(offset + i);
1322      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
1323                                                        : element_tag;
1324      expandBufAdd1(pReply, specific_tag);
1325      expandBufAddObjectId(pReply, gRegistry->Add(element));
1326    }
1327  }
1328
1329  return JDWP::ERR_NONE;
1330}
1331
1332template <typename T>
1333static void CopyArrayData(mirror::Array* a, JDWP::Request& src, int offset, int count)
1334    NO_THREAD_SAFETY_ANALYSIS {
1335  // TODO: fix when annotalysis correctly handles non-member functions.
1336  DCHECK(a->GetClass()->IsPrimitiveArray());
1337
1338  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
1339  for (int i = 0; i < count; ++i) {
1340    *dst++ = src.ReadValue(sizeof(T));
1341  }
1342}
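// Illustrative usage: for a char[] (element width 2), CopyArrayData<uint16_t>(dst, request, offset,
// count) reads 'count' two-byte values from the JDWP request and stores them starting at element
// 'offset' of the destination array.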
1343
1344JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
1345                                      JDWP::Request& request)
1346    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1347  JDWP::JdwpError status;
1348  mirror::Array* dst = DecodeArray(array_id, status);
1349  if (dst == NULL) {
1350    return status;
1351  }
1352
1353  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
1354    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
1355    return JDWP::ERR_INVALID_LENGTH;
1356  }
1357  JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());
1358
1359  if (IsPrimitiveTag(element_tag)) {
1360    size_t width = GetTagWidth(element_tag);
1361    if (width == 8) {
1362      CopyArrayData<uint64_t>(dst, request, offset, count);
1363    } else if (width == 4) {
1364      CopyArrayData<uint32_t>(dst, request, offset, count);
1365    } else if (width == 2) {
1366      CopyArrayData<uint16_t>(dst, request, offset, count);
1367    } else {
1368      CopyArrayData<uint8_t>(dst, request, offset, count);
1369    }
1370  } else {
1371    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
1372    for (int i = 0; i < count; ++i) {
1373      JDWP::ObjectId id = request.ReadObjectId();
1374      mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
1375      if (o == ObjectRegistry::kInvalidObject) {
1376        return JDWP::ERR_INVALID_OBJECT;
1377      }
1378      oa->Set<false>(offset + i, o);
1379    }
1380  }
1381
1382  return JDWP::ERR_NONE;
1383}
1384
1385JDWP::ObjectId Dbg::CreateString(const std::string& str) {
1386  return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str()));
1387}
1388
1389JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId& new_object) {
1390  JDWP::JdwpError status;
1391  mirror::Class* c = DecodeClass(class_id, status);
1392  if (c == NULL) {
1393    return status;
1394  }
1395  new_object = gRegistry->Add(c->AllocObject(Thread::Current()));
1396  return JDWP::ERR_NONE;
1397}
1398
1399/*
1400 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
1401 */
1402JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
1403                                       JDWP::ObjectId& new_array) {
1404  JDWP::JdwpError status;
1405  mirror::Class* c = DecodeClass(array_class_id, status);
1406  if (c == NULL) {
1407    return status;
1408  }
1409  new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length,
1410                                                        c->GetComponentSize(),
1411                                                        Runtime::Current()->GetHeap()->GetCurrentAllocator()));
1412  return JDWP::ERR_NONE;
1413}
1414
1415bool Dbg::MatchType(JDWP::RefTypeId instance_class_id, JDWP::RefTypeId class_id) {
1416  JDWP::JdwpError status;
1417  mirror::Class* c1 = DecodeClass(instance_class_id, status);
1418  CHECK(c1 != NULL);
1419  mirror::Class* c2 = DecodeClass(class_id, status);
1420  CHECK(c2 != NULL);
1421  return c2->IsAssignableFrom(c1);
1422}
1423
1424static JDWP::FieldId ToFieldId(const mirror::ArtField* f)
1425    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1426  CHECK(!kMovingFields);
1427  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
1428}
1429
1430static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m)
1431    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1432  CHECK(!kMovingMethods);
1433  return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
1434}
1435
1436static mirror::ArtField* FromFieldId(JDWP::FieldId fid)
1437    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1438  CHECK(!kMovingFields);
1439  return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid));
1440}
1441
1442static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid)
1443    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1444  CHECK(!kMovingMethods);
1445  return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid));
1446}
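// Illustrative note: because fields and methods are non-moving (the CHECKs above), a FieldId or
// MethodId is just the native pointer value, so FromMethodId(ToMethodId(m)) == m round-trips
// without any lookup table.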
1447
1448static void SetLocation(JDWP::JdwpLocation& location, mirror::ArtMethod* m, uint32_t dex_pc)
1449    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1450  if (m == NULL) {
1451    memset(&location, 0, sizeof(location));
1452  } else {
1453    mirror::Class* c = m->GetDeclaringClass();
1454    location.type_tag = GetTypeTag(c);
1455    location.class_id = gRegistry->AddRefType(c);
1456    location.method_id = ToMethodId(m);
1457    location.dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
1458  }
1459}
1460
1461std::string Dbg::GetMethodName(JDWP::MethodId method_id)
1462    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1463  mirror::ArtMethod* m = FromMethodId(method_id);
1464  return m->GetName();
1465}
1466
1467std::string Dbg::GetFieldName(JDWP::FieldId field_id)
1468    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1469  return FromFieldId(field_id)->GetName();
1470}
1471
1472/*
1473 * Augment the access flags for synthetic methods and fields by setting
1474 * the (as described by the spec) "0xf0000000 bit".  Also, strip out any
1475 * flags not specified by the Java programming language.
1476 */
1477static uint32_t MangleAccessFlags(uint32_t accessFlags) {
1478  accessFlags &= kAccJavaFlagsMask;
1479  if ((accessFlags & kAccSynthetic) != 0) {
1480    accessFlags |= 0xf0000000;
1481  }
1482  return accessFlags;
1483}
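// Worked example (illustrative): a public synthetic member with flags kAccPublic | kAccSynthetic
// (0x0001 | 0x1000 = 0x1001) is reported to the debugger as 0xf0001001.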
1484
1485/*
1486 * Circularly shifts registers so that arguments come first. Debuggers
1487 * expect slots to begin with arguments, but dex code places them at
1488 * the end.
1489 */
1490static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m)
1491    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1492  const DexFile::CodeItem* code_item = m->GetCodeItem();
1493  if (code_item == nullptr) {
1494    // We should not get here for a method without code (native, proxy or abstract). Log it and
1495    // return the slot as is since all registers are arguments.
1496    LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m);
1497    return slot;
1498  }
1499  uint16_t ins_size = code_item->ins_size_;
1500  uint16_t locals_size = code_item->registers_size_ - ins_size;
1501  if (slot >= locals_size) {
1502    return slot - locals_size;
1503  } else {
1504    return slot + ins_size;
1505  }
1506}
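// Worked example (illustrative): for a method with registers_size_ == 5 and ins_size_ == 2
// (locals v0..v2, arguments v3..v4), MangleSlot maps v3->0, v4->1, v0->2, v1->3, v2->4;
// DemangleSlot below performs the inverse mapping.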
1507
1508/*
1509 * Circularly shifts registers so that arguments come last. Reverts
1510 * slots to dex style argument placement.
1511 */
1512static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m)
1513    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1514  const DexFile::CodeItem* code_item = m->GetCodeItem();
1515  if (code_item == nullptr) {
1516    // We should not get here for a method without code (native, proxy or abstract). Log it and
1517    // return the slot as is since all registers are arguments.
1518    LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
1519    return slot;
1520  }
1521  uint16_t ins_size = code_item->ins_size_;
1522  uint16_t locals_size = code_item->registers_size_ - ins_size;
1523  if (slot < ins_size) {
1524    return slot + locals_size;
1525  } else {
1526    return slot - ins_size;
1527  }
1528}
1529
1530JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
1531  JDWP::JdwpError status;
1532  mirror::Class* c = DecodeClass(class_id, status);
1533  if (c == NULL) {
1534    return status;
1535  }
1536
1537  size_t instance_field_count = c->NumInstanceFields();
1538  size_t static_field_count = c->NumStaticFields();
1539
1540  expandBufAdd4BE(pReply, instance_field_count + static_field_count);
1541
1542  for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
1543    mirror::ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
1544    expandBufAddFieldId(pReply, ToFieldId(f));
1545    expandBufAddUtf8String(pReply, f->GetName());
1546    expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
1547    if (with_generic) {
1548      static const char genericSignature[1] = "";
1549      expandBufAddUtf8String(pReply, genericSignature);
1550    }
1551    expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
1552  }
1553  return JDWP::ERR_NONE;
1554}
1555
1556JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
1557                                           JDWP::ExpandBuf* pReply) {
1558  JDWP::JdwpError status;
1559  mirror::Class* c = DecodeClass(class_id, status);
1560  if (c == NULL) {
1561    return status;
1562  }
1563
1564  size_t direct_method_count = c->NumDirectMethods();
1565  size_t virtual_method_count = c->NumVirtualMethods();
1566
1567  expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);
1568
1569  for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
1570    mirror::ArtMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count);
1571    expandBufAddMethodId(pReply, ToMethodId(m));
1572    expandBufAddUtf8String(pReply, m->GetName());
1573    expandBufAddUtf8String(pReply, m->GetSignature().ToString());
1574    if (with_generic) {
1575      static const char genericSignature[1] = "";
1576      expandBufAddUtf8String(pReply, genericSignature);
1577    }
1578    expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
1579  }
1580  return JDWP::ERR_NONE;
1581}
1582
1583JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1584  JDWP::JdwpError status;
1585  Thread* self = Thread::Current();
1586  StackHandleScope<1> hs(self);
1587  Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, status)));
1588  if (c.Get() == nullptr) {
1589    return status;
1590  }
1591  size_t interface_count = c->NumDirectInterfaces();
1592  expandBufAdd4BE(pReply, interface_count);
1593  for (size_t i = 0; i < interface_count; ++i) {
1594    expandBufAddRefTypeId(pReply,
1595                          gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i)));
1596  }
1597  return JDWP::ERR_NONE;
1598}
1599
1600void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply)
1601    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1602  struct DebugCallbackContext {
1603    int numItems;
1604    JDWP::ExpandBuf* pReply;
1605
1606    static bool Callback(void* context, uint32_t address, uint32_t line_number) {
1607      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1608      expandBufAdd8BE(pContext->pReply, address);
1609      expandBufAdd4BE(pContext->pReply, line_number);
1610      pContext->numItems++;
1611      return false;
1612    }
1613  };
1614  mirror::ArtMethod* m = FromMethodId(method_id);
1615  const DexFile::CodeItem* code_item = m->GetCodeItem();
1616  uint64_t start, end;
1617  if (code_item == nullptr) {
1618    DCHECK(m->IsNative() || m->IsProxyMethod());
1619    start = -1;
1620    end = -1;
1621  } else {
1622    start = 0;
1623    // Return the index of the last instruction
1624    end = code_item->insns_size_in_code_units_ - 1;
1625  }
1626
1627  expandBufAdd8BE(pReply, start);
1628  expandBufAdd8BE(pReply, end);
1629
1630  // Add numLines later
1631  size_t numLinesOffset = expandBufGetLength(pReply);
1632  expandBufAdd4BE(pReply, 0);
1633
1634  DebugCallbackContext context;
1635  context.numItems = 0;
1636  context.pReply = pReply;
1637
1638  if (code_item != nullptr) {
1639    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
1640                                     DebugCallbackContext::Callback, NULL, &context);
1641  }
1642
1643  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
1644}
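
// Sketch of the reply assembled above (values illustrative). The line count is
// written as 0 first and patched at numLinesOffset once the DecodeDebugInfo
// callback has counted the entries:
//
//   start          : 8 bytes BE, 0 (or -1 for native/proxy methods)
//   end            : 8 bytes BE, insns_size_in_code_units_ - 1
//   lines          : 4 bytes BE, e.g. 2        <- back-patched via JDWP::Set4BE
//   lineCodeIndex 0: 8 bytes BE, e.g. 0x0000   \  one pair per Callback()
//   lineNumber    0: 4 bytes BE, e.g. 42       /  invocation
//   lineCodeIndex 1: 8 bytes BE, e.g. 0x0005
//   lineNumber    1: 4 bytes BE, e.g. 43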
1645
1646void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
1647                              JDWP::ExpandBuf* pReply) {
1648  struct DebugCallbackContext {
1649    mirror::ArtMethod* method;
1650    JDWP::ExpandBuf* pReply;
1651    size_t variable_count;
1652    bool with_generic;
1653
1654    static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
1655                         const char* name, const char* descriptor, const char* signature)
1656        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1657      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1658
1659      VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
1660                                 pContext->variable_count, startAddress, endAddress - startAddress,
1661                                 name, descriptor, signature, slot,
1662                                 MangleSlot(slot, pContext->method));
1663
1664      slot = MangleSlot(slot, pContext->method);
1665
1666      expandBufAdd8BE(pContext->pReply, startAddress);
1667      expandBufAddUtf8String(pContext->pReply, name);
1668      expandBufAddUtf8String(pContext->pReply, descriptor);
1669      if (pContext->with_generic) {
1670        expandBufAddUtf8String(pContext->pReply, signature);
1671      }
1672      expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
1673      expandBufAdd4BE(pContext->pReply, slot);
1674
1675      ++pContext->variable_count;
1676    }
1677  };
1678  mirror::ArtMethod* m = FromMethodId(method_id);
1679
1680  // arg_count considers doubles and longs to take 2 units.
1681  // variable_count considers everything to take 1 unit.
1682  std::string shorty(m->GetShorty());
1683  expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty));
1684
1685  // We don't know the total number of variables yet, so leave a blank and update it later.
1686  size_t variable_count_offset = expandBufGetLength(pReply);
1687  expandBufAdd4BE(pReply, 0);
1688
1689  DebugCallbackContext context;
1690  context.method = m;
1691  context.pReply = pReply;
1692  context.variable_count = 0;
1693  context.with_generic = with_generic;
1694
1695  const DexFile::CodeItem* code_item = m->GetCodeItem();
1696  if (code_item != nullptr) {
1697    m->GetDexFile()->DecodeDebugInfo(
1698        code_item, m->IsStatic(), m->GetDexMethodIndex(), NULL, DebugCallbackContext::Callback,
1699        &context);
1700  }
1701
1702  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
1703}
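
// Example of the two counts above (illustrative): for shorty "VJI", i.e.
// void f(long, int), NumArgRegisters() reports 3 argument registers (2 for the
// long, 1 for the int), while variable_count ends up as the number of entries the
// debug-info callback emitted, with the long counted as a single variable.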
1704
1705void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
1706                                  JDWP::ExpandBuf* pReply) {
1707  mirror::ArtMethod* m = FromMethodId(method_id);
1708  JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
1709  OutputJValue(tag, return_value, pReply);
1710}
1711
1712void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
1713                           JDWP::ExpandBuf* pReply) {
1714  mirror::ArtField* f = FromFieldId(field_id);
1715  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1716  OutputJValue(tag, field_value, pReply);
1717}
1718
1719JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
1720                                  std::vector<uint8_t>& bytecodes)
1721    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1722  mirror::ArtMethod* m = FromMethodId(method_id);
1723  if (m == NULL) {
1724    return JDWP::ERR_INVALID_METHODID;
1725  }
1726  const DexFile::CodeItem* code_item = m->GetCodeItem();
1727  size_t byte_count = code_item->insns_size_in_code_units_ * 2;
1728  const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
1729  const uint8_t* end = begin + byte_count;
1730  for (const uint8_t* p = begin; p != end; ++p) {
1731    bytecodes.push_back(*p);
1732  }
1733  return JDWP::ERR_NONE;
1734}
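
// Size note (illustrative): insns_size_in_code_units_ counts 16-bit dex code units,
// so a method with 3 code units yields byte_count = 6 and the reply carries those
// 6 bytes exactly as laid out in the dex file. code_item is dereferenced without a
// null check, so this path assumes the method actually has code.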
1735
1736JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
1737  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1738}
1739
1740JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
1741  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1742}
1743
1744static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
1745                                         JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
1746                                         bool is_static)
1747    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1748  JDWP::JdwpError status;
1749  mirror::Class* c = DecodeClass(ref_type_id, status);
1750  if (ref_type_id != 0 && c == NULL) {
1751    return status;
1752  }
1753
1754  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1755  if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
1756    return JDWP::ERR_INVALID_OBJECT;
1757  }
1758  mirror::ArtField* f = FromFieldId(field_id);
1759
1760  mirror::Class* receiver_class = c;
1761  if (receiver_class == NULL && o != NULL) {
1762    receiver_class = o->GetClass();
1763  }
1764  // TODO: should we give up now if receiver_class is NULL?
1765  if (receiver_class != NULL && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
1766    LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
1767    return JDWP::ERR_INVALID_FIELDID;
1768  }
1769
1770  // The RI only enforces the static/non-static mismatch in one direction.
1771  // TODO: should we change the tests and check both?
1772  if (is_static) {
1773    if (!f->IsStatic()) {
1774      return JDWP::ERR_INVALID_FIELDID;
1775    }
1776  } else {
1777    if (f->IsStatic()) {
1778      LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.GetValues on static field " << PrettyField(f);
1779    }
1780  }
1781  if (f->IsStatic()) {
1782    o = f->GetDeclaringClass();
1783  }
1784
1785  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1786  JValue field_value;
1787  if (tag == JDWP::JT_VOID) {
1788    LOG(FATAL) << "Unknown tag: " << tag;
1789  } else if (!IsPrimitiveTag(tag)) {
1790    field_value.SetL(f->GetObject(o));
1791  } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1792    field_value.SetJ(f->Get64(o));
1793  } else {
1794    field_value.SetI(f->Get32(o));
1795  }
1796  Dbg::OutputJValue(tag, &field_value, pReply);
1797
1798  return JDWP::ERR_NONE;
1799}
1800
1801JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1802                                   JDWP::ExpandBuf* pReply) {
1803  return GetFieldValueImpl(0, object_id, field_id, pReply, false);
1804}
1805
1806JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) {
1807  return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
1808}
1809
1810static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1811                                         uint64_t value, int width, bool is_static)
1812    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1813  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1814  if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
1815    return JDWP::ERR_INVALID_OBJECT;
1816  }
1817  mirror::ArtField* f = FromFieldId(field_id);
1818
1819  // The RI only enforces the static/non-static mismatch in one direction.
1820  // TODO: should we change the tests and check both?
1821  if (is_static) {
1822    if (!f->IsStatic()) {
1823      return JDWP::ERR_INVALID_FIELDID;
1824    }
1825  } else {
1826    if (f->IsStatic()) {
1827      LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.SetValues on static field " << PrettyField(f);
1828    }
1829  }
1830  if (f->IsStatic()) {
1831    o = f->GetDeclaringClass();
1832  }
1833
1834  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1835
1836  if (IsPrimitiveTag(tag)) {
1837    if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1838      CHECK_EQ(width, 8);
1839      // Debugging can't use transactional mode (runtime only).
1840      f->Set64<false>(o, value);
1841    } else {
1842      CHECK_LE(width, 4);
1843      // Debugging can't use transactional mode (runtime only).
1844      f->Set32<false>(o, value);
1845    }
1846  } else {
1847    mirror::Object* v = gRegistry->Get<mirror::Object*>(value);
1848    if (v == ObjectRegistry::kInvalidObject) {
1849      return JDWP::ERR_INVALID_OBJECT;
1850    }
1851    if (v != NULL) {
1852      mirror::Class* field_type;
1853      {
1854        StackHandleScope<3> hs(Thread::Current());
1855        HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
1856        HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f));
1857        HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
1858        field_type = FieldHelper(h_f).GetType();
1859      }
1860      if (!field_type->IsAssignableFrom(v->GetClass())) {
1861        return JDWP::ERR_INVALID_OBJECT;
1862      }
1863    }
1864    // Debugging can't use transactional mode (runtime only).
1865    f->SetObject<false>(o, v);
1866  }
1867
1868  return JDWP::ERR_NONE;
1869}
1870
1871JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
1872                                   int width) {
1873  return SetFieldValueImpl(object_id, field_id, value, width, false);
1874}
1875
1876JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
1877  return SetFieldValueImpl(0, field_id, value, width, true);
1878}
1879
1880std::string Dbg::StringToUtf8(JDWP::ObjectId string_id) {
1881  mirror::String* s = gRegistry->Get<mirror::String*>(string_id);
1882  return s->ToModifiedUtf8();
1883}
1884
1885void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
1886  if (IsPrimitiveTag(tag)) {
1887    expandBufAdd1(pReply, tag);
1888    if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
1889      expandBufAdd1(pReply, return_value->GetI());
1890    } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
1891      expandBufAdd2BE(pReply, return_value->GetI());
1892    } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
1893      expandBufAdd4BE(pReply, return_value->GetI());
1894    } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1895      expandBufAdd8BE(pReply, return_value->GetJ());
1896    } else {
1897      CHECK_EQ(tag, JDWP::JT_VOID);
1898    }
1899  } else {
1900    ScopedObjectAccessUnchecked soa(Thread::Current());
1901    mirror::Object* value = return_value->GetL();
1902    expandBufAdd1(pReply, TagFromObject(soa, value));
1903    expandBufAddObjectId(pReply, gRegistry->Add(value));
1904  }
1905}
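
// Wire format produced above, per tag (the tag byte always comes first):
//   JT_BOOLEAN / JT_BYTE   -> 1 value byte
//   JT_CHAR / JT_SHORT     -> 2 bytes, big-endian
//   JT_INT / JT_FLOAT      -> 4 bytes, big-endian
//   JT_LONG / JT_DOUBLE    -> 8 bytes, big-endian
//   reference tags         -> ObjectId, with the tag refined by TagFromObject
// e.g. an int value of 7 goes out as 'I' (0x49) followed by 00 00 00 07.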
1906
1907JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string& name) {
1908  ScopedObjectAccessUnchecked soa(Thread::Current());
1909  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1910  Thread* thread;
1911  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1912  if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
1913    return error;
1914  }
1915
1916  // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
1917  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
1918  mirror::ArtField* java_lang_Thread_name_field =
1919      soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
1920  mirror::String* s =
1921      reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
1922  if (s != NULL) {
1923    name = s->ToModifiedUtf8();
1924  }
1925  return JDWP::ERR_NONE;
1926}
1927
1928JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
1929  ScopedObjectAccess soa(Thread::Current());
1930  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
1931  if (thread_object == ObjectRegistry::kInvalidObject) {
1932    return JDWP::ERR_INVALID_OBJECT;
1933  }
1934  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroup");
1935  // Okay, so it's an object, but is it actually a thread?
1936  JDWP::JdwpError error;
1937  {
1938    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1939    Thread* thread;
1940    error = DecodeThread(soa, thread_id, thread);
1941  }
1942  if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
1943    // Zombie threads are in the null group.
1944    expandBufAddObjectId(pReply, JDWP::ObjectId(0));
1945    error = JDWP::ERR_NONE;
1946  } else if (error == JDWP::ERR_NONE) {
1947    mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
1948    CHECK(c != nullptr);
1949    mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
1950    CHECK(f != nullptr);
1951    mirror::Object* group = f->GetObject(thread_object);
1952    CHECK(group != nullptr);
1953    JDWP::ObjectId thread_group_id = gRegistry->Add(group);
1954    expandBufAddObjectId(pReply, thread_group_id);
1955  }
1956  soa.Self()->EndAssertNoThreadSuspension(old_cause);
1957  return error;
1958}
1959
1960std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) {
1961  ScopedObjectAccess soa(Thread::Current());
1962  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
1963  CHECK(thread_group != nullptr);
1964  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
1965  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
1966  CHECK(c != nullptr);
1967  mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
1968  CHECK(f != NULL);
1969  mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
1970  soa.Self()->EndAssertNoThreadSuspension(old_cause);
1971  return s->ToModifiedUtf8();
1972}
1973
1974JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) {
1975  ScopedObjectAccessUnchecked soa(Thread::Current());
1976  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
1977  CHECK(thread_group != nullptr);
1978  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent");
1979  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
1980  CHECK(c != nullptr);
1981  mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
1982  CHECK(f != NULL);
1983  mirror::Object* parent = f->GetObject(thread_group);
1984  soa.Self()->EndAssertNoThreadSuspension(old_cause);
1985  return gRegistry->Add(parent);
1986}
1987
1988JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
1989  ScopedObjectAccessUnchecked soa(Thread::Current());
1990  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
1991  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
1992  return gRegistry->Add(group);
1993}
1994
1995JDWP::ObjectId Dbg::GetMainThreadGroupId() {
1996  ScopedObjectAccess soa(Thread::Current());
1997  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup);
1998  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
1999  return gRegistry->Add(group);
2000}
2001
2002JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
2003  switch (state) {
2004    case kBlocked:
2005      return JDWP::TS_MONITOR;
2006    case kNative:
2007    case kRunnable:
2008    case kSuspended:
2009      return JDWP::TS_RUNNING;
2010    case kSleeping:
2011      return JDWP::TS_SLEEPING;
2012    case kStarting:
2013    case kTerminated:
2014      return JDWP::TS_ZOMBIE;
2015    case kTimedWaiting:
2016    case kWaitingForCheckPointsToRun:
2017    case kWaitingForDebuggerSend:
2018    case kWaitingForDebuggerSuspension:
2019    case kWaitingForDebuggerToAttach:
2020    case kWaitingForDeoptimization:
2021    case kWaitingForGcToComplete:
2022    case kWaitingForJniOnLoad:
2023    case kWaitingForMethodTracingStart:
2024    case kWaitingForSignalCatcherOutput:
2025    case kWaitingInMainDebuggerLoop:
2026    case kWaitingInMainSignalCatcherLoop:
2027    case kWaitingPerformingGc:
2028    case kWaiting:
2029      return JDWP::TS_WAIT;
2030      // Don't add a 'default' here so the compiler can spot incompatible enum changes.
2031  }
2032  LOG(FATAL) << "Unknown thread state: " << state;
2033  return JDWP::TS_ZOMBIE;
2034}
2035
2036JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
2037                                     JDWP::JdwpSuspendStatus* pSuspendStatus) {
2038  ScopedObjectAccess soa(Thread::Current());
2039
2040  *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
2041
2042  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2043  Thread* thread;
2044  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2045  if (error != JDWP::ERR_NONE) {
2046    if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
2047      *pThreadStatus = JDWP::TS_ZOMBIE;
2048      return JDWP::ERR_NONE;
2049    }
2050    return error;
2051  }
2052
2053  if (IsSuspendedForDebugger(soa, thread)) {
2054    *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
2055  }
2056
2057  *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
2058  return JDWP::ERR_NONE;
2059}
2060
2061JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
2062  ScopedObjectAccess soa(Thread::Current());
2063  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2064  Thread* thread;
2065  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2066  if (error != JDWP::ERR_NONE) {
2067    return error;
2068  }
2069  MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2070  expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
2071  return JDWP::ERR_NONE;
2072}
2073
2074JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
2075  ScopedObjectAccess soa(Thread::Current());
2076  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2077  Thread* thread;
2078  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2079  if (error != JDWP::ERR_NONE) {
2080    return error;
2081  }
2082  thread->Interrupt(soa.Self());
2083  return JDWP::ERR_NONE;
2084}
2085
2086static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
2087                                   mirror::Object* desired_thread_group, mirror::Object* peer)
2088    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2089  // Do we want threads from all thread groups?
2090  if (desired_thread_group == nullptr) {
2091    return true;
2092  }
2093  mirror::ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
2094  DCHECK(thread_group_field != nullptr);
2095  mirror::Object* group = thread_group_field->GetObject(peer);
2096  return (group == desired_thread_group);
2097}
2098
2099void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& thread_ids) {
2100  ScopedObjectAccessUnchecked soa(Thread::Current());
2101  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
2102  std::list<Thread*> all_threads_list;
2103  {
2104    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
2105    all_threads_list = Runtime::Current()->GetThreadList()->GetList();
2106  }
2107  for (Thread* t : all_threads_list) {
2108    if (t == Dbg::GetDebugThread()) {
2109      // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
2110      // query all threads, so it's easier if we just don't tell them about this thread.
2111      continue;
2112    }
2113    if (t->IsStillStarting()) {
2114      // This thread is being started (and has been registered in the thread list). However, it is
2115      // not completely started yet so we must ignore it.
2116      continue;
2117    }
2118    mirror::Object* peer = t->GetPeer();
2119    if (peer == nullptr) {
2120      // peer might be NULL if the thread is still starting up. We can't tell the debugger about
2121      // this thread yet.
2122      // TODO: if we identified threads to the debugger by their Thread*
2123      // rather than their peer's mirror::Object*, we could fix this.
2124      // Doing so might help us report ZOMBIE threads too.
2125      continue;
2126    }
2127    if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
2128      thread_ids.push_back(gRegistry->Add(peer));
2129    }
2130  }
2131}
2132
2133void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& child_thread_group_ids) {
2134  ScopedObjectAccess soa(Thread::Current());
2135  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
2136
2137  // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
2138  mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
2139  mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
2140
2141  // Get the array and size out of the ArrayList<ThreadGroup>...
2142  mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
2143  mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
2144  mirror::ObjectArray<mirror::Object>* groups_array =
2145      array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
2146  const int32_t size = size_field->GetInt(groups_array_list);
2147
2148  // Copy the first 'size' elements out of the array into the result.
2149  for (int32_t i = 0; i < size; ++i) {
2150    child_thread_group_ids.push_back(gRegistry->Add(groups_array->Get(i)));
2151  }
2152}
2153
2154static int GetStackDepth(Thread* thread)
2155    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2156  struct CountStackDepthVisitor : public StackVisitor {
2157    explicit CountStackDepthVisitor(Thread* thread)
2158        : StackVisitor(thread, NULL), depth(0) {}
2159
2160    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2161    // annotalysis.
2162    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2163      if (!GetMethod()->IsRuntimeMethod()) {
2164        ++depth;
2165      }
2166      return true;
2167    }
2168    size_t depth;
2169  };
2170
2171  CountStackDepthVisitor visitor(thread);
2172  visitor.WalkStack();
2173  return visitor.depth;
2174}
2175
2176JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t& result) {
2177  ScopedObjectAccess soa(Thread::Current());
2178  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2179  Thread* thread;
2180  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2181  if (error != JDWP::ERR_NONE) {
2182    return error;
2183  }
2184  if (!IsSuspendedForDebugger(soa, thread)) {
2185    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2186  }
2187  result = GetStackDepth(thread);
2188  return JDWP::ERR_NONE;
2189}
2190
2191JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
2192                                     size_t frame_count, JDWP::ExpandBuf* buf) {
2193  class GetFrameVisitor : public StackVisitor {
2194   public:
2195    GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf)
2196        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2197        : StackVisitor(thread, NULL), depth_(0),
2198          start_frame_(start_frame), frame_count_(frame_count), buf_(buf) {
2199      expandBufAdd4BE(buf_, frame_count_);
2200    }
2201
2202    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2203    // annotalysis.
2204    virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2205      if (GetMethod()->IsRuntimeMethod()) {
2206        return true;  // The debugger can't do anything useful with a frame that has no Method*.
2207      }
2208      if (depth_ >= start_frame_ + frame_count_) {
2209        return false;
2210      }
2211      if (depth_ >= start_frame_) {
2212        JDWP::FrameId frame_id(GetFrameId());
2213        JDWP::JdwpLocation location;
2214        SetLocation(location, GetMethod(), GetDexPc());
2215        VLOG(jdwp) << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
2216        expandBufAdd8BE(buf_, frame_id);
2217        expandBufAddLocation(buf_, location);
2218      }
2219      ++depth_;
2220      return true;
2221    }
2222
2223   private:
2224    size_t depth_;
2225    const size_t start_frame_;
2226    const size_t frame_count_;
2227    JDWP::ExpandBuf* buf_;
2228  };
2229
2230  ScopedObjectAccessUnchecked soa(Thread::Current());
2231  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2232  Thread* thread;
2233  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2234  if (error != JDWP::ERR_NONE) {
2235    return error;
2236  }
2237  if (!IsSuspendedForDebugger(soa, thread)) {
2238    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2239  }
2240  GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
2241  visitor.WalkStack();
2242  return JDWP::ERR_NONE;
2243}
2244
2245JDWP::ObjectId Dbg::GetThreadSelfId() {
2246  ScopedObjectAccessUnchecked soa(Thread::Current());
2247  return gRegistry->Add(soa.Self()->GetPeer());
2248}
2249
2250void Dbg::SuspendVM() {
2251  Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
2252}
2253
2254void Dbg::ResumeVM() {
2255  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
2256}
2257
2258JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
2259  Thread* self = Thread::Current();
2260  ScopedLocalRef<jobject> peer(self->GetJniEnv(), NULL);
2261  {
2262    ScopedObjectAccess soa(self);
2263    peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id)));
2264  }
2265  if (peer.get() == NULL) {
2266    return JDWP::ERR_THREAD_NOT_ALIVE;
2267  }
2268  // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads
2269  // trying to suspend this one.
2270  MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
2271  bool timed_out;
2272  ThreadList* thread_list = Runtime::Current()->GetThreadList();
2273  Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
2274                                                    &timed_out);
2275  if (thread != NULL) {
2276    return JDWP::ERR_NONE;
2277  } else if (timed_out) {
2278    return JDWP::ERR_INTERNAL;
2279  } else {
2280    return JDWP::ERR_THREAD_NOT_ALIVE;
2281  }
2282}
2283
2284void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
2285  ScopedObjectAccessUnchecked soa(Thread::Current());
2286  mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id);
2287  Thread* thread;
2288  {
2289    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2290    thread = Thread::FromManagedThread(soa, peer);
2291  }
2292  if (thread == NULL) {
2293    LOG(WARNING) << "No such thread for resume: " << peer;
2294    return;
2295  }
2296  bool needs_resume;
2297  {
2298    MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2299    needs_resume = thread->GetSuspendCount() > 0;
2300  }
2301  if (needs_resume) {
2302    Runtime::Current()->GetThreadList()->Resume(thread, true);
2303  }
2304}
2305
2306void Dbg::SuspendSelf() {
2307  Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
2308}
2309
2310struct GetThisVisitor : public StackVisitor {
2311  GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
2312      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2313      : StackVisitor(thread, context), this_object(NULL), frame_id(frame_id) {}
2314
2315  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2316  // annotalysis.
2317  virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2318    if (frame_id != GetFrameId()) {
2319      return true;  // continue
2320    } else {
2321      this_object = GetThisObject();
2322      return false;
2323    }
2324  }
2325
2326  mirror::Object* this_object;
2327  JDWP::FrameId frame_id;
2328};
2329
2330JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
2331                                   JDWP::ObjectId* result) {
2332  ScopedObjectAccessUnchecked soa(Thread::Current());
2333  Thread* thread;
2334  {
2335    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2336    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2337    if (error != JDWP::ERR_NONE) {
2338      return error;
2339    }
2340    if (!IsSuspendedForDebugger(soa, thread)) {
2341      return JDWP::ERR_THREAD_NOT_SUSPENDED;
2342    }
2343  }
2344  std::unique_ptr<Context> context(Context::Create());
2345  GetThisVisitor visitor(thread, context.get(), frame_id);
2346  visitor.WalkStack();
2347  *result = gRegistry->Add(visitor.this_object);
2348  return JDWP::ERR_NONE;
2349}
2350
2351JDWP::JdwpError Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
2352                                   JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
2353  struct GetLocalVisitor : public StackVisitor {
2354    GetLocalVisitor(const ScopedObjectAccessUnchecked& soa, Thread* thread, Context* context,
2355                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width)
2356        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2357        : StackVisitor(thread, context), soa_(soa), frame_id_(frame_id), slot_(slot), tag_(tag),
2358          buf_(buf), width_(width), error_(JDWP::ERR_NONE) {}
2359
2360    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2361    // annotalysis.
2362    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2363      if (GetFrameId() != frame_id_) {
2364        return true;  // Not our frame, carry on.
2365      }
2366      // TODO: check that the tag is compatible with the actual type of the slot!
2367      // TODO: check slot is valid for this method or return INVALID_SLOT error.
2368      mirror::ArtMethod* m = GetMethod();
2369      if (m->IsNative()) {
2370        // We can't read a local value from a native method.
2371        error_ = JDWP::ERR_OPAQUE_FRAME;
2372        return false;
2373      }
2374      uint16_t reg = DemangleSlot(slot_, m);
2375      constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
2376      switch (tag_) {
2377        case JDWP::JT_BOOLEAN: {
2378          CHECK_EQ(width_, 1U);
2379          uint32_t intVal;
2380          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2381            VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
2382            JDWP::Set1(buf_+1, intVal != 0);
2383          } else {
2384            VLOG(jdwp) << "failed to get boolean local " << reg;
2385            error_ = kFailureErrorCode;
2386          }
2387          break;
2388        }
2389        case JDWP::JT_BYTE: {
2390          CHECK_EQ(width_, 1U);
2391          uint32_t intVal;
2392          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2393            VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
2394            JDWP::Set1(buf_+1, intVal);
2395          } else {
2396            VLOG(jdwp) << "failed to get byte local " << reg;
2397            error_ = kFailureErrorCode;
2398          }
2399          break;
2400        }
2401        case JDWP::JT_SHORT:
2402        case JDWP::JT_CHAR: {
2403          CHECK_EQ(width_, 2U);
2404          uint32_t intVal;
2405          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2406            VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
2407            JDWP::Set2BE(buf_+1, intVal);
2408          } else {
2409            VLOG(jdwp) << "failed to get short/char local " << reg;
2410            error_ = kFailureErrorCode;
2411          }
2412          break;
2413        }
2414        case JDWP::JT_INT: {
2415          CHECK_EQ(width_, 4U);
2416          uint32_t intVal;
2417          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2418            VLOG(jdwp) << "get int local " << reg << " = " << intVal;
2419            JDWP::Set4BE(buf_+1, intVal);
2420          } else {
2421            VLOG(jdwp) << "failed to get int local " << reg;
2422            error_ = kFailureErrorCode;
2423          }
2424          break;
2425        }
2426        case JDWP::JT_FLOAT: {
2427          CHECK_EQ(width_, 4U);
2428          uint32_t intVal;
2429          if (GetVReg(m, reg, kFloatVReg, &intVal)) {
2430            VLOG(jdwp) << "get float local " << reg << " = " << intVal;
2431            JDWP::Set4BE(buf_+1, intVal);
2432          } else {
2433            VLOG(jdwp) << "failed to get float local " << reg;
2434            error_ = kFailureErrorCode;
2435          }
2436          break;
2437        }
2438        case JDWP::JT_ARRAY:
2439        case JDWP::JT_CLASS_LOADER:
2440        case JDWP::JT_CLASS_OBJECT:
2441        case JDWP::JT_OBJECT:
2442        case JDWP::JT_STRING:
2443        case JDWP::JT_THREAD:
2444        case JDWP::JT_THREAD_GROUP: {
2445          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
2446          uint32_t intVal;
2447          if (GetVReg(m, reg, kReferenceVReg, &intVal)) {
2448            mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
2449            VLOG(jdwp) << "get " << tag_ << " object local " << reg << " = " << o;
2450            if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
2451              LOG(FATAL) << "Register " << reg << " expected to hold " << tag_ << " object: " << o;
2452            }
2453            tag_ = TagFromObject(soa_, o);
2454            JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
2455          } else {
2456            VLOG(jdwp) << "failed to get " << tag_ << " object local " << reg;
2457            error_ = kFailureErrorCode;
2458          }
2459          break;
2460        }
2461        case JDWP::JT_DOUBLE: {
2462          CHECK_EQ(width_, 8U);
2463          uint64_t longVal;
2464          if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
2465            VLOG(jdwp) << "get double local " << reg << " = " << longVal;
2466            JDWP::Set8BE(buf_+1, longVal);
2467          } else {
2468            VLOG(jdwp) << "failed to get double local " << reg;
2469            error_ = kFailureErrorCode;
2470          }
2471          break;
2472        }
2473        case JDWP::JT_LONG: {
2474          CHECK_EQ(width_, 8U);
2475          uint64_t longVal;
2476          if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) {
2477            VLOG(jdwp) << "get long local " << reg << " = " << longVal;
2478            JDWP::Set8BE(buf_+1, longVal);
2479          } else {
2480            VLOG(jdwp) << "failed to get long local " << reg;
2481            error_ = kFailureErrorCode;
2482          }
2483          break;
2484        }
2485        default:
2486          LOG(FATAL) << "Unknown tag " << tag_;
2487          break;
2488      }
2489
2490      // Prepend tag, which may have been updated.
2491      JDWP::Set1(buf_, tag_);
2492      return false;
2493    }
2494    const ScopedObjectAccessUnchecked& soa_;
2495    const JDWP::FrameId frame_id_;
2496    const int slot_;
2497    JDWP::JdwpTag tag_;
2498    uint8_t* const buf_;
2499    const size_t width_;
2500    JDWP::JdwpError error_;
2501  };
2502
2503  ScopedObjectAccessUnchecked soa(Thread::Current());
2504  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2505  Thread* thread;
2506  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2507  if (error != JDWP::ERR_NONE) {
2508    return error;
2509  }
2510  // TODO: check that the thread is suspended by the debugger?
2511  std::unique_ptr<Context> context(Context::Create());
2512  GetLocalVisitor visitor(soa, thread, context.get(), frame_id, slot, tag, buf, width);
2513  visitor.WalkStack();
2514  return visitor.error_;
2515}
2516
2517JDWP::JdwpError Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
2518                                   JDWP::JdwpTag tag, uint64_t value, size_t width) {
2519  struct SetLocalVisitor : public StackVisitor {
2520    SetLocalVisitor(Thread* thread, Context* context,
2521                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value,
2522                    size_t width)
2523        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2524        : StackVisitor(thread, context),
2525          frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width),
2526          error_(JDWP::ERR_NONE) {}
2527
2528    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2529    // annotalysis.
2530    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2531      if (GetFrameId() != frame_id_) {
2532        return true;  // Not our frame, carry on.
2533      }
2534      // TODO: check that the tag is compatible with the actual type of the slot!
2535      // TODO: check slot is valid for this method or return INVALID_SLOT error.
2536      mirror::ArtMethod* m = GetMethod();
2537      if (m->IsNative()) {
2538        // We can't write a local value to a native method.
2539        error_ = JDWP::ERR_OPAQUE_FRAME;
2540        return false;
2541      }
2542      uint16_t reg = DemangleSlot(slot_, m);
2543      constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
2544      switch (tag_) {
2545        case JDWP::JT_BOOLEAN:
2546        case JDWP::JT_BYTE:
2547          CHECK_EQ(width_, 1U);
2548          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2549            VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = "
2550                       << static_cast<uint32_t>(value_);
2551            error_ = kFailureErrorCode;
2552          }
2553          break;
2554        case JDWP::JT_SHORT:
2555        case JDWP::JT_CHAR:
2556          CHECK_EQ(width_, 2U);
2557          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2558            VLOG(jdwp) << "failed to set short/char local " << reg << " = "
2559                       << static_cast<uint32_t>(value_);
2560            error_ = kFailureErrorCode;
2561          }
2562          break;
2563        case JDWP::JT_INT:
2564          CHECK_EQ(width_, 4U);
2565          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2566            VLOG(jdwp) << "failed to set int local " << reg << " = "
2567                       << static_cast<uint32_t>(value_);
2568            error_ = kFailureErrorCode;
2569          }
2570          break;
2571        case JDWP::JT_FLOAT:
2572          CHECK_EQ(width_, 4U);
2573          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kFloatVReg)) {
2574            VLOG(jdwp) << "failed to set float local " << reg << " = "
2575                       << static_cast<uint32_t>(value_);
2576            error_ = kFailureErrorCode;
2577          }
2578          break;
2579        case JDWP::JT_ARRAY:
2580        case JDWP::JT_CLASS_LOADER:
2581        case JDWP::JT_CLASS_OBJECT:
2582        case JDWP::JT_OBJECT:
2583        case JDWP::JT_STRING:
2584        case JDWP::JT_THREAD:
2585        case JDWP::JT_THREAD_GROUP: {
2586          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
2587          mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value_));
2588          if (o == ObjectRegistry::kInvalidObject) {
2589            VLOG(jdwp) << tag_ << " object " << o << " is an invalid object";
2590            error_ = JDWP::ERR_INVALID_OBJECT;
2591          } else if (!SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
2592                              kReferenceVReg)) {
2593            VLOG(jdwp) << "failed to set " << tag_ << " object local " << reg << " = " << o;
2594            error_ = kFailureErrorCode;
2595          }
2596          break;
2597        }
2598        case JDWP::JT_DOUBLE: {
2599          CHECK_EQ(width_, 8U);
2600          bool success = SetVRegPair(m, reg, value_, kDoubleLoVReg, kDoubleHiVReg);
2601          if (!success) {
2602            VLOG(jdwp) << "failed to set double local " << reg << " = " << value_;
2603            error_ = kFailureErrorCode;
2604          }
2605          break;
2606        }
2607        case JDWP::JT_LONG: {
2608          CHECK_EQ(width_, 8U);
2609          bool success = SetVRegPair(m, reg, value_, kLongLoVReg, kLongHiVReg);
2610          if (!success) {
2611            VLOG(jdwp) << "failed to set long local " << reg << " = " << value_;
2612            error_ = kFailureErrorCode;
2613          }
2614          break;
2615        }
2616        default:
2617          LOG(FATAL) << "Unknown tag " << tag_;
2618          break;
2619      }
2620      return false;
2621    }
2622
2623    const JDWP::FrameId frame_id_;
2624    const int slot_;
2625    const JDWP::JdwpTag tag_;
2626    const uint64_t value_;
2627    const size_t width_;
2628    JDWP::JdwpError error_;
2629  };
2630
2631  ScopedObjectAccessUnchecked soa(Thread::Current());
2632  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2633  Thread* thread;
2634  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2635  if (error != JDWP::ERR_NONE) {
2636    return error;
2637  }
2638  // TODO: check that the thread is suspended by the debugger?
2639  std::unique_ptr<Context> context(Context::Create());
2640  SetLocalVisitor visitor(thread, context.get(), frame_id, slot, tag, value, width);
2641  visitor.WalkStack();
2642  return visitor.error_;
2643}
2644
2645JDWP::ObjectId Dbg::GetThisObjectIdForEvent(mirror::Object* this_object) {
2646  // If 'this_object' isn't already in the registry, we know that we're not looking for it, so
2647  // there's no point adding it to the registry and burning through ids.
2648  // When registering an event request with an instance filter, we've been given an existing object
2649  // id so it must already be present in the registry when the event fires.
2650  JDWP::ObjectId this_id = 0;
2651  if (this_object != nullptr && gRegistry->Contains(this_object)) {
2652    this_id = gRegistry->Add(this_object);
2653  }
2654  return this_id;
2655}
2656
2657void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
2658                            int event_flags, const JValue* return_value) {
2659  if (!IsDebuggerActive()) {
2660    return;
2661  }
2662  DCHECK(m != nullptr);
2663  DCHECK_EQ(m->IsStatic(), this_object == nullptr);
2664  JDWP::JdwpLocation location;
2665  SetLocation(location, m, dex_pc);
2666
2667  // We need 'this' for InstanceOnly filters only.
2668  JDWP::ObjectId this_id = GetThisObjectIdForEvent(this_object);
2669  gJdwpState->PostLocationEvent(&location, this_id, event_flags, return_value);
2670}
2671
2672void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc,
2673                               mirror::Object* this_object, mirror::ArtField* f) {
2674  if (!IsDebuggerActive()) {
2675    return;
2676  }
2677  DCHECK(m != nullptr);
2678  DCHECK(f != nullptr);
2679  JDWP::JdwpLocation location;
2680  SetLocation(location, m, dex_pc);
2681
2682  JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass());
2683  JDWP::FieldId field_id = ToFieldId(f);
2684  JDWP::ObjectId this_id = gRegistry->Add(this_object);
2685
2686  gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, nullptr, false);
2687}
2688
2689void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
2690                                     mirror::Object* this_object, mirror::ArtField* f,
2691                                     const JValue* field_value) {
2692  if (!IsDebuggerActive()) {
2693    return;
2694  }
2695  DCHECK(m != nullptr);
2696  DCHECK(f != nullptr);
2697  DCHECK(field_value != nullptr);
2698  JDWP::JdwpLocation location;
2699  SetLocation(location, m, dex_pc);
2700
2701  JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass());
2702  JDWP::FieldId field_id = ToFieldId(f);
2703  JDWP::ObjectId this_id = gRegistry->Add(this_object);
2704
2705  gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, field_value, true);
2706}
2707
2708void Dbg::PostException(const ThrowLocation& throw_location,
2709                        mirror::ArtMethod* catch_method,
2710                        uint32_t catch_dex_pc, mirror::Throwable* exception_object) {
2711  if (!IsDebuggerActive()) {
2712    return;
2713  }
2714
2715  JDWP::JdwpLocation jdwp_throw_location;
2716  SetLocation(jdwp_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
2717  JDWP::JdwpLocation catch_location;
2718  SetLocation(catch_location, catch_method, catch_dex_pc);
2719
2720  // We need 'this' for InstanceOnly filters only.
2721  JDWP::ObjectId this_id = GetThisObjectIdForEvent(throw_location.GetThis());
2722  JDWP::ObjectId exception_id = gRegistry->Add(exception_object);
2723  JDWP::RefTypeId exception_class_id = gRegistry->AddRefType(exception_object->GetClass());
2724
2725  gJdwpState->PostException(&jdwp_throw_location, exception_id, exception_class_id, &catch_location,
2726                            this_id);
2727}
2728
2729void Dbg::PostClassPrepare(mirror::Class* c) {
2730  if (!IsDebuggerActive()) {
2731    return;
2732  }
2733
2734  // OLD-TODO - we currently always send both "verified" and "prepared" since
2735  // debuggers seem to like that.  There might be some advantage to honesty,
2736  // since the class may not yet be verified.
2737  int state = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
2738  JDWP::JdwpTypeTag tag = GetTypeTag(c);
2739  std::string temp;
2740  gJdwpState->PostClassPrepare(tag, gRegistry->Add(c), c->GetDescriptor(&temp), state);
2741}
2742
2743void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
2744                         mirror::ArtMethod* m, uint32_t dex_pc,
2745                         int event_flags, const JValue* return_value) {
2746  if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
2747    return;
2748  }
2749
2750  if (IsBreakpoint(m, dex_pc)) {
2751    event_flags |= kBreakpoint;
2752  }
2753
2754  // If the debugger is single-stepping one of our threads, check to
2755  // see if we're that thread and we've reached a step point.
2756  const SingleStepControl* single_step_control = thread->GetSingleStepControl();
2757  DCHECK(single_step_control != nullptr);
2758  if (single_step_control->is_active) {
2759    CHECK(!m->IsNative());
2760    if (single_step_control->step_depth == JDWP::SD_INTO) {
2761      // Step into method calls.  We break when the line number
2762      // or method pointer changes.  If we're in SS_MIN mode, we
2763      // always stop.
2764      if (single_step_control->method != m) {
2765        event_flags |= kSingleStep;
2766        VLOG(jdwp) << "SS new method";
2767      } else if (single_step_control->step_size == JDWP::SS_MIN) {
2768        event_flags |= kSingleStep;
2769        VLOG(jdwp) << "SS new instruction";
2770      } else if (single_step_control->ContainsDexPc(dex_pc) == false) {
2771        event_flags |= kSingleStep;
2772        VLOG(jdwp) << "SS new line";
2773      }
2774    } else if (single_step_control->step_depth == JDWP::SD_OVER) {
2775      // Step over method calls.  We break when the line number is
2776      // different and the frame depth is <= the original frame
2777      // depth.  (We can't just compare on the method, because we
2778      // might get unrolled past it by an exception, and it's tricky
2779      // to identify recursion.)
2780
2781      int stack_depth = GetStackDepth(thread);
2782
2783      if (stack_depth < single_step_control->stack_depth) {
2784        // Popped up one or more frames, always trigger.
2785        event_flags |= kSingleStep;
2786        VLOG(jdwp) << "SS method pop";
2787      } else if (stack_depth == single_step_control->stack_depth) {
2788        // Same depth, see if we moved.
2789        if (single_step_control->step_size == JDWP::SS_MIN) {
2790          event_flags |= kSingleStep;
2791          VLOG(jdwp) << "SS new instruction";
2792        } else if (single_step_control->ContainsDexPc(dex_pc) == false) {
2793          event_flags |= kSingleStep;
2794          VLOG(jdwp) << "SS new line";
2795        }
2796      }
2797    } else {
2798      CHECK_EQ(single_step_control->step_depth, JDWP::SD_OUT);
2799      // Return from the current method.  We break when the frame
2800      // depth pops up.
2801
2802      // This differs from the "method exit" break in that it stops
2803      // with the PC at the next instruction in the returned-to
2804      // function, rather than the end of the returning function.
2805
2806      int stack_depth = GetStackDepth(thread);
2807      if (stack_depth < single_step_control->stack_depth) {
2808        event_flags |= kSingleStep;
2809        VLOG(jdwp) << "SS method pop";
2810      }
2811    }
2812  }
2813
2814  // If there's something interesting going on, see if it matches one
2815  // of the debugger filters.
2816  if (event_flags != 0) {
2817    Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
2818  }
2819}
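
// Walk-through of the SD_OVER case above (illustrative): suppose a step-over with
// SS_LINE was requested from a frame at stack depth 3, on a line covering dex pcs
// [0x10, 0x18). While the stepped-over call runs at depth 4, neither depth check
// fires. Back at depth 3, the event is posted as soon as the dex pc leaves
// [0x10, 0x18); and if an exception unwinds past the caller (depth < 3),
// "SS method pop" triggers immediately regardless of the pc.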
2820
2821size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
2822  switch (instrumentation_event) {
2823    case instrumentation::Instrumentation::kMethodEntered:
2824      return &method_enter_event_ref_count_;
2825    case instrumentation::Instrumentation::kMethodExited:
2826      return &method_exit_event_ref_count_;
2827    case instrumentation::Instrumentation::kDexPcMoved:
2828      return &dex_pc_change_event_ref_count_;
2829    case instrumentation::Instrumentation::kFieldRead:
2830      return &field_read_event_ref_count_;
2831    case instrumentation::Instrumentation::kFieldWritten:
2832      return &field_write_event_ref_count_;
2833    case instrumentation::Instrumentation::kExceptionCaught:
2834      return &exception_catch_event_ref_count_;
2835    default:
2836      return nullptr;
2837  }
2838}
2839
2840// Process request while all mutator threads are suspended.
2841void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
2842  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
2843  switch (request.GetKind()) {
2844    case DeoptimizationRequest::kNothing:
2845      LOG(WARNING) << "Ignoring empty deoptimization request.";
2846      break;
2847    case DeoptimizationRequest::kRegisterForEvent:
2848      VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
2849                                 request.InstrumentationEvent());
2850      instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
2851      instrumentation_events_ |= request.InstrumentationEvent();
2852      break;
2853    case DeoptimizationRequest::kUnregisterForEvent:
2854      VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
2855                                 request.InstrumentationEvent());
2856      instrumentation->RemoveListener(&gDebugInstrumentationListener,
2857                                      request.InstrumentationEvent());
2858      instrumentation_events_ &= ~request.InstrumentationEvent();
2859      break;
2860    case DeoptimizationRequest::kFullDeoptimization:
2861      VLOG(jdwp) << "Deoptimize the world ...";
2862      instrumentation->DeoptimizeEverything();
2863      VLOG(jdwp) << "Deoptimize the world DONE";
2864      break;
2865    case DeoptimizationRequest::kFullUndeoptimization:
2866      VLOG(jdwp) << "Undeoptimize the world ...";
2867      instrumentation->UndeoptimizeEverything();
2868      VLOG(jdwp) << "Undeoptimize the world DONE";
2869      break;
2870    case DeoptimizationRequest::kSelectiveDeoptimization:
2871      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
2872      instrumentation->Deoptimize(request.Method());
2873      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
2874      break;
2875    case DeoptimizationRequest::kSelectiveUndeoptimization:
2876      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
2877      instrumentation->Undeoptimize(request.Method());
2878      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
2879      break;
2880    default:
2881      LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
2882      break;
2883  }
2884}
2885
2886void Dbg::DelayFullUndeoptimization() {
2887  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2888  ++delayed_full_undeoptimization_count_;
2889  DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
2890}
2891
2892void Dbg::ProcessDelayedFullUndeoptimizations() {
2893  // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization).
2894  {
2895    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2896    while (delayed_full_undeoptimization_count_ > 0) {
2897      DeoptimizationRequest req;
2898      req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
2899      req.SetMethod(nullptr);
2900      RequestDeoptimizationLocked(req);
2901      --delayed_full_undeoptimization_count_;
2902    }
2903  }
2904  ManageDeoptimization();
2905}
2906
2907void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
2908  if (req.GetKind() == DeoptimizationRequest::kNothing) {
2909    // Nothing to do.
2910    return;
2911  }
2912  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2913  RequestDeoptimizationLocked(req);
2914}
2915
2916void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
2917  switch (req.GetKind()) {
2918    case DeoptimizationRequest::kRegisterForEvent: {
2919      DCHECK_NE(req.InstrumentationEvent(), 0u);
2920      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
2921      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
2922                                                req.InstrumentationEvent());
2923      if (*counter == 0) {
2924        VLOG(jdwp) << StringPrintf("Queue request #%zu to start listening to instrumentation event 0x%x",
2925                                   deoptimization_requests_.size(), req.InstrumentationEvent());
2926        deoptimization_requests_.push_back(req);
2927      }
2928      *counter = *counter + 1;
2929      break;
2930    }
2931    case DeoptimizationRequest::kUnregisterForEvent: {
2932      DCHECK_NE(req.InstrumentationEvent(), 0u);
2933      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
2934      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
2935                                                req.InstrumentationEvent());
2936      *counter = *counter - 1;
2937      if (*counter == 0) {
2938        VLOG(jdwp) << StringPrintf("Queue request #%zu to stop listening to instrumentation event 0x%x",
2939                                   deoptimization_requests_.size(), req.InstrumentationEvent());
2940        deoptimization_requests_.push_back(req);
2941      }
2942      break;
2943    }
2944    case DeoptimizationRequest::kFullDeoptimization: {
2945      DCHECK(req.Method() == nullptr);
2946      if (full_deoptimization_event_count_ == 0) {
2947        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2948                   << " for full deoptimization";
2949        deoptimization_requests_.push_back(req);
2950      }
2951      ++full_deoptimization_event_count_;
2952      break;
2953    }
2954    case DeoptimizationRequest::kFullUndeoptimization: {
2955      DCHECK(req.Method() == nullptr);
2956      DCHECK_GT(full_deoptimization_event_count_, 0U);
2957      --full_deoptimization_event_count_;
2958      if (full_deoptimization_event_count_ == 0) {
2959        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2960                   << " for full undeoptimization";
2961        deoptimization_requests_.push_back(req);
2962      }
2963      break;
2964    }
2965    case DeoptimizationRequest::kSelectiveDeoptimization: {
2966      DCHECK(req.Method() != nullptr);
2967      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2968                 << " for deoptimization of " << PrettyMethod(req.Method());
2969      deoptimization_requests_.push_back(req);
2970      break;
2971    }
2972    case DeoptimizationRequest::kSelectiveUndeoptimization: {
2973      DCHECK(req.Method() != nullptr);
2974      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2975                 << " for undeoptimization of " << PrettyMethod(req.Method());
2976      deoptimization_requests_.push_back(req);
2977      break;
2978    }
2979    default: {
2980      LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
2981      break;
2982    }
2983  }
2984}
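// A worked example of the reference counting above: if two JDWP event requests both need the
// kDexPcMoved listener, only the first one queues a kRegisterForEvent request; the second merely
// raises dex_pc_change_event_ref_count_ to 2. The matching kUnregisterForEvent request is queued
// only once the count drops back to zero, so the debugger listener is added and removed at most
// once per event kind. Queued requests are applied later, under suspension, by
// ManageDeoptimization() below.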
2985
2986void Dbg::ManageDeoptimization() {
2987  Thread* const self = Thread::Current();
2988  {
2989    // Avoid suspend/resume if there is no pending request.
2990    MutexLock mu(self, *Locks::deoptimization_lock_);
2991    if (deoptimization_requests_.empty()) {
2992      return;
2993    }
2994  }
2995  CHECK_EQ(self->GetState(), kRunnable);
2996  self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
2997  // We need to suspend mutator threads first.
2998  Runtime* const runtime = Runtime::Current();
2999  runtime->GetThreadList()->SuspendAll();
3000  const ThreadState old_state = self->SetStateUnsafe(kRunnable);
3001  {
3002    MutexLock mu(self, *Locks::deoptimization_lock_);
3003    size_t req_index = 0;
3004    for (DeoptimizationRequest& request : deoptimization_requests_) {
3005      VLOG(jdwp) << "Process deoptimization request #" << req_index++;
3006      ProcessDeoptimizationRequest(request);
3007    }
3008    deoptimization_requests_.clear();
3009  }
3010  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
3011  runtime->GetThreadList()->ResumeAll();
3012  self->TransitionFromSuspendedToRunnable();
3013}
3014
3015static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
3016    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3017  const DexFile::CodeItem* code_item = m->GetCodeItem();
3018  if (code_item == nullptr) {
3019    // TODO: We should not be asked to watch a location in a native or abstract method, so the code
3020    // item should never be null. We could just check that we never encounter this case.
3021    return false;
3022  }
3023  StackHandleScope<2> hs(self);
3024  mirror::Class* declaring_class = m->GetDeclaringClass();
3025  Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
3026  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
3027  verifier::MethodVerifier verifier(dex_cache->GetDexFile(), &dex_cache, &class_loader,
3028                                    &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
3029                                    m->GetAccessFlags(), false, true, false);
3030  // Note: we don't need to verify the method.
3031  return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
3032}
3033
3034static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
3035    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3036  for (Breakpoint& breakpoint : gBreakpoints) {
3037    if (breakpoint.Method() == m) {
3038      return &breakpoint;
3039    }
3040  }
3041  return nullptr;
3042}
3043
3044// Sanity checks all existing breakpoints on the same method.
3045static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
3046    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3047  if (kIsDebugBuild) {
3048    for (const Breakpoint& breakpoint : gBreakpoints) {
3049      CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
3050    }
3051    if (need_full_deoptimization) {
3052      // We should have deoptimized everything but not "selectively" deoptimized this method.
3053      CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized());
3054      CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3055    } else {
3056      // We should have "selectively" deoptimized this method.
3057      // Note: while we have not deoptimized everything because of this method, we may have done it
3058      // because of another event.
3059      CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3060    }
3061  }
3062}
3063
3064// Installs a breakpoint at the specified location. Also indicates through the deoptimization
3065// request if we need to deoptimize.
3066void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3067  Thread* const self = Thread::Current();
3068  mirror::ArtMethod* m = FromMethodId(location->method_id);
3069  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3070
3071  WriterMutexLock mu(self, *Locks::breakpoint_lock_);
3072  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3073  bool need_full_deoptimization;
3074  if (existing_breakpoint == nullptr) {
3075    // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
3076    // inlined, we deoptimize everything; otherwise we deoptimize only this method.
3077    need_full_deoptimization = IsMethodPossiblyInlined(self, m);
3078    if (need_full_deoptimization) {
3079      req->SetKind(DeoptimizationRequest::kFullDeoptimization);
3080      req->SetMethod(nullptr);
3081    } else {
3082      req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization);
3083      req->SetMethod(m);
3084    }
3085  } else {
3086    // There is at least one breakpoint for this method: we don't need to deoptimize.
3087    req->SetKind(DeoptimizationRequest::kNothing);
3088    req->SetMethod(nullptr);
3089
3090    need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization();
3091    SanityCheckExistingBreakpoints(m, need_full_deoptimization);
3092  }
3093
3094  gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization));
3095  VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
3096             << gBreakpoints[gBreakpoints.size() - 1];
3097}
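// In summary, installing a breakpoint above requests:
//   - kFullDeoptimization        for the first breakpoint in a method that may be inlined,
//   - kSelectiveDeoptimization   for the first breakpoint in a method that cannot be inlined,
//   - kNothing                   for any additional breakpoint in an already-watched method.
// UnwatchLocation() below applies the mirror-image logic when removing breakpoints.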
3098
3099// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
3100// request if we need to undeoptimize.
3101void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3102  WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
3103  mirror::ArtMethod* m = FromMethodId(location->method_id);
3104  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3105  bool need_full_deoptimization = false;
3106  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
3107    if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
3108      VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
3109      need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization();
3110      DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3111      gBreakpoints.erase(gBreakpoints.begin() + i);
3112      break;
3113    }
3114  }
3115  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3116  if (existing_breakpoint == nullptr) {
3117    // There is no more breakpoint on this method: we need to undeoptimize.
3118    if (need_full_deoptimization) {
3119      // This method required full deoptimization: we need to undeoptimize everything.
3120      req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
3121      req->SetMethod(nullptr);
3122    } else {
3123      // This method required selective deoptimization: we need to undeoptimize only that method.
3124      req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
3125      req->SetMethod(m);
3126    }
3127  } else {
3128    // There is at least one breakpoint for this method: we don't need to undeoptimize.
3129    req->SetKind(DeoptimizationRequest::kNothing);
3130    req->SetMethod(nullptr);
3131    SanityCheckExistingBreakpoints(m, need_full_deoptimization);
3132  }
3133}
3134
3135// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
3136// cause suspension if the thread is the current thread.
3137class ScopedThreadSuspension {
3138 public:
3139  ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
3140      LOCKS_EXCLUDED(Locks::thread_list_lock_)
3141      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
3142      thread_(nullptr),
3143      error_(JDWP::ERR_NONE),
3144      self_suspend_(false),
3145      other_suspend_(false) {
3146    ScopedObjectAccessUnchecked soa(self);
3147    {
3148      MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3149      error_ = DecodeThread(soa, thread_id, thread_);
3150    }
3151    if (error_ == JDWP::ERR_NONE) {
3152      if (thread_ == soa.Self()) {
3153        self_suspend_ = true;
3154      } else {
3155        soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3156        jobject thread_peer = gRegistry->GetJObject(thread_id);
3157        bool timed_out;
3158        Thread* suspended_thread;
3159        {
3160          // Take suspend thread lock to avoid races with threads trying to suspend this one.
3161          MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
3162          ThreadList* thread_list = Runtime::Current()->GetThreadList();
3163          suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
3164        }
3165        CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
3166        if (suspended_thread == nullptr) {
3167          // Thread terminated from under us while suspending.
3168          error_ = JDWP::ERR_INVALID_THREAD;
3169        } else {
3170          CHECK_EQ(suspended_thread, thread_);
3171          other_suspend_ = true;
3172        }
3173      }
3174    }
3175  }
3176
3177  Thread* GetThread() const {
3178    return thread_;
3179  }
3180
3181  JDWP::JdwpError GetError() const {
3182    return error_;
3183  }
3184
3185  ~ScopedThreadSuspension() {
3186    if (other_suspend_) {
3187      Runtime::Current()->GetThreadList()->Resume(thread_, true);
3188    }
3189  }
3190
3191 private:
3192  Thread* thread_;
3193  JDWP::JdwpError error_;
3194  bool self_suspend_;
3195  bool other_suspend_;
3196};
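// Typical usage, as in ConfigureStep() below: construct the guard, bail out if GetError() is not
// ERR_NONE, then operate on GetThread(). The destructor resumes the thread only if it was
// suspended here (other_suspend_), not when the target thread is the caller itself.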
3197
3198JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
3199                                   JDWP::JdwpStepDepth step_depth) {
3200  Thread* self = Thread::Current();
3201  ScopedThreadSuspension sts(self, thread_id);
3202  if (sts.GetError() != JDWP::ERR_NONE) {
3203    return sts.GetError();
3204  }
3205
3206  //
3207  // Work out what Method* we're in, the current line number, and how deep the stack currently
3208  // is for step-out.
3209  //
3210
3211  struct SingleStepStackVisitor : public StackVisitor {
3212    explicit SingleStepStackVisitor(Thread* thread, SingleStepControl* single_step_control,
3213                                    int32_t* line_number)
3214        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
3215        : StackVisitor(thread, NULL), single_step_control_(single_step_control),
3216          line_number_(line_number) {
3217      DCHECK_EQ(single_step_control_, thread->GetSingleStepControl());
3218      single_step_control_->method = NULL;
3219      single_step_control_->stack_depth = 0;
3220    }
3221
3222    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
3223    // annotalysis.
3224    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
3225      mirror::ArtMethod* m = GetMethod();
3226      if (!m->IsRuntimeMethod()) {
3227        ++single_step_control_->stack_depth;
3228        if (single_step_control_->method == NULL) {
3229          mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
3230          single_step_control_->method = m;
3231          *line_number_ = -1;
3232          if (dex_cache != NULL) {
3233            const DexFile& dex_file = *dex_cache->GetDexFile();
3234            *line_number_ = dex_file.GetLineNumFromPC(m, GetDexPc());
3235          }
3236        }
3237      }
3238      return true;
3239    }
3240
3241    SingleStepControl* const single_step_control_;
3242    int32_t* const line_number_;
3243  };
3244
3245  Thread* const thread = sts.GetThread();
3246  SingleStepControl* const single_step_control = thread->GetSingleStepControl();
3247  DCHECK(single_step_control != nullptr);
3248  int32_t line_number = -1;
3249  SingleStepStackVisitor visitor(thread, single_step_control, &line_number);
3250  visitor.WalkStack();
3251
3252  //
3253  // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
3254  //
3255
3256  struct DebugCallbackContext {
3257    explicit DebugCallbackContext(SingleStepControl* single_step_control, int32_t line_number,
3258                                  const DexFile::CodeItem* code_item)
3259      : single_step_control_(single_step_control), line_number_(line_number), code_item_(code_item),
3260        last_pc_valid(false), last_pc(0) {
3261    }
3262
3263    static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) {
3264      DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
3265      if (static_cast<int32_t>(line_number) == context->line_number_) {
3266        if (!context->last_pc_valid) {
3267          // Everything from this address until the next line change is ours.
3268          context->last_pc = address;
3269          context->last_pc_valid = true;
3270        }
3271        // Otherwise, if we're already in a valid range for this line,
3272        // just keep going (shouldn't really happen)...
3273      } else if (context->last_pc_valid) {  // and the line number is new
3274        // Add everything from the last entry up until here to the set
3275        for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) {
3276          context->single_step_control_->dex_pcs.insert(dex_pc);
3277        }
3278        context->last_pc_valid = false;
3279      }
3280      return false;  // There may be multiple entries for any given line.
3281    }
3282
3283    ~DebugCallbackContext() {
3284      // If the line number was the last in the position table...
3285      if (last_pc_valid) {
3286        size_t end = code_item_->insns_size_in_code_units_;
3287        for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
3288          single_step_control_->dex_pcs.insert(dex_pc);
3289        }
3290      }
3291    }
3292
3293    SingleStepControl* const single_step_control_;
3294    const int32_t line_number_;
3295    const DexFile::CodeItem* const code_item_;
3296    bool last_pc_valid;
3297    uint32_t last_pc;
3298  };
3299  single_step_control->dex_pcs.clear();
3300  mirror::ArtMethod* m = single_step_control->method;
3301  if (!m->IsNative()) {
3302    const DexFile::CodeItem* const code_item = m->GetCodeItem();
3303    DebugCallbackContext context(single_step_control, line_number, code_item);
3304    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
3305                                     DebugCallbackContext::Callback, NULL, &context);
3306  }
3307
3308  //
3309  // Everything else...
3310  //
3311
3312  single_step_control->step_size = step_size;
3313  single_step_control->step_depth = step_depth;
3314  single_step_control->is_active = true;
3315
3316  if (VLOG_IS_ON(jdwp)) {
3317    VLOG(jdwp) << "Single-step thread: " << *thread;
3318    VLOG(jdwp) << "Single-step step size: " << single_step_control->step_size;
3319    VLOG(jdwp) << "Single-step step depth: " << single_step_control->step_depth;
3320    VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->method);
3321    VLOG(jdwp) << "Single-step current line: " << line_number;
3322    VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->stack_depth;
3323    VLOG(jdwp) << "Single-step dex_pc values:";
3324    for (uint32_t dex_pc : single_step_control->dex_pcs) {
3325      VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
3326    }
3327  }
3328
3329  return JDWP::ERR_NONE;
3330}
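// A sketch of the dex_pcs computation above, using a hypothetical line table for line 42:
//   {pc=0x10, line=41}, {pc=0x14, line=42}, {pc=0x1c, line=43}, {pc=0x20, line=42}
// Callback() collects pcs 0x14..0x1b once the entry for line 43 is seen, and the
// DebugCallbackContext destructor adds 0x20 up to the end of the code item because the last range
// is still open. The resulting set is every dex pc belonging to line 42, so the single-step check
// can treat any pc outside it as a move to a new source line.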
3331
3332void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
3333  ScopedObjectAccessUnchecked soa(Thread::Current());
3334  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3335  Thread* thread;
3336  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
3337  if (error == JDWP::ERR_NONE) {
3338    SingleStepControl* single_step_control = thread->GetSingleStepControl();
3339    DCHECK(single_step_control != nullptr);
3340    single_step_control->Clear();
3341  }
3342}
3343
3344static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
3345  switch (tag) {
3346    default:
3347      LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
3348
3349    // Primitives.
3350    case JDWP::JT_BYTE:    return 'B';
3351    case JDWP::JT_CHAR:    return 'C';
3352    case JDWP::JT_FLOAT:   return 'F';
3353    case JDWP::JT_DOUBLE:  return 'D';
3354    case JDWP::JT_INT:     return 'I';
3355    case JDWP::JT_LONG:    return 'J';
3356    case JDWP::JT_SHORT:   return 'S';
3357    case JDWP::JT_VOID:    return 'V';
3358    case JDWP::JT_BOOLEAN: return 'Z';
3359
3360    // Reference types.
3361    case JDWP::JT_ARRAY:
3362    case JDWP::JT_OBJECT:
3363    case JDWP::JT_STRING:
3364    case JDWP::JT_THREAD:
3365    case JDWP::JT_THREAD_GROUP:
3366    case JDWP::JT_CLASS_LOADER:
3367    case JDWP::JT_CLASS_OBJECT:
3368      return 'L';
3369  }
3370}
3371
3372JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
3373                                  JDWP::RefTypeId class_id, JDWP::MethodId method_id,
3374                                  uint32_t arg_count, uint64_t* arg_values,
3375                                  JDWP::JdwpTag* arg_types, uint32_t options,
3376                                  JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
3377                                  JDWP::ObjectId* pExceptionId) {
3378  ThreadList* thread_list = Runtime::Current()->GetThreadList();
3379
3380  Thread* targetThread = NULL;
3381  DebugInvokeReq* req = NULL;
3382  Thread* self = Thread::Current();
3383  {
3384    ScopedObjectAccessUnchecked soa(self);
3385    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3386    JDWP::JdwpError error = DecodeThread(soa, thread_id, targetThread);
3387    if (error != JDWP::ERR_NONE) {
3388      LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
3389      return error;
3390    }
3391    req = targetThread->GetInvokeReq();
3392    if (!req->ready) {
3393      LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
3394      return JDWP::ERR_INVALID_THREAD;
3395    }
3396
3397    /*
3398     * We currently have a bug where we don't successfully resume the
3399     * target thread if the suspend count is too deep.  We're expected to
3400     * require one "resume" for each "suspend", but when asked to execute
3401     * a method we have to resume fully and then re-suspend it back to the
3402     * same level.  (The easiest way to cause this is to type "suspend"
3403     * multiple times in jdb.)
3404     *
3405     * It's unclear what this means when the event specifies "resume all"
3406     * and some threads are suspended more deeply than others.  This is
3407     * a rare problem, so for now we just prevent it from hanging forever
3408     * by rejecting the method invocation request.  Without this, we will
3409     * be stuck waiting on a suspended thread.
3410     */
3411    int suspend_count;
3412    {
3413      MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
3414      suspend_count = targetThread->GetSuspendCount();
3415    }
3416    if (suspend_count > 1) {
3417      LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count;
3418      return JDWP::ERR_THREAD_SUSPENDED;  // Probably not expected here.
3419    }
3420
3421    JDWP::JdwpError status;
3422    mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id);
3423    if (receiver == ObjectRegistry::kInvalidObject) {
3424      return JDWP::ERR_INVALID_OBJECT;
3425    }
3426
3427    mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id);
3428    if (thread == ObjectRegistry::kInvalidObject) {
3429      return JDWP::ERR_INVALID_OBJECT;
3430    }
3431    // TODO: check that 'thread' is actually a java.lang.Thread!
3432
3433    mirror::Class* c = DecodeClass(class_id, status);
3434    if (c == NULL) {
3435      return status;
3436    }
3437
3438    mirror::ArtMethod* m = FromMethodId(method_id);
3439    if (m->IsStatic() != (receiver == NULL)) {
3440      return JDWP::ERR_INVALID_METHODID;
3441    }
3442    if (m->IsStatic()) {
3443      if (m->GetDeclaringClass() != c) {
3444        return JDWP::ERR_INVALID_METHODID;
3445      }
3446    } else {
3447      if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
3448        return JDWP::ERR_INVALID_METHODID;
3449      }
3450    }
3451
3452    // Check the argument list matches the method.
3453    uint32_t shorty_len = 0;
3454    const char* shorty = m->GetShorty(&shorty_len);
3455    if (shorty_len - 1 != arg_count) {
3456      return JDWP::ERR_ILLEGAL_ARGUMENT;
3457    }
3458
3459    {
3460      StackHandleScope<3> hs(soa.Self());
3461      MethodHelper mh(hs.NewHandle(m));
3462      HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
3463      HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
3464      const DexFile::TypeList* types = m->GetParameterTypeList();
3465      for (size_t i = 0; i < arg_count; ++i) {
3466        if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
3467          return JDWP::ERR_ILLEGAL_ARGUMENT;
3468        }
3469
3470        if (shorty[i + 1] == 'L') {
3471          // Did we really get an argument of an appropriate reference type?
3472          mirror::Class* parameter_type = mh.GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_);
3473          mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i]);
3474          if (argument == ObjectRegistry::kInvalidObject) {
3475            return JDWP::ERR_INVALID_OBJECT;
3476          }
3477          if (argument != NULL && !argument->InstanceOf(parameter_type)) {
3478            return JDWP::ERR_ILLEGAL_ARGUMENT;
3479          }
3480
3481          // Turn the on-the-wire ObjectId into a jobject.
3482          jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
3483          v.l = gRegistry->GetJObject(arg_values[i]);
3484        }
3485      }
3486      // Update in case it moved.
3487      m = mh.GetMethod();
3488    }
3489
3490    req->receiver = receiver;
3491    req->thread = thread;
3492    req->klass = c;
3493    req->method = m;
3494    req->arg_count = arg_count;
3495    req->arg_values = arg_values;
3496    req->options = options;
3497    req->invoke_needed = true;
3498  }
3499
3500  // The fact that we've released the thread list lock is a bit risky: if the thread goes away
3501  // we're sitting high and dry. But we must release it before resuming the thread(s) below, and
3502  // it's unwise to hold it while waiting for the invoke to complete.
3503
3504  {
3505    /*
3506     * We change our (JDWP thread) status, which should be THREAD_RUNNING,
3507     * so we can suspend for a GC if the invoke request causes us to
3508     * run out of memory.  It's also a good idea to change it before locking
3509     * the invokeReq mutex, although that should never be held for long.
3510     */
3511    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
3512
3513    VLOG(jdwp) << "    Transferring control to event thread";
3514    {
3515      MutexLock mu(self, req->lock);
3516
3517      if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3518        VLOG(jdwp) << "      Resuming all threads";
3519        thread_list->UndoDebuggerSuspensions();
3520      } else {
3521        VLOG(jdwp) << "      Resuming event thread only";
3522        thread_list->Resume(targetThread, true);
3523      }
3524
3525      // Wait for the request to finish executing.
3526      while (req->invoke_needed) {
3527        req->cond.Wait(self);
3528      }
3529    }
3530    VLOG(jdwp) << "    Control has returned from event thread";
3531
3532    /* wait for thread to re-suspend itself */
3533    SuspendThread(thread_id, false /* request_suspension */);
3534    self->TransitionFromSuspendedToRunnable();
3535  }
3536
3537  /*
3538   * Suspend the threads.  We waited for the target thread to suspend
3539   * itself, so all we need to do is suspend the others.
3540   *
3541   * The SuspendAllForDebugger() call will double-suspend the event thread,
3542   * so we want to resume the target thread once to keep the books straight.
3543   */
3544  if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3545    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3546    VLOG(jdwp) << "      Suspending all threads";
3547    thread_list->SuspendAllForDebugger();
3548    self->TransitionFromSuspendedToRunnable();
3549    VLOG(jdwp) << "      Resuming event thread to balance the count";
3550    thread_list->Resume(targetThread, true);
3551  }
3552
3553  // Copy the result.
3554  *pResultTag = req->result_tag;
3555  if (IsPrimitiveTag(req->result_tag)) {
3556    *pResultValue = req->result_value.GetJ();
3557  } else {
3558    *pResultValue = gRegistry->Add(req->result_value.GetL());
3559  }
3560  *pExceptionId = req->exception;
3561  return req->error;
3562}
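// The invocation is a two-thread handshake: InvokeMethod() above runs on the JDWP thread, fills in
// the target thread's DebugInvokeReq, resumes it, and waits on req->cond until req->invoke_needed
// is cleared. The event thread performs the actual call in ExecuteMethod() below and then
// re-suspends itself, after which the JDWP thread copies the result tag, value and any pending
// exception back for the debugger.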
3563
3564void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
3565  ScopedObjectAccess soa(Thread::Current());
3566
3567  // We can be called while an exception is pending. We need
3568  // to preserve that across the method invocation.
3569  StackHandleScope<4> hs(soa.Self());
3570  auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr);
3571  auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
3572  auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr);
3573  uint32_t old_throw_dex_pc;
3574  bool old_exception_report_flag;
3575  {
3576    ThrowLocation old_throw_location;
3577    mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
3578    old_throw_this_object.Assign(old_throw_location.GetThis());
3579    old_throw_method.Assign(old_throw_location.GetMethod());
3580    old_exception.Assign(old_exception_obj);
3581    old_throw_dex_pc = old_throw_location.GetDexPc();
3582    old_exception_report_flag = soa.Self()->IsExceptionReportedToInstrumentation();
3583    soa.Self()->ClearException();
3584  }
3585
3586  // Translate the method through the vtable, unless the debugger wants to suppress it.
3587  Handle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
3588  if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != NULL) {
3589    mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get());
3590    if (actual_method != m.Get()) {
3591      VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method);
3592      m.Assign(actual_method);
3593    }
3594  }
3595  VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get())
3596             << " receiver=" << pReq->receiver
3597             << " arg_count=" << pReq->arg_count;
3598  CHECK(m.Get() != nullptr);
3599
3600  CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
3601
3602  pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()),
3603                                         reinterpret_cast<jvalue*>(pReq->arg_values));
3604
3605  mirror::Throwable* exception = soa.Self()->GetException(NULL);
3606  soa.Self()->ClearException();
3607  pReq->exception = gRegistry->Add(exception);
3608  pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty());
3609  if (pReq->exception != 0) {
3610    VLOG(jdwp) << "  JDWP invocation returning with exception=" << exception
3611        << " " << exception->Dump();
3612    pReq->result_value.SetJ(0);
3613  } else if (pReq->result_tag == JDWP::JT_OBJECT) {
3614    /* if no exception thrown, examine object result more closely */
3615    JDWP::JdwpTag new_tag = TagFromObject(soa, pReq->result_value.GetL());
3616    if (new_tag != pReq->result_tag) {
3617      VLOG(jdwp) << "  JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
3618      pReq->result_tag = new_tag;
3619    }
3620
3621    /*
3622     * Register the object.  We don't actually need an ObjectId yet,
3623     * but we do need to be sure that the GC won't move or discard the
3624     * object when we switch out of RUNNING.  The ObjectId conversion
3625     * will add the object to the "do not touch" list.
3626     *
3627     * We can't use the "tracked allocation" mechanism here because
3628     * the object is going to be handed off to a different thread.
3629     */
3630    gRegistry->Add(pReq->result_value.GetL());
3631  }
3632
3633  if (old_exception.Get() != NULL) {
3634    ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
3635                                         old_throw_dex_pc);
3636    soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
3637    soa.Self()->SetExceptionReportedToInstrumentation(old_exception_report_flag);
3638  }
3639}
3640
3641/*
3642 * "request" contains a full JDWP packet, possibly with multiple chunks.  We
3643 * need to process each, accumulate the replies, and ship the whole thing
3644 * back.
3645 *
3646 * Returns "true" if we have a reply.  The reply buffer is newly allocated,
3647 * and includes the chunk type/length, followed by the data.
3648 *
3649 * OLD-TODO: we currently assume that the request and reply include a single
3650 * chunk.  If this becomes inconvenient we will need to adapt.
3651 */
3652bool Dbg::DdmHandlePacket(JDWP::Request& request, uint8_t** pReplyBuf, int* pReplyLen) {
3653  Thread* self = Thread::Current();
3654  JNIEnv* env = self->GetJniEnv();
3655
3656  uint32_t type = request.ReadUnsigned32("type");
3657  uint32_t length = request.ReadUnsigned32("length");
3658
3659  // Create a byte[] corresponding to 'request'.
3660  size_t request_length = request.size();
3661  ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
3662  if (dataArray.get() == NULL) {
3663    LOG(WARNING) << "byte[] allocation failed: " << request_length;
3664    env->ExceptionClear();
3665    return false;
3666  }
3667  env->SetByteArrayRegion(dataArray.get(), 0, request_length, reinterpret_cast<const jbyte*>(request.data()));
3668  request.Skip(request_length);
3669
3670  // Run through and find all chunks.  [Currently just find the first.]
3671  ScopedByteArrayRO contents(env, dataArray.get());
3672  if (length != request_length) {
3673    LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zu)", length, request_length);
3674    return false;
3675  }
3676
3677  // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
3678  ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3679                                                                 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
3680                                                                 type, dataArray.get(), 0, length));
3681  if (env->ExceptionCheck()) {
3682    LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
3683    env->ExceptionDescribe();
3684    env->ExceptionClear();
3685    return false;
3686  }
3687
3688  if (chunk.get() == NULL) {
3689    return false;
3690  }
3691
3692  /*
3693   * Pull the pieces out of the chunk.  We copy the results into a
3694   * newly-allocated buffer that the caller can free.  We don't want to
3695   * continue using the Chunk object because nothing has a reference to it.
3696   *
3697   * We could avoid this by returning type/data/offset/length and having
3698   * the caller be aware of the object lifetime issues, but that
3699   * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
3700   * if we have responses for multiple chunks.
3701   *
3702   * So we're pretty much stuck with copying data around multiple times.
3703   */
3704  ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
3705  jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
3706  length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
3707  type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);
3708
3709  VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
3710  if (length == 0 || replyData.get() == NULL) {
3711    return false;
3712  }
3713
3714  const int kChunkHdrLen = 8;
3715  uint8_t* reply = new uint8_t[length + kChunkHdrLen];
3716  if (reply == NULL) {
3717    LOG(WARNING) << "malloc failed: " << (length + kChunkHdrLen);
3718    return false;
3719  }
3720  JDWP::Set4BE(reply + 0, type);
3721  JDWP::Set4BE(reply + 4, length);
3722  env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
3723
3724  *pReplyBuf = reply;
3725  *pReplyLen = length + kChunkHdrLen;
3726
3727  VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
3728  return true;
3729}
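// Layout of the reply buffer built above (all fields big-endian):
//   bytes [0, 4)            chunk type (e.g. a CHUNK_TYPE four-character code)
//   bytes [4, 8)            payload length
//   bytes [8, 8 + length)   payload copied out of the Chunk's data array
// so *pReplyLen is always the payload length plus the 8-byte chunk header.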
3730
3731void Dbg::DdmBroadcast(bool connect) {
3732  VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
3733
3734  Thread* self = Thread::Current();
3735  if (self->GetState() != kRunnable) {
3736    LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
3737    /* try anyway? */
3738  }
3739
3740  JNIEnv* env = self->GetJniEnv();
3741  jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
3742  env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3743                            WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
3744                            event);
3745  if (env->ExceptionCheck()) {
3746    LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
3747    env->ExceptionDescribe();
3748    env->ExceptionClear();
3749  }
3750}
3751
3752void Dbg::DdmConnected() {
3753  Dbg::DdmBroadcast(true);
3754}
3755
3756void Dbg::DdmDisconnected() {
3757  Dbg::DdmBroadcast(false);
3758  gDdmThreadNotification = false;
3759}
3760
3761/*
3762 * Send a notification when a thread starts, stops, or changes its name.
3763 *
3764 * Because we broadcast the full set of threads when the notifications are
3765 * first enabled, it's possible for "thread" to be actively executing.
3766 */
3767void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
3768  if (!gDdmThreadNotification) {
3769    return;
3770  }
3771
3772  if (type == CHUNK_TYPE("THDE")) {
3773    uint8_t buf[4];
3774    JDWP::Set4BE(&buf[0], t->GetThreadId());
3775    Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
3776  } else {
3777    CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
3778    ScopedObjectAccessUnchecked soa(Thread::Current());
3779    StackHandleScope<1> hs(soa.Self());
3780    Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
3781    size_t char_count = (name.Get() != NULL) ? name->GetLength() : 0;
3782    const jchar* chars = (name.Get() != NULL) ? name->GetCharArray()->GetData() : NULL;
3783
3784    std::vector<uint8_t> bytes;
3785    JDWP::Append4BE(bytes, t->GetThreadId());
3786    JDWP::AppendUtf16BE(bytes, chars, char_count);
3787    CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
3788    Dbg::DdmSendChunk(type, bytes);
3789  }
3790}
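// Payload layout for THCR/THNM notifications, as implied by the size check above:
//   [u4] thread id
//   [u4] thread name length in UTF-16 code units (written by AppendUtf16BE)
//   [u2] * length  thread name, UTF-16BE
// A THDE notification carries only the [u4] thread id.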
3791
3792void Dbg::DdmSetThreadNotification(bool enable) {
3793  // Enable/disable thread notifications.
3794  gDdmThreadNotification = enable;
3795  if (enable) {
3796    // Suspend the VM then post thread start notifications for all threads. Threads attaching will
3797    // see a suspension in progress and block until that ends. They then post their own start
3798    // notification.
3799    SuspendVM();
3800    std::list<Thread*> threads;
3801    Thread* self = Thread::Current();
3802    {
3803      MutexLock mu(self, *Locks::thread_list_lock_);
3804      threads = Runtime::Current()->GetThreadList()->GetList();
3805    }
3806    {
3807      ScopedObjectAccess soa(self);
3808      for (Thread* thread : threads) {
3809        Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
3810      }
3811    }
3812    ResumeVM();
3813  }
3814}
3815
3816void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
3817  if (IsDebuggerActive()) {
3818    ScopedObjectAccessUnchecked soa(Thread::Current());
3819    JDWP::ObjectId id = gRegistry->Add(t->GetPeer());
3820    gJdwpState->PostThreadChange(id, type == CHUNK_TYPE("THCR"));
3821  }
3822  Dbg::DdmSendThreadNotification(t, type);
3823}
3824
3825void Dbg::PostThreadStart(Thread* t) {
3826  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
3827}
3828
3829void Dbg::PostThreadDeath(Thread* t) {
3830  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
3831}
3832
3833void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
3834  CHECK(buf != NULL);
3835  iovec vec[1];
3836  vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
3837  vec[0].iov_len = byte_count;
3838  Dbg::DdmSendChunkV(type, vec, 1);
3839}
3840
3841void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
3842  DdmSendChunk(type, bytes.size(), &bytes[0]);
3843}
3844
3845void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
3846  if (gJdwpState == NULL) {
3847    VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
3848  } else {
3849    gJdwpState->DdmSendChunkV(type, iov, iov_count);
3850  }
3851}
3852
3853int Dbg::DdmHandleHpifChunk(HpifWhen when) {
3854  if (when == HPIF_WHEN_NOW) {
3855    DdmSendHeapInfo(when);
3856    return true;
3857  }
3858
3859  if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
3860    LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
3861    return false;
3862  }
3863
3864  gDdmHpifWhen = when;
3865  return true;
3866}
3867
3868bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
3869  if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
3870    LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
3871    return false;
3872  }
3873
3874  if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
3875    LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
3876    return false;
3877  }
3878
3879  if (native) {
3880    gDdmNhsgWhen = when;
3881    gDdmNhsgWhat = what;
3882  } else {
3883    gDdmHpsgWhen = when;
3884    gDdmHpsgWhat = what;
3885  }
3886  return true;
3887}
3888
3889void Dbg::DdmSendHeapInfo(HpifWhen reason) {
3890  // If there's a one-shot 'when', reset it.
3891  if (reason == gDdmHpifWhen) {
3892    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
3893      gDdmHpifWhen = HPIF_WHEN_NEVER;
3894    }
3895  }
3896
3897  /*
3898   * Chunk HPIF (client --> server)
3899   *
3900   * Heap Info. General information about the heap,
3901   * suitable for a summary display.
3902   *
3903   *   [u4]: number of heaps
3904   *
3905   *   For each heap:
3906   *     [u4]: heap ID
3907   *     [u8]: timestamp in ms since Unix epoch
3908   *     [u1]: capture reason (same as 'when' value from server)
3909   *     [u4]: max heap size in bytes (-Xmx)
3910   *     [u4]: current heap size in bytes
3911   *     [u4]: current number of bytes allocated
3912   *     [u4]: current number of objects allocated
3913   */
3914  uint8_t heap_count = 1;
3915  gc::Heap* heap = Runtime::Current()->GetHeap();
3916  std::vector<uint8_t> bytes;
3917  JDWP::Append4BE(bytes, heap_count);
3918  JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
3919  JDWP::Append8BE(bytes, MilliTime());
3920  JDWP::Append1BE(bytes, reason);
3921  JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
3922  JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
3923  JDWP::Append4BE(bytes, heap->GetBytesAllocated());
3924  JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
3925  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
3926  Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
3927}
3928
3929enum HpsgSolidity {
3930  SOLIDITY_FREE = 0,
3931  SOLIDITY_HARD = 1,
3932  SOLIDITY_SOFT = 2,
3933  SOLIDITY_WEAK = 3,
3934  SOLIDITY_PHANTOM = 4,
3935  SOLIDITY_FINALIZABLE = 5,
3936  SOLIDITY_SWEEP = 6,
3937};
3938
3939enum HpsgKind {
3940  KIND_OBJECT = 0,
3941  KIND_CLASS_OBJECT = 1,
3942  KIND_ARRAY_1 = 2,
3943  KIND_ARRAY_2 = 3,
3944  KIND_ARRAY_4 = 4,
3945  KIND_ARRAY_8 = 5,
3946  KIND_UNKNOWN = 6,
3947  KIND_NATIVE = 7,
3948};
3949
3950#define HPSG_PARTIAL (1<<7)
3951#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
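// For example, HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) is ((4 & 0x7) << 3) | (1 & 0x7), i.e. 0x21,
// and HPSG_STATE(SOLIDITY_FREE, 0) is 0.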
3952
3953class HeapChunkContext {
3954 public:
3955  // Maximum chunk size.  Obtain this from the formula:
3956  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
3957  HeapChunkContext(bool merge, bool native)
3958      : buf_(16384 - 16),
3959        type_(0),
3960        merge_(merge),
3961        chunk_overhead_(0) {
3962    Reset();
3963    if (native) {
3964      type_ = CHUNK_TYPE("NHSG");
3965    } else {
3966      type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
3967    }
3968  }
3969
3970  ~HeapChunkContext() {
3971    if (p_ > &buf_[0]) {
3972      Flush();
3973    }
3974  }
3975
3976  void SetChunkOverhead(size_t chunk_overhead) {
3977    chunk_overhead_ = chunk_overhead;
3978  }
3979
3980  void ResetStartOfNextChunk() {
3981    startOfNextMemoryChunk_ = nullptr;
3982  }
3983
3984  void EnsureHeader(const void* chunk_ptr) {
3985    if (!needHeader_) {
3986      return;
3987    }
3988
3989    // Start a new HPSx chunk.
3990    JDWP::Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
3991    JDWP::Write1BE(&p_, 8);  // Size of allocation unit, in bytes.
3992
3993    JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
3994    JDWP::Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
3995    // [u4]: length of piece, in allocation units
3996    // We won't know this until we're done, so save the offset and stuff in a dummy value.
3997    pieceLenField_ = p_;
3998    JDWP::Write4BE(&p_, 0x55555555);
3999    needHeader_ = false;
4000  }
4001
4002  void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4003    if (pieceLenField_ == NULL) {
4004      // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
4005      CHECK(needHeader_);
4006      return;
4007    }
4008    // Patch the "length of piece" field.
4009    CHECK_LE(&buf_[0], pieceLenField_);
4010    CHECK_LE(pieceLenField_, p_);
4011    JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
4012
4013    Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
4014    Reset();
4015  }
4016
4017  static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg)
4018      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4019                            Locks::mutator_lock_) {
4020    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(start, end, used_bytes);
4021  }
4022
4023 private:
4024  enum { ALLOCATION_UNIT_SIZE = 8 };
4025
4026  void Reset() {
4027    p_ = &buf_[0];
4028    ResetStartOfNextChunk();
4029    totalAllocationUnits_ = 0;
4030    needHeader_ = true;
4031    pieceLenField_ = NULL;
4032  }
4033
4034  void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes)
4035      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4036                            Locks::mutator_lock_) {
4037    // Note: heap callbacks cannot manipulate the heap upon which they are crawling. Care is taken
4038    // in the following code not to allocate memory, by ensuring buf_ is of the correct size.
4039    if (used_bytes == 0) {
4040      if (start == NULL) {
4041        // Reset for the start of a new heap.
4042        startOfNextMemoryChunk_ = NULL;
4043        Flush();
4044      }
4045      // Only process in-use memory so that free region information
4046      // also includes dlmalloc bookkeeping.
4047      return;
4048    }
4049
4050    /* If we're looking at the native heap, we'll just return
4051     * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
4052     */
4053    bool native = type_ == CHUNK_TYPE("NHSG");
4054
4055    // TODO: I'm not sure using the start of the next chunk works well with multiple spaces. We
4056    // shouldn't count gaps between spaces as free memory.
4057    if (startOfNextMemoryChunk_ != NULL) {
4058      // Transmit any pending free memory. Native free memory of over kMaxFreeLen
4059      // could be due to the use of mmaps, so don't report it. If no free chunk was
4060      // reported, start a new segment.
4061      bool flush = true;
4062      if (start > startOfNextMemoryChunk_) {
4063        const size_t kMaxFreeLen = 2 * kPageSize;
4064        void* freeStart = startOfNextMemoryChunk_;
4065        void* freeEnd = start;
4066        size_t freeLen = reinterpret_cast<char*>(freeEnd) - reinterpret_cast<char*>(freeStart);
4067        if (!native || freeLen < kMaxFreeLen) {
4068          AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen);
4069          flush = false;
4070        }
4071      }
4072      if (flush) {
4073        startOfNextMemoryChunk_ = NULL;
4074        Flush();
4075      }
4076    }
4077    mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
4078
4079    // Determine the type of this chunk.
4080    // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
4081    // If it's the same, we should combine them.
4082    uint8_t state = ExamineObject(obj, native);
4083    AppendChunk(state, start, used_bytes + chunk_overhead_);
4084    startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4085  }
4086
4087  void AppendChunk(uint8_t state, void* ptr, size_t length)
4088      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4089    // Make sure there's enough room left in the buffer.
4090    // We need two bytes for every 256 allocation units used by the chunk (rounding up), plus
4091    // 17 bytes for any header.
4092    size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
4093    size_t bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4094    if (bytesLeft < needed) {
4095      Flush();
4096    }
4097
4098    bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4099    if (bytesLeft < needed) {
4100      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
4101          << needed << " bytes)";
4102      return;
4103    }
4104    EnsureHeader(ptr);
4105    // Write out the chunk description.
4106    length /= ALLOCATION_UNIT_SIZE;   // Convert to allocation units.
4107    totalAllocationUnits_ += length;
4108    while (length > 256) {
4109      *p_++ = state | HPSG_PARTIAL;
4110      *p_++ = 255;     // length - 1
4111      length -= 256;
4112    }
4113    *p_++ = state;
4114    *p_++ = length - 1;
4115  }
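  // A worked example of the encoding above: a 4096-byte chunk is 512 allocation units, so it is
  // emitted as two records, (state | HPSG_PARTIAL, 255) followed by (state, 255), covering
  // 256 + 256 units; the space check reserves (((512 + 255) / 256) * 2) + 17 == 21 bytes for it.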
4116
4117  uint8_t ExamineObject(mirror::Object* o, bool is_native_heap)
4118      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
4119    if (o == NULL) {
4120      return HPSG_STATE(SOLIDITY_FREE, 0);
4121    }
4122
4123    // It's an allocated chunk. Figure out what it is.
4124
4125    // If we're looking at the native heap, we'll just return
4126    // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
4127    if (is_native_heap) {
4128      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4129    }
4130
4131    if (!Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) {
4132      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4133    }
4134
4135    mirror::Class* c = o->GetClass();
4136    if (c == NULL) {
4137      // The object was probably just created but hasn't been initialized yet.
4138      return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4139    }
4140
4141    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
4142      LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
4143      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4144    }
4145
4146    if (c->IsClassClass()) {
4147      return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
4148    }
4149
4150    if (c->IsArrayClass()) {
4151      if (o->IsObjectArray()) {
4152        return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4153      }
4154      switch (c->GetComponentSize()) {
4155      case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
4156      case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
4157      case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4158      case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
4159      }
4160    }
4161
4162    return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4163  }
4164
4165  std::vector<uint8_t> buf_;
4166  uint8_t* p_;
4167  uint8_t* pieceLenField_;
4168  void* startOfNextMemoryChunk_;
4169  size_t totalAllocationUnits_;
4170  uint32_t type_;
4171  bool merge_;
4172  bool needHeader_;
4173  size_t chunk_overhead_;
4174
4175  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
4176};
4177
4178static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
4179    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
4180  const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
4181  HeapChunkContext::HeapChunkCallback(
4182      obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
4183}
4184
4185void Dbg::DdmSendHeapSegments(bool native) {
4186  Dbg::HpsgWhen when;
4187  Dbg::HpsgWhat what;
4188  if (!native) {
4189    when = gDdmHpsgWhen;
4190    what = gDdmHpsgWhat;
4191  } else {
4192    when = gDdmNhsgWhen;
4193    what = gDdmNhsgWhat;
4194  }
4195  if (when == HPSG_WHEN_NEVER) {
4196    return;
4197  }
4198
4199  // Figure out what kind of chunks we'll be sending.
4200  CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS) << static_cast<int>(what);
4201
4202  // First, send a heap start chunk.
4203  uint8_t heap_id[4];
4204  JDWP::Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
4205  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
4206
4207  Thread* self = Thread::Current();
4208
4209  // To allow the Walk/InspectAll() below to exclusively-lock the
4210  // mutator lock, temporarily release the shared access to the
4211  // mutator lock here by transitioning to the suspended state.
4212  Locks::mutator_lock_->AssertSharedHeld(self);
4213  self->TransitionFromRunnableToSuspended(kSuspended);
4214
4215  // Send a series of heap segment chunks.
4216  HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
4217  if (native) {
4218#ifdef USE_DLMALLOC
4219    dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
4220#else
4221    UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
4222#endif
4223  } else {
4224    gc::Heap* heap = Runtime::Current()->GetHeap();
4225    for (const auto& space : heap->GetContinuousSpaces()) {
4226      if (space->IsDlMallocSpace()) {
4227        // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
4228        // allocation then the first sizeof(size_t) may belong to it.
4229        context.SetChunkOverhead(sizeof(size_t));
4230        space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4231      } else if (space->IsRosAllocSpace()) {
4232        context.SetChunkOverhead(0);
4233        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4234      } else if (space->IsBumpPointerSpace()) {
4235        context.SetChunkOverhead(0);
4236        ReaderMutexLock mu(self, *Locks::mutator_lock_);
4237        WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
4238        space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
4239      } else {
4240        UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
4241      }
4242      context.ResetStartOfNextChunk();
4243    }
4244    // Walk the large objects; these are not in the AllocSpace.
4245    context.SetChunkOverhead(0);
4246    heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4247  }
4248
4249  // Re-acquire the shared hold on the mutator lock.
4250  self->TransitionFromSuspendedToRunnable();
4251  Locks::mutator_lock_->AssertSharedHeld(self);
4252
4253  // Finally, send a heap end chunk.
4254  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
4255}
4256
4257static size_t GetAllocTrackerMax() {
4258#ifdef HAVE_ANDROID_OS
4259  // Check whether there's a system property overriding the number of records.
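  // For example, setting dalvik.vm.allocTrackerMax to 16384 requests 16K records.
  // Values that are not powers of two are rejected below, because the ring-buffer
  // index arithmetic (see Dbg::HeadIndex) masks with (alloc_record_max_ - 1).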
4260  const char* propertyName = "dalvik.vm.allocTrackerMax";
4261  char allocRecordMaxString[PROPERTY_VALUE_MAX];
4262  if (property_get(propertyName, allocRecordMaxString, "") > 0) {
4263    char* end;
4264    size_t value = strtoul(allocRecordMaxString, &end, 10);
4265    if (*end != '\0') {
4266      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
4267                 << "' --- invalid";
4268      return kDefaultNumAllocRecords;
4269    }
4270    if (!IsPowerOfTwo(value)) {
4271      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
4272                 << "' --- not a power of two";
4273      return kDefaultNumAllocRecords;
4274    }
4275    return value;
4276  }
4277#endif
4278  return kDefaultNumAllocRecords;
4279}
4280
4281void Dbg::SetAllocTrackingEnabled(bool enable) {
4282  Thread* self = Thread::Current();
4283  if (enable) {
4284    {
4285      MutexLock mu(self, *Locks::alloc_tracker_lock_);
4286      if (recent_allocation_records_ != NULL) {
4287        return;  // Already enabled, bail.
4288      }
4289      alloc_record_max_ = GetAllocTrackerMax();
4290      LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
4291                << kMaxAllocRecordStackDepth << " frames, taking "
4292                << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
4293      DCHECK_EQ(alloc_record_head_, 0U);
4294      DCHECK_EQ(alloc_record_count_, 0U);
4295      recent_allocation_records_ = new AllocRecord[alloc_record_max_];
4296      CHECK(recent_allocation_records_ != NULL);
4297    }
4298    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints(false);
4299  } else {
4300    {
4301      ScopedObjectAccess soa(self);  // For type_cache_.Clear();
4302      MutexLock mu(self, *Locks::alloc_tracker_lock_);
4303      if (recent_allocation_records_ == NULL) {
4304        return;  // Already disabled, bail.
4305      }
4306      LOG(INFO) << "Disabling alloc tracker";
4307      delete[] recent_allocation_records_;
4308      recent_allocation_records_ = NULL;
4309      alloc_record_head_ = 0;
4310      alloc_record_count_ = 0;
4311      type_cache_.Clear();
4312    }
4313    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
4314    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints(false);
4315  }
4316}
4317
4318struct AllocRecordStackVisitor : public StackVisitor {
4319  AllocRecordStackVisitor(Thread* thread, AllocRecord* record)
4320      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
4321      : StackVisitor(thread, NULL), record(record), depth(0) {}
4322
4323  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
4324  // annotalysis.
4325  bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
4326    if (depth >= kMaxAllocRecordStackDepth) {
4327      return false;
4328    }
4329    mirror::ArtMethod* m = GetMethod();
4330    if (!m->IsRuntimeMethod()) {
4331      record->StackElement(depth)->SetMethod(m);
4332      record->StackElement(depth)->SetDexPc(GetDexPc());
4333      ++depth;
4334    }
4335    return true;
4336  }
4337
4338  ~AllocRecordStackVisitor() {
4339    // Clear out any unused stack trace elements.
4340    for (; depth < kMaxAllocRecordStackDepth; ++depth) {
4341      record->StackElement(depth)->SetMethod(nullptr);
4342      record->StackElement(depth)->SetDexPc(0);
4343    }
4344  }
4345
4346  AllocRecord* record;
4347  size_t depth;
4348};
4349
4350void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) {
4351  Thread* self = Thread::Current();
4352  CHECK(self != NULL);
4353
4354  MutexLock mu(self, *Locks::alloc_tracker_lock_);
4355  if (recent_allocation_records_ == NULL) {
4356    // Tracking is disabled or in the process of shutting down; bail.
4357    return;
4358  }
4359
4360  // Advance and clip.
4361  if (++alloc_record_head_ == alloc_record_max_) {
4362    alloc_record_head_ = 0;
4363  }
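  // alloc_record_head_ now indexes the slot we are about to overwrite, so once the
  // record is filled in it names the most recently written entry; HeadIndex() relies
  // on this when locating the oldest entry.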
4364
4365  // Fill in the basics.
4366  AllocRecord* record = &recent_allocation_records_[alloc_record_head_];
4367  record->SetType(type);
4368  record->SetByteCount(byte_count);
4369  record->SetThinLockId(self->GetThreadId());
4370
4371  // Fill in the stack trace.
4372  AllocRecordStackVisitor visitor(self, record);
4373  visitor.WalkStack();
4374
4375  if (alloc_record_count_ < alloc_record_max_) {
4376    ++alloc_record_count_;
4377  }
4378}
4379
4380// Returns the index of the head element.
4381//
4382// We point at the most-recently-written record, so if alloc_record_count_ is 1
4383// we want to use the current element.  Take "head+1" and subtract count
4384// from it.
4385//
4386// We need to handle underflow in our circular buffer, so we add
4387// alloc_record_max_ and then mask it back down.
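// For example, with alloc_record_max_ == 8, alloc_record_head_ == 5 and
// alloc_record_count_ == 3, this yields (5 + 1 + 8 - 3) & 7 == 3: the three live
// records occupy slots 3, 4 and 5, oldest to newest.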
4388size_t Dbg::HeadIndex() {
4389  return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) &
4390      (Dbg::alloc_record_max_ - 1);
4391}
4392
4393void Dbg::DumpRecentAllocations() {
4394  ScopedObjectAccess soa(Thread::Current());
4395  MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
4396  if (recent_allocation_records_ == NULL) {
4397    LOG(INFO) << "Not recording tracked allocations";
4398    return;
4399  }
4400
4401  // "i" is the head of the list.  We want to start at the end of the
4402  // list and move forward to the tail.
4403  size_t i = HeadIndex();
4404  const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
4405  uint16_t count = capped_count;
4406
4407  LOG(INFO) << "Tracked allocations (head=" << alloc_record_head_ << " count=" << count << ")";
4408  while (count--) {
4409    AllocRecord* record = &recent_allocation_records_[i];
4410
4411    LOG(INFO) << StringPrintf(" Thread %-2d %6zu bytes ", record->ThinLockId(), record->ByteCount())
4412              << PrettyClass(record->Type());
4413
4414    for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
4415      AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame);
4416      mirror::ArtMethod* m = stack_element->Method();
4417      if (m == NULL) {
4418        break;
4419      }
4420      LOG(INFO) << "    " << PrettyMethod(m) << " line " << stack_element->LineNumber();
4421    }
4422
4423    // Pause periodically to help logcat catch up.
4424    if ((count % 5) == 0) {
4425      usleep(40000);
4426    }
4427
4428    i = (i + 1) & (alloc_record_max_ - 1);
4429  }
4430}
4431
4432class StringTable {
4433 public:
4434  StringTable() {
4435  }
4436
4437  void Add(const std::string& str) {
4438    table_.insert(str);
4439  }
4440
4441  void Add(const char* str) {
4442    table_.insert(str);
4443  }
4444
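  // Returns the position of |s| within the sorted set. WriteTo() emits the strings
  // in the same (sorted) order, so these positions are the indexes that appear in
  // the DDMS payload.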
4445  size_t IndexOf(const char* s) const {
4446    auto it = table_.find(s);
4447    if (it == table_.end()) {
4448      LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
4449    }
4450    return std::distance(table_.begin(), it);
4451  }
4452
4453  size_t Size() const {
4454    return table_.size();
4455  }
4456
4457  void WriteTo(std::vector<uint8_t>& bytes) const {
4458    for (const std::string& str : table_) {
4459      const char* s = str.c_str();
4460      size_t s_len = CountModifiedUtf8Chars(s);
4461      std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
4462      ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
4463      JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
4464    }
4465  }
4466
4467 private:
4468  std::set<std::string> table_;
4469  DISALLOW_COPY_AND_ASSIGN(StringTable);
4470};
4471
4472static const char* GetMethodSourceFile(mirror::ArtMethod* method)
4473    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4474  DCHECK(method != nullptr);
4475  const char* source_file = method->GetDeclaringClassSourceFile();
4476  return (source_file != nullptr) ? source_file : "";
4477}
4478
4479/*
4480 * The data we send to DDMS contains everything we have recorded.
4481 *
4482 * Message header (all values big-endian):
4483 * (1b) message header len (to allow future expansion); includes itself
4484 * (1b) entry header len
4485 * (1b) stack frame len
4486 * (2b) number of entries
4487 * (4b) offset to string table from start of message
4488 * (2b) number of class name strings
4489 * (2b) number of method name strings
4490 * (2b) number of source file name strings
4491 * For each entry:
4492 *   (4b) total allocation size
4493 *   (2b) thread id
4494 *   (2b) allocated object's class name index
4495 *   (1b) stack depth
4496 *   For each stack frame:
4497 *     (2b) method's class name
4498 *     (2b) method name
4499 *     (2b) method source file
4500 *     (2b) line number, clipped to 32767; -2 if native; -1 if no source
4501 * (xb) class name strings
4502 * (xb) method name strings
4503 * (xb) source file strings
4504 *
4505 * As with other DDM traffic, strings are sent as a 4-byte length
4506 * followed by UTF-16 data.
4507 *
4508 * We send up 16-bit unsigned indexes into string tables.  In theory there
4509 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
4510 * each table, but in practice there should be far fewer.
4511 *
4512 * The chief reason for using a string table here is to keep the size of
4513 * the DDMS message to a minimum.  This is partly to make the protocol
4514 * efficient, but also because we have to form the whole thing up all at
4515 * once in a memory buffer.
4516 *
4517 * We use separate string tables for class names, method names, and source
4518 * files to keep the indexes small.  There will generally be no overlap
4519 * between the contents of these tables.
4520 */
4521jbyteArray Dbg::GetRecentAllocations() {
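  // The dump below is normally disabled; flip the constant to true locally to log
  // the tracked allocations while debugging.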
4522  if (false) {
4523    DumpRecentAllocations();
4524  }
4525
4526  Thread* self = Thread::Current();
4527  std::vector<uint8_t> bytes;
4528  {
4529    MutexLock mu(self, *Locks::alloc_tracker_lock_);
4530    //
4531    // Part 1: Generate string tables.
4532    //
4533    StringTable class_names;
4534    StringTable method_names;
4535    StringTable filenames;
4536
4537    const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
4538    uint16_t count = capped_count;
4539    size_t idx = HeadIndex();
4540    while (count--) {
4541      AllocRecord* record = &recent_allocation_records_[idx];
4542      std::string temp;
4543      class_names.Add(record->Type()->GetDescriptor(&temp));
4544      for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
4545        mirror::ArtMethod* m = record->StackElement(i)->Method();
4546        if (m != NULL) {
4547          class_names.Add(m->GetDeclaringClassDescriptor());
4548          method_names.Add(m->GetName());
4549          filenames.Add(GetMethodSourceFile(m));
4550        }
4551      }
4552
4553      idx = (idx + 1) & (alloc_record_max_ - 1);
4554    }
4555
4556    LOG(INFO) << "allocation records: " << capped_count;
4557
4558    //
4559    // Part 2: Generate the output and store it in the buffer.
4560    //
4561
4562    // (1b) message header len (to allow future expansion); includes itself
4563    // (1b) entry header len
4564    // (1b) stack frame len
4565    const int kMessageHeaderLen = 15;
4566    const int kEntryHeaderLen = 9;
4567    const int kStackFrameLen = 8;
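    // These lengths follow from the layout documented above the function:
    // 1 + 1 + 1 + 2 + 4 + 2 + 2 + 2 == 15, 4 + 2 + 2 + 1 == 9, and 2 + 2 + 2 + 2 == 8.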
4568    JDWP::Append1BE(bytes, kMessageHeaderLen);
4569    JDWP::Append1BE(bytes, kEntryHeaderLen);
4570    JDWP::Append1BE(bytes, kStackFrameLen);
4571
4572    // (2b) number of entries
4573    // (4b) offset to string table from start of message
4574    // (2b) number of class name strings
4575    // (2b) number of method name strings
4576    // (2b) number of source file name strings
4577    JDWP::Append2BE(bytes, capped_count);
4578    size_t string_table_offset = bytes.size();
4579    JDWP::Append4BE(bytes, 0);  // We'll patch this later...
4580    JDWP::Append2BE(bytes, class_names.Size());
4581    JDWP::Append2BE(bytes, method_names.Size());
4582    JDWP::Append2BE(bytes, filenames.Size());
4583
4584    idx = HeadIndex();
4585    std::string temp;
4586    for (count = capped_count; count != 0; --count) {
4587      // For each entry:
4588      // (4b) total allocation size
4589      // (2b) thread id
4590      // (2b) allocated object's class name index
4591      // (1b) stack depth
4592      AllocRecord* record = &recent_allocation_records_[idx];
4593      size_t stack_depth = record->GetDepth();
4594      size_t allocated_object_class_name_index =
4595          class_names.IndexOf(record->Type()->GetDescriptor(&temp));
4596      JDWP::Append4BE(bytes, record->ByteCount());
4597      JDWP::Append2BE(bytes, record->ThinLockId());
4598      JDWP::Append2BE(bytes, allocated_object_class_name_index);
4599      JDWP::Append1BE(bytes, stack_depth);
4600
4601      for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
4602        // For each stack frame:
4603        // (2b) method's class name
4604        // (2b) method name
4605        // (2b) method source file
4606        // (2b) line number, clipped to 32767; -2 if native; -1 if no source
4607        mirror::ArtMethod* m = record->StackElement(stack_frame)->Method();
4608        size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
4609        size_t method_name_index = method_names.IndexOf(m->GetName());
4610        size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
4611        JDWP::Append2BE(bytes, class_name_index);
4612        JDWP::Append2BE(bytes, method_name_index);
4613        JDWP::Append2BE(bytes, file_name_index);
4614        JDWP::Append2BE(bytes, record->StackElement(stack_frame)->LineNumber());
4615      }
4616      idx = (idx + 1) & (alloc_record_max_ - 1);
4617    }
4618
4619    // (xb) class name strings
4620    // (xb) method name strings
4621    // (xb) source file strings
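    // Patch the 4-byte placeholder written above with the offset of the string
    // tables, which start at the current end of the buffer.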
4622    JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
4623    class_names.WriteTo(bytes);
4624    method_names.WriteTo(bytes);
4625    filenames.WriteTo(bytes);
4626  }
4627  JNIEnv* env = self->GetJniEnv();
4628  jbyteArray result = env->NewByteArray(bytes.size());
4629  if (result != NULL) {
4630    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
4631  }
4632  return result;
4633}
4634
4635mirror::ArtMethod* DeoptimizationRequest::Method() const {
4636  ScopedObjectAccessUnchecked soa(Thread::Current());
4637  return soa.DecodeMethod(method_);
4638}
4639
4640void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) {
4641  ScopedObjectAccessUnchecked soa(Thread::Current());
4642  method_ = soa.EncodeMethod(m);
4643}
4644
4645}  // namespace art
4646