debugger.cc revision d35776413901a6a9d478e06dc354ea4f7d962e04
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger.h"

#include <sys/uio.h>

#include <set>

#include "arch/context.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "field_helper.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope.h"
#include "jdwp/object_registry.h"
#include "method_helper.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "quick/inline_method_analyser.h"
#include "reflection.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "throw_location.h"
#include "utf.h"
#include "verifier/method_verifier-inl.h"
#include "well_known_classes.h"

#ifdef HAVE_ANDROID_OS
#include "cutils/properties.h"
#endif

namespace art {

static const size_t kMaxAllocRecordStackDepth = 16;  // Max 255.
static const size_t kDefaultNumAllocRecords = 64*1024;  // Must be a power of 2. 2BE can hold 64k-1.

// Limit alloc_record_count to the 2BE value that is the limit of the current protocol.
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
  if (alloc_record_count > 0xffff) {
    return 0xffff;
  }
  return alloc_record_count;
}
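// Illustrative example: with 70000 live allocation records,
// CappedAllocRecordCount(70000) returns 0xffff (65535), the largest value the
// two-byte big-endian ("2BE") count field of the protocol can carry.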

class AllocRecordStackTraceElement {
 public:
  AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {
  }

  int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* method = Method();
    DCHECK(method != nullptr);
    return method->GetLineNumFromDexPC(DexPc());
  }

  mirror::ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    return soa.DecodeMethod(method_);
  }

  void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(m);
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  void SetDexPc(uint32_t pc) {
    dex_pc_ = pc;
  }

 private:
  jmethodID method_;
  uint32_t dex_pc_;
};

jobject Dbg::TypeCache::Add(mirror::Class* t) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  int32_t hash_code = t->IdentityHashCode();
  auto range = objects_.equal_range(hash_code);
  for (auto it = range.first; it != range.second; ++it) {
    if (soa.Decode<mirror::Class*>(it->second) == t) {
      // Found a matching weak global, return it.
      return it->second;
    }
  }
  JNIEnv* env = soa.Env();
  const jobject local_ref = soa.AddLocalReference<jobject>(t);
  const jobject weak_global = env->NewWeakGlobalRef(local_ref);
  env->DeleteLocalRef(local_ref);
  objects_.insert(std::make_pair(hash_code, weak_global));
  return weak_global;
}

void Dbg::TypeCache::Clear() {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  Thread* self = Thread::Current();
  for (const auto& p : objects_) {
    vm->DeleteWeakGlobalRef(self, p.second);
  }
  objects_.clear();
}

class AllocRecord {
 public:
  AllocRecord() : type_(nullptr), byte_count_(0), thin_lock_id_(0) {}

  mirror::Class* Type() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_));
  }

  void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
                                                       Locks::alloc_tracker_lock_) {
    type_ = Dbg::type_cache_.Add(t);
  }

  size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    size_t depth = 0;
    while (depth < kMaxAllocRecordStackDepth && stack_[depth].Method() != NULL) {
      ++depth;
    }
    return depth;
  }

  size_t ByteCount() const {
    return byte_count_;
  }

  void SetByteCount(size_t count) {
    byte_count_ = count;
  }

  uint16_t ThinLockId() const {
    return thin_lock_id_;
  }

  void SetThinLockId(uint16_t id) {
    thin_lock_id_ = id;
  }

  AllocRecordStackTraceElement* StackElement(size_t index) {
    DCHECK_LT(index, kMaxAllocRecordStackDepth);
    return &stack_[index];
  }

 private:
  jobject type_;  // This is a weak global.
  size_t byte_count_;
  uint16_t thin_lock_id_;
  AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];  // Unused entries have NULL method.
};

class Breakpoint {
 public:
  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(method);
  }

  Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    : method_(nullptr), dex_pc_(other.dex_pc_),
      need_full_deoptimization_(other.need_full_deoptimization_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(other.Method());
  }

  mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    return soa.DecodeMethod(method_);
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  bool NeedFullDeoptimization() const {
    return need_full_deoptimization_;
  }

 private:
  // The location of this breakpoint.
  jmethodID method_;
  uint32_t dex_pc_;

  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
  bool need_full_deoptimization_;
};

static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
  return os;
}

class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
 public:
  DebugInstrumentationListener() {}
  virtual ~DebugInstrumentationListener() {}

  void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                     uint32_t dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point, but native method entry stubs aren't.
      return;
    }
    Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
  }

  void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc, const JValue& return_value)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point, but native method entry stubs aren't.
      return;
    }
    Dbg::UpdateDebugger(thread, this_object, method, dex_pc, Dbg::kMethodExit, &return_value);
  }

  void MethodUnwind(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // We're not registered to listen to this kind of event, so complain.
    LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
               << " " << dex_pc;
  }

  void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                  uint32_t new_dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, 0, nullptr);
  }

  void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                 uint32_t dex_pc, mirror::ArtField* field)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
  }

  void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
  }

  void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
                       mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
                       mirror::Throwable* exception_object)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;

// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;

// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
static bool gJdwpConfigured = false;

// Broken-down JDWP options. (Only valid if IsJdwpConfigured() is true.)
static JDWP::JdwpOptions gJdwpOptions;

// Runtime JDWP state.
static JDWP::JdwpState* gJdwpState = NULL;
static bool gDebuggerConnected;  // debugger or DDMS is connected.
static bool gDebuggerActive;     // debugger is making requests.
static bool gDisposed;           // debugger called VirtualMachine.Dispose, so we should drop the connection.

static bool gDdmThreadNotification = false;

// DDMS GC-related settings.
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;

static ObjectRegistry* gRegistry = nullptr;

// Recent allocation tracking.
AllocRecord* Dbg::recent_allocation_records_ = nullptr;  // TODO: CircularBuffer<AllocRecord>
size_t Dbg::alloc_record_max_ = 0;
size_t Dbg::alloc_record_head_ = 0;
size_t Dbg::alloc_record_count_ = 0;
Dbg::TypeCache Dbg::type_cache_;

// Deoptimization support.
std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
size_t Dbg::full_deoptimization_event_count_ = 0;
size_t Dbg::delayed_full_undeoptimization_count_ = 0;

// Instrumentation event reference counters.
size_t Dbg::dex_pc_change_event_ref_count_ = 0;
size_t Dbg::method_enter_event_ref_count_ = 0;
size_t Dbg::method_exit_event_ref_count_ = 0;
size_t Dbg::field_read_event_ref_count_ = 0;
size_t Dbg::field_write_event_ref_count_ = 0;
size_t Dbg::exception_catch_event_ref_count_ = 0;
uint32_t Dbg::instrumentation_events_ = 0;

// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);

void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
                                RootType root_type) {
  if (receiver != nullptr) {
    callback(&receiver, arg, tid, root_type);
  }
  if (thread != nullptr) {
    callback(&thread, arg, tid, root_type);
  }
  if (klass != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&klass), arg, tid, root_type);
  }
  if (method != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
  }
}

void DebugInvokeReq::Clear() {
  invoke_needed = false;
  receiver = nullptr;
  thread = nullptr;
  klass = nullptr;
  method = nullptr;
}

void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
                                   RootType root_type) {
  if (method != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
  }
}

bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
  return dex_pcs.find(dex_pc) != dex_pcs.end();
}

void SingleStepControl::Clear() {
  is_active = false;
  method = nullptr;
  dex_pcs.clear();
}

static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
    LOCKS_EXCLUDED(Locks::breakpoint_lock_)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
      return true;
    }
  }
  return false;
}

static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
  // A thread may be suspended for GC; in this code, we really want to know whether
  // there's a debugger suspension active.
  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
}

static mirror::Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    status = JDWP::ERR_INVALID_OBJECT;
    return NULL;
  }
  if (!o->IsArrayInstance()) {
    status = JDWP::ERR_INVALID_ARRAY;
    return NULL;
  }
  status = JDWP::ERR_NONE;
  return o->AsArray();
}

static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    status = JDWP::ERR_INVALID_OBJECT;
    return NULL;
  }
  if (!o->IsClass()) {
    status = JDWP::ERR_INVALID_CLASS;
    return NULL;
  }
  status = JDWP::ERR_NONE;
  return o->AsClass();
}

static JDWP::JdwpError DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id, Thread*& thread)
    EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* thread_peer = gRegistry->Get<mirror::Object*>(thread_id);
  if (thread_peer == NULL || thread_peer == ObjectRegistry::kInvalidObject) {
    // This isn't even an object.
    return JDWP::ERR_INVALID_OBJECT;
  }

  mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
    // This isn't a thread.
    return JDWP::ERR_INVALID_THREAD;
  }

  thread = Thread::FromManagedThread(soa, thread_peer);
  if (thread == NULL) {
    // This is a java.lang.Thread without a Thread*. Must be a zombie.
    return JDWP::ERR_THREAD_NOT_ALIVE;
  }
  return JDWP::ERR_NONE;
}

static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
  return static_cast<JDWP::JdwpTag>(descriptor[0]);
}
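// Illustrative examples: descriptor "I" yields JT_INT (ASCII 'I', 0x49), and any
// reference descriptor such as "Ljava/lang/String;" yields JT_OBJECT (ASCII 'L'),
// never a more specific tag like JT_STRING.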

static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  std::string temp;
  const char* descriptor = klass->GetDescriptor(&temp);
  return BasicTagFromDescriptor(descriptor);
}

static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(c != NULL);
  if (c->IsArrayClass()) {
    return JDWP::JT_ARRAY;
  }
  if (c->IsStringClass()) {
    return JDWP::JT_STRING;
  }
  if (c->IsClassClass()) {
    return JDWP::JT_CLASS_OBJECT;
  }
  {
    mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
    if (thread_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD;
    }
  }
  {
    mirror::Class* thread_group_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
    if (thread_group_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD_GROUP;
    }
  }
  {
    mirror::Class* class_loader_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader);
    if (class_loader_class->IsAssignableFrom(c)) {
      return JDWP::JT_CLASS_LOADER;
    }
  }
  return JDWP::JT_OBJECT;
}

/*
 * Objects declared to hold Object might actually hold a more specific
 * type.  The debugger may take a special interest in these (e.g. it
 * wants to display the contents of Strings), so we want to return an
 * appropriate tag.
 *
 * Null objects are tagged JT_OBJECT.
 */
static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
}
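// Illustrative example: a field declared as java.lang.Object that currently holds
// a java.lang.String is tagged JT_STRING, and an instance of a Thread subclass is
// tagged JT_THREAD; only a null reference falls back to the plain JT_OBJECT tag.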

static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_BOOLEAN:
  case JDWP::JT_BYTE:
  case JDWP::JT_CHAR:
  case JDWP::JT_FLOAT:
  case JDWP::JT_DOUBLE:
  case JDWP::JT_INT:
  case JDWP::JT_LONG:
  case JDWP::JT_SHORT:
  case JDWP::JT_VOID:
    return true;
  default:
    return false;
  }
}

/*
 * Handle one of the JDWP name/value pairs.
 *
 * JDWP options are:
 *  help: if specified, show help message and bail
 *  transport: may be dt_socket or dt_shmem
 *  address: for dt_socket, "host:port", or just "port" when listening
 *  server: if "y", wait for debugger to attach; if "n", attach to debugger
 *  timeout: how long to wait for debugger to connect / listen
 *
 * Useful with server=n (these aren't supported yet):
 *  onthrow=<exception-name>: connect to debugger when exception thrown
 *  onuncaught=y|n: connect to debugger when uncaught exception thrown
 *  launch=<command-line>: launch the debugger itself
 *
 * The "transport" option is required, as is "address" if server=n.
 */
static bool ParseJdwpOption(const std::string& name, const std::string& value) {
  if (name == "transport") {
    if (value == "dt_socket") {
      gJdwpOptions.transport = JDWP::kJdwpTransportSocket;
    } else if (value == "dt_android_adb") {
      gJdwpOptions.transport = JDWP::kJdwpTransportAndroidAdb;
    } else {
      LOG(ERROR) << "JDWP transport not supported: " << value;
      return false;
    }
  } else if (name == "server") {
    if (value == "n") {
      gJdwpOptions.server = false;
    } else if (value == "y") {
      gJdwpOptions.server = true;
    } else {
      LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'";
      return false;
    }
  } else if (name == "suspend") {
    if (value == "n") {
      gJdwpOptions.suspend = false;
    } else if (value == "y") {
      gJdwpOptions.suspend = true;
    } else {
      LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'";
      return false;
    }
  } else if (name == "address") {
    /* this is either <port> or <host>:<port> */
    std::string port_string;
    gJdwpOptions.host.clear();
    std::string::size_type colon = value.find(':');
    if (colon != std::string::npos) {
      gJdwpOptions.host = value.substr(0, colon);
      port_string = value.substr(colon + 1);
    } else {
      port_string = value;
    }
    if (port_string.empty()) {
      LOG(ERROR) << "JDWP address missing port: " << value;
      return false;
    }
    char* end;
    uint64_t port = strtoul(port_string.c_str(), &end, 10);
    if (*end != '\0' || port > 0xffff) {
      LOG(ERROR) << "JDWP address has junk in port field: " << value;
      return false;
    }
    gJdwpOptions.port = port;
  } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") {
    /* valid but unsupported */
    LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'";
  } else {
    LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'";
  }

  return true;
}

/*
 * Parse the latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.:
 * "transport=dt_socket,address=8000,server=y,suspend=n"
 */
bool Dbg::ParseJdwpOptions(const std::string& options) {
  VLOG(jdwp) << "ParseJdwpOptions: " << options;

  std::vector<std::string> pairs;
  Split(options, ',', pairs);

  for (size_t i = 0; i < pairs.size(); ++i) {
    std::string::size_type equals = pairs[i].find('=');
    if (equals == std::string::npos) {
      LOG(ERROR) << "Can't parse JDWP option '" << pairs[i] << "' in '" << options << "'";
      return false;
    }
    ParseJdwpOption(pairs[i].substr(0, equals), pairs[i].substr(equals + 1));
  }

  if (gJdwpOptions.transport == JDWP::kJdwpTransportUnknown) {
    LOG(ERROR) << "Must specify JDWP transport: " << options;
    return false;
  }
  if (!gJdwpOptions.server && (gJdwpOptions.host.empty() || gJdwpOptions.port == 0)) {
    LOG(ERROR) << "Must specify JDWP host and port when server=n: " << options;
    return false;
  }

  gJdwpConfigured = true;
  return true;
}

void Dbg::StartJdwp() {
  if (!gJdwpAllowed || !IsJdwpConfigured()) {
    // No JDWP for you!
    return;
  }

  CHECK(gRegistry == nullptr);
  gRegistry = new ObjectRegistry;

  // Init JDWP if the debugger is enabled. This may connect out to a
  // debugger, passively listen for a debugger, or block waiting for a
  // debugger.
  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
  if (gJdwpState == NULL) {
    // We probably failed because some other process has the port already, which means that
    // if we don't abort the user is likely to think they're talking to us when they're actually
    // talking to that other process.
    LOG(FATAL) << "Debugger thread failed to initialize";
  }

  // If a debugger has already attached, send the "welcome" message.
  // This may cause us to suspend all threads.
  if (gJdwpState->IsActive()) {
    ScopedObjectAccess soa(Thread::Current());
    if (!gJdwpState->PostVMStart()) {
      LOG(WARNING) << "Failed to post 'start' message to debugger";
    }
  }
}

void Dbg::StopJdwp() {
  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
  // destruction of gJdwpState).
  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
    gJdwpState->PostVMDeath();
  }
  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
  Disposed();
  delete gJdwpState;
  gJdwpState = nullptr;
  delete gRegistry;
  gRegistry = nullptr;
}

void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);
  }
}

void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

DebugInvokeReq* Dbg::GetInvokeReq() {
  return Thread::Current()->GetInvokeReq();
}

Thread* Dbg::GetDebugThread() {
  return (gJdwpState != NULL) ? gJdwpState->GetDebugThread() : NULL;
}

void Dbg::ClearWaitForEventThread() {
  gJdwpState->ClearWaitForEventThread();
}

void Dbg::Connected() {
  CHECK(!gDebuggerConnected);
  VLOG(jdwp) << "JDWP has attached";
  gDebuggerConnected = true;
  gDisposed = false;
}

void Dbg::Disposed() {
  gDisposed = true;
}

bool Dbg::IsDisposed() {
  return gDisposed;
}

void Dbg::GoActive() {
  // Enable all debugging features, including scans for breakpoints.
  // This is a no-op if we're already active.
  // Only called from the JDWP handler thread.
  if (gDebuggerActive) {
    return;
  }

  {
    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
    ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
    CHECK_EQ(gBreakpoints.size(), 0U);
  }

  {
    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
    CHECK_EQ(deoptimization_requests_.size(), 0U);
    CHECK_EQ(full_deoptimization_event_count_, 0U);
    CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
    CHECK_EQ(method_enter_event_ref_count_, 0U);
    CHECK_EQ(method_exit_event_ref_count_, 0U);
    CHECK_EQ(field_read_event_ref_count_, 0U);
    CHECK_EQ(field_write_event_ref_count_, 0U);
    CHECK_EQ(exception_catch_event_ref_count_, 0U);
  }

  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);
  CHECK_NE(old_state, kRunnable);
  runtime->GetInstrumentation()->EnableDeoptimization();
  instrumentation_events_ = 0;
  gDebuggerActive = true;
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();

  LOG(INFO) << "Debugger is active";
}

void Dbg::Disconnected() {
  CHECK(gDebuggerConnected);

  LOG(INFO) << "Debugger is no longer active";

  // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
  // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
  // and clear the object registry.
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);

  // Debugger may not be active at this point.
  if (gDebuggerActive) {
    {
      // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
      // This prevents us from having any pending deoptimization request when the debugger attaches
      // to us again while no event has been requested yet.
      MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
      deoptimization_requests_.clear();
      full_deoptimization_event_count_ = 0U;
      delayed_full_undeoptimization_count_ = 0U;
    }
    if (instrumentation_events_ != 0) {
      runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
                                                    instrumentation_events_);
      instrumentation_events_ = 0;
    }
    runtime->GetInstrumentation()->DisableDeoptimization();
    gDebuggerActive = false;
  }
  gRegistry->Clear();
  gDebuggerConnected = false;
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();
}

bool Dbg::IsDebuggerActive() {
  return gDebuggerActive;
}

bool Dbg::IsJdwpConfigured() {
  return gJdwpConfigured;
}

int64_t Dbg::LastDebuggerActivity() {
  return gJdwpState->LastDebuggerActivity();
}

void Dbg::UndoDebuggerSuspensions() {
  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
}

std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id);
  if (o == NULL) {
    return "NULL";
  }
  if (o == ObjectRegistry::kInvalidObject) {
    return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
  }
  if (!o->IsClass()) {
    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
  }
  std::string temp;
  return DescriptorToName(o->AsClass()->GetDescriptor(&temp));
}

JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_object_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, status);
  if (c == NULL) {
    return status;
  }
  class_object_id = gRegistry->Add(c);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclass_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, status);
  if (c == NULL) {
    return status;
  }
  if (c->IsInterface()) {
    // http://code.google.com/p/android/issues/detail?id=20856
    superclass_id = 0;
  } else {
    superclass_id = gRegistry->Add(c->GetSuperClass());
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader()));
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, status);
  if (c == NULL) {
    return status;
  }

  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;

  // Set ACC_SUPER. Dex files don't contain this flag, and all classes (but not interfaces)
  // are supposed to have it set. Class.getModifiers doesn't return it, but JDWP does,
  // so we set it here.
  if ((access_flags & kAccInterface) == 0) {
    access_flags |= kAccSuper;
  }

  expandBufAdd4BE(pReply, access_flags);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  // Ensure all threads are suspended while we read objects' lock words.
  Thread* self = Thread::Current();
  CHECK_EQ(self->GetState(), kRunnable);
  self->TransitionFromRunnableToSuspended(kSuspended);
  Runtime::Current()->GetThreadList()->SuspendAll();

  MonitorInfo monitor_info(o);

  Runtime::Current()->GetThreadList()->ResumeAll();
  self->TransitionFromSuspendedToRunnable();

  if (monitor_info.owner_ != NULL) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
  } else {
    expandBufAddObjectId(reply, gRegistry->Add(NULL));
  }
  expandBufAdd4BE(reply, monitor_info.entry_count_);
  expandBufAdd4BE(reply, monitor_info.waiters_.size());
  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
                                      std::vector<JDWP::ObjectId>& monitors,
                                      std::vector<uint32_t>& stack_depths) {
  struct OwnedMonitorVisitor : public StackVisitor {
    OwnedMonitorVisitor(Thread* thread, Context* context,
                        std::vector<JDWP::ObjectId>* monitor_vector,
                        std::vector<uint32_t>* stack_depth_vector)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), current_stack_depth(0),
        monitors(monitor_vector), stack_depths(stack_depth_vector) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
        ++current_stack_depth;
      }
      return true;
    }

    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
      visitor->stack_depths->push_back(visitor->current_stack_depth);
    }

    size_t current_stack_depth;
    std::vector<JDWP::ObjectId>* monitors;
    std::vector<uint32_t>* stack_depths;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  Thread* thread;
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
    if (error != JDWP::ERR_NONE) {
      return error;
    }
    if (!IsSuspendedForDebugger(soa, thread)) {
      return JDWP::ERR_THREAD_NOT_SUSPENDED;
    }
  }
  std::unique_ptr<Context> context(Context::Create());
  OwnedMonitorVisitor visitor(thread, context.get(), &monitors, &stack_depths);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
                                         JDWP::ObjectId& contended_monitor) {
  mirror::Object* contended_monitor_obj;
  ScopedObjectAccessUnchecked soa(Thread::Current());
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    Thread* thread;
    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
    if (error != JDWP::ERR_NONE) {
      return error;
    }
    if (!IsSuspendedForDebugger(soa, thread)) {
      return JDWP::ERR_THREAD_NOT_SUSPENDED;
    }
    contended_monitor_obj = Monitor::GetContendedMonitor(thread);
  }
  // Add() requires the thread_list_lock_ not held to avoid the lock
  // level violation.
  contended_monitor = gRegistry->Add(contended_monitor_obj);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                       std::vector<uint64_t>& counts)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  std::vector<mirror::Class*> classes;
  counts.clear();
  for (size_t i = 0; i < class_ids.size(); ++i) {
    JDWP::JdwpError status;
    mirror::Class* c = DecodeClass(class_ids[i], status);
    if (c == NULL) {
      return status;
    }
    classes.push_back(c);
    counts.push_back(0);
  }
  heap->CountInstances(classes, false, &counts[0]);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>& instances)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // We only want reachable instances, so do a GC.
  heap->CollectGarbage(false);
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == nullptr) {
    return status;
  }
  std::vector<mirror::Object*> raw_instances;
  heap->GetInstances(c, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    instances.push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                         std::vector<JDWP::ObjectId>& referring_objects)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  std::vector<mirror::Object*> raw_instances;
  heap->GetReferringObjects(o, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    referring_objects.push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->DisableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  // Unlike DisableCollection, the JDWP specs do not state that an invalid object causes an
  // error. The RI also ignores these cases and never returns an error. However, it's not obvious
  // why this command should behave differently from the DisableCollection and IsCollected
  // commands. So let's be more strict and return an error if this happens.
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->EnableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool& is_collected)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (object_id == 0) {
    // Null object id is invalid.
    return JDWP::ERR_INVALID_OBJECT;
  }
  // JDWP specs state an INVALID_OBJECT error is returned if the object ID is not valid. However,
  // the RI seems to ignore this and assumes the object has been collected.
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    is_collected = true;
  } else {
    is_collected = gRegistry->IsCollected(object_id);
  }
  return JDWP::ERR_NONE;
}

void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gRegistry->DisposeObject(object_id, reference_count);
}

static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(klass != nullptr);
  if (klass->IsArrayClass()) {
    return JDWP::TT_ARRAY;
  } else if (klass->IsInterface()) {
    return JDWP::TT_INTERFACE;
  } else {
    return JDWP::TT_CLASS;
  }
}

JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, class_id);
  return JDWP::ERR_NONE;
}

void Dbg::GetClassList(std::vector<JDWP::RefTypeId>& classes) {
  // Get the complete list of reference classes (i.e. all classes except
  // the primitive types).
  // Returns a newly-allocated buffer full of RefTypeId values.
  struct ClassListCreator {
    explicit ClassListCreator(std::vector<JDWP::RefTypeId>& classes) : classes(classes) {
    }

    static bool Visit(mirror::Class* c, void* arg) {
      return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
    }

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS {
      if (!c->IsPrimitive()) {
        classes.push_back(gRegistry->AddRefType(c));
      }
      return true;
    }

    std::vector<JDWP::RefTypeId>& classes;
  };

  ClassListCreator clc(classes);
  Runtime::Current()->GetClassLinker()->VisitClasses(ClassListCreator::Visit, &clc);
}

JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
                                  uint32_t* pStatus, std::string* pDescriptor) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }

  if (c->IsArrayClass()) {
    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
    *pTypeTag = JDWP::TT_ARRAY;
  } else {
    if (c->IsErroneous()) {
      *pStatus = JDWP::CS_ERROR;
    } else {
      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
    }
    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
  }

  if (pDescriptor != NULL) {
    std::string temp;
    *pDescriptor = c->GetDescriptor(&temp);
  }
  return JDWP::ERR_NONE;
}

void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>& ids) {
  std::vector<mirror::Class*> classes;
  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
  ids.clear();
  for (size_t i = 0; i < classes.size(); ++i) {
    ids.push_back(gRegistry->Add(classes[i]));
  }
}

JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());

  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, type_id);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }
  std::string temp;
  *signature = c->GetDescriptor(&temp);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == nullptr) {
    return status;
  }
  const char* source_file = c->GetSourceFile();
  if (source_file == nullptr) {
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  result = source_file;
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t& tag) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  tag = TagFromObject(soa, o);
  return JDWP::ERR_NONE;
}

size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_VOID:
    return 0;
  case JDWP::JT_BYTE:
  case JDWP::JT_BOOLEAN:
    return 1;
  case JDWP::JT_CHAR:
  case JDWP::JT_SHORT:
    return 2;
  case JDWP::JT_FLOAT:
  case JDWP::JT_INT:
    return 4;
  case JDWP::JT_ARRAY:
  case JDWP::JT_OBJECT:
  case JDWP::JT_STRING:
  case JDWP::JT_THREAD:
  case JDWP::JT_THREAD_GROUP:
  case JDWP::JT_CLASS_LOADER:
  case JDWP::JT_CLASS_OBJECT:
    return sizeof(JDWP::ObjectId);
  case JDWP::JT_DOUBLE:
  case JDWP::JT_LONG:
    return 8;
  default:
    LOG(FATAL) << "Unknown tag " << tag;
    return -1;
  }
}

JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int& length) {
  JDWP::JdwpError status;
  mirror::Array* a = DecodeArray(array_id, status);
  if (a == NULL) {
    return status;
  }
  length = a->GetLength();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Array* a = DecodeArray(array_id, status);
  if (a == nullptr) {
    return status;
  }

  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
  expandBufAdd1(pReply, element_tag);
  expandBufAdd4BE(pReply, count);

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    uint8_t* dst = expandBufAddSpace(pReply, count * width);
    if (width == 8) {
      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
    } else if (width == 4) {
      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
    } else if (width == 2) {
      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
    } else {
      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
      memcpy(dst, &src[offset * width], count * width);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      mirror::Object* element = oa->Get(offset + i);
      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
                                                        : element_tag;
      expandBufAdd1(pReply, specific_tag);
      expandBufAddObjectId(pReply, gRegistry->Add(element));
    }
  }

  return JDWP::ERR_NONE;
}

template <typename T>
static void CopyArrayData(mirror::Array* a, JDWP::Request& src, int offset, int count)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: fix when annotalysis correctly handles non-member functions.
  DCHECK(a->GetClass()->IsPrimitiveArray());

  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
  for (int i = 0; i < count; ++i) {
    *dst++ = src.ReadValue(sizeof(T));
  }
}

JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
                                      JDWP::Request& request)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  JDWP::JdwpError status;
  mirror::Array* dst = DecodeArray(array_id, status);
  if (dst == NULL) {
    return status;
  }

  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    if (width == 8) {
      CopyArrayData<uint64_t>(dst, request, offset, count);
    } else if (width == 4) {
      CopyArrayData<uint32_t>(dst, request, offset, count);
    } else if (width == 2) {
      CopyArrayData<uint16_t>(dst, request, offset, count);
    } else {
      CopyArrayData<uint8_t>(dst, request, offset, count);
    }
  } else {
    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      JDWP::ObjectId id = request.ReadObjectId();
      mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
      if (o == ObjectRegistry::kInvalidObject) {
        return JDWP::ERR_INVALID_OBJECT;
      }
      oa->Set<false>(offset + i, o);
    }
  }

  return JDWP::ERR_NONE;
}

JDWP::ObjectId Dbg::CreateString(const std::string& str) {
  return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str()));
}

JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId& new_object) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }
  new_object = gRegistry->Add(c->AllocObject(Thread::Current()));
  return JDWP::ERR_NONE;
}

/*
 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
 */
JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
                                       JDWP::ObjectId& new_array) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(array_class_id, status);
  if (c == NULL) {
    return status;
  }
  new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length,
                                                        c->GetComponentSize(),
                                                        Runtime::Current()->GetHeap()->GetCurrentAllocator()));
  return JDWP::ERR_NONE;
}

bool Dbg::MatchType(JDWP::RefTypeId instance_class_id, JDWP::RefTypeId class_id) {
  JDWP::JdwpError status;
  mirror::Class* c1 = DecodeClass(instance_class_id, status);
  CHECK(c1 != NULL);
  mirror::Class* c2 = DecodeClass(class_id, status);
  CHECK(c2 != NULL);
  return c2->IsAssignableFrom(c1);
}

static JDWP::FieldId ToFieldId(const mirror::ArtField* f)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(!kMovingFields);
  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
}

static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(!kMovingMethods);
  return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
}

static mirror::ArtField* FromFieldId(JDWP::FieldId fid)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(!kMovingFields);
  return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid));
}

static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(!kMovingMethods);
  return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid));
}

static void SetLocation(JDWP::JdwpLocation& location, mirror::ArtMethod* m, uint32_t dex_pc)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (m == NULL) {
    memset(&location, 0, sizeof(location));
  } else {
    mirror::Class* c = m->GetDeclaringClass();
    location.type_tag = GetTypeTag(c);
    location.class_id = gRegistry->AddRefType(c);
    location.method_id = ToMethodId(m);
    location.dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
  }
}

std::string Dbg::GetMethodName(JDWP::MethodId method_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::ArtMethod* m = FromMethodId(method_id);
  return m->GetName();
}

std::string Dbg::GetFieldName(JDWP::FieldId field_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return FromFieldId(field_id)->GetName();
}

/*
 * Augment the access flags for synthetic methods and fields by setting
 * the (as described by the spec) "0xf0000000 bit".  Also, strip out any
 * flags not specified by the Java programming language.
 */
static uint32_t MangleAccessFlags(uint32_t accessFlags) {
  accessFlags &= kAccJavaFlagsMask;
  if ((accessFlags & kAccSynthetic) != 0) {
    accessFlags |= 0xf0000000;
  }
  return accessFlags;
}
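// Worked example: a public synthetic field has dex access flags 0x00001001
// (kAccPublic | kAccSynthetic), so MangleAccessFlags(0x00001001) returns 0xf0001001.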

/*
 * Circularly shifts registers so that arguments come first. Debuggers
 * expect slots to begin with arguments, but dex code places them at
 * the end.
 */
static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // We should not get here for a method without code (native, proxy or abstract). Log it and
    // return the slot as is since all registers are arguments.
    LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m);
    return slot;
  }
  uint16_t ins_size = code_item->ins_size_;
  uint16_t locals_size = code_item->registers_size_ - ins_size;
  if (slot >= locals_size) {
    return slot - locals_size;
  } else {
    return slot + ins_size;
  }
}
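// Worked example: for a method with registers_size_ == 5 and ins_size_ == 2,
// locals_size is 3; the argument registers v3 and v4 map to slots 0 and 1, and
// the locals v0..v2 map to slots 2..4.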

/*
 * Circularly shifts registers so that arguments come last. Reverts
 * slots to dex style argument placement.
 */
static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // We should not get here for a method without code (native, proxy or abstract). Log it and
    // return the slot as is since all registers are arguments.
    LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
    return slot;
  }
  uint16_t ins_size = code_item->ins_size_;
  uint16_t locals_size = code_item->registers_size_ - ins_size;
  if (slot < ins_size) {
    return slot + locals_size;
  } else {
    return slot - ins_size;
  }
}
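// Continuing the worked example above (registers_size_ == 5, ins_size_ == 2):
// DemangleSlot(0, m) == 3 and DemangleSlot(2, m) == 0, i.e. DemangleSlot inverts
// MangleSlot for every slot in range.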
1528
1529JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
1530  JDWP::JdwpError status;
1531  mirror::Class* c = DecodeClass(class_id, status);
1532  if (c == NULL) {
1533    return status;
1534  }
1535
1536  size_t instance_field_count = c->NumInstanceFields();
1537  size_t static_field_count = c->NumStaticFields();
1538
1539  expandBufAdd4BE(pReply, instance_field_count + static_field_count);
1540
1541  for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
1542    mirror::ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
1543    expandBufAddFieldId(pReply, ToFieldId(f));
1544    expandBufAddUtf8String(pReply, f->GetName());
1545    expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
1546    if (with_generic) {
1547      static const char genericSignature[1] = "";
1548      expandBufAddUtf8String(pReply, genericSignature);
1549    }
1550    expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
1551  }
1552  return JDWP::ERR_NONE;
1553}
1554
1555JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
1556                                           JDWP::ExpandBuf* pReply) {
1557  JDWP::JdwpError status;
1558  mirror::Class* c = DecodeClass(class_id, status);
1559  if (c == NULL) {
1560    return status;
1561  }
1562
1563  size_t direct_method_count = c->NumDirectMethods();
1564  size_t virtual_method_count = c->NumVirtualMethods();
1565
1566  expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);
1567
1568  for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
1569    mirror::ArtMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count);
1570    expandBufAddMethodId(pReply, ToMethodId(m));
1571    expandBufAddUtf8String(pReply, m->GetName());
1572    expandBufAddUtf8String(pReply, m->GetSignature().ToString());
1573    if (with_generic) {
1574      static const char genericSignature[1] = "";
1575      expandBufAddUtf8String(pReply, genericSignature);
1576    }
1577    expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
1578  }
1579  return JDWP::ERR_NONE;
1580}
1581
1582JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1583  JDWP::JdwpError status;
1584  Thread* self = Thread::Current();
1585  StackHandleScope<1> hs(self);
1586  Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, status)));
1587  if (c.Get() == nullptr) {
1588    return status;
1589  }
1590  size_t interface_count = c->NumDirectInterfaces();
1591  expandBufAdd4BE(pReply, interface_count);
1592  for (size_t i = 0; i < interface_count; ++i) {
1593    expandBufAddRefTypeId(pReply,
1594                          gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i)));
1595  }
1596  return JDWP::ERR_NONE;
1597}
1598
1599void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply)
1600    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1601  struct DebugCallbackContext {
1602    int numItems;
1603    JDWP::ExpandBuf* pReply;
1604
1605    static bool Callback(void* context, uint32_t address, uint32_t line_number) {
1606      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1607      expandBufAdd8BE(pContext->pReply, address);
1608      expandBufAdd4BE(pContext->pReply, line_number);
1609      pContext->numItems++;
1610      return false;
1611    }
1612  };
1613  mirror::ArtMethod* m = FromMethodId(method_id);
1614  const DexFile::CodeItem* code_item = m->GetCodeItem();
1615  uint64_t start, end;
1616  if (code_item == nullptr) {
1617    DCHECK(m->IsNative() || m->IsProxyMethod());
1618    start = static_cast<uint64_t>(-1);
1619    end = static_cast<uint64_t>(-1);
1620  } else {
1621    start = 0;
1622    // Return the index of the last instruction
1623    end = code_item->insns_size_in_code_units_ - 1;
1624  }
1625
1626  expandBufAdd8BE(pReply, start);
1627  expandBufAdd8BE(pReply, end);
1628
1629  // We don't know the number of line table entries yet, so write a placeholder and patch it in below.
1630  size_t numLinesOffset = expandBufGetLength(pReply);
1631  expandBufAdd4BE(pReply, 0);
1632
1633  DebugCallbackContext context;
1634  context.numItems = 0;
1635  context.pReply = pReply;
1636
1637  if (code_item != nullptr) {
1638    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
1639                                     DebugCallbackContext::Callback, NULL, &context);
1640  }
1641
1642  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
1643}
1644
1645void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
1646                              JDWP::ExpandBuf* pReply) {
1647  struct DebugCallbackContext {
1648    mirror::ArtMethod* method;
1649    JDWP::ExpandBuf* pReply;
1650    size_t variable_count;
1651    bool with_generic;
1652
1653    static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
1654                         const char* name, const char* descriptor, const char* signature)
1655        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1656      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1657
1658      VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
1659                                 pContext->variable_count, startAddress, endAddress - startAddress,
1660                                 name, descriptor, signature, slot,
1661                                 MangleSlot(slot, pContext->method));
1662
1663      slot = MangleSlot(slot, pContext->method);
1664
1665      expandBufAdd8BE(pContext->pReply, startAddress);
1666      expandBufAddUtf8String(pContext->pReply, name);
1667      expandBufAddUtf8String(pContext->pReply, descriptor);
1668      if (pContext->with_generic) {
1669        expandBufAddUtf8String(pContext->pReply, signature);
1670      }
1671      expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
1672      expandBufAdd4BE(pContext->pReply, slot);
1673
1674      ++pContext->variable_count;
1675    }
1676  };
1677  mirror::ArtMethod* m = FromMethodId(method_id);
1678
1679  // arg_count considers doubles and longs to take 2 units.
1680  // variable_count considers everything to take 1 unit.
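  // For example, an (int, double) parameter list contributes 1 + 2 = 3 units
  // to arg_count, while every named variable (argument or local) adds exactly
  // one entry to variable_count.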
1681  std::string shorty(m->GetShorty());
1682  expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty));
1683
1684  // We don't know the total number of variables yet, so leave a blank and update it later.
1685  size_t variable_count_offset = expandBufGetLength(pReply);
1686  expandBufAdd4BE(pReply, 0);
1687
1688  DebugCallbackContext context;
1689  context.method = m;
1690  context.pReply = pReply;
1691  context.variable_count = 0;
1692  context.with_generic = with_generic;
1693
1694  const DexFile::CodeItem* code_item = m->GetCodeItem();
1695  if (code_item != nullptr) {
1696    m->GetDexFile()->DecodeDebugInfo(
1697        code_item, m->IsStatic(), m->GetDexMethodIndex(), NULL, DebugCallbackContext::Callback,
1698        &context);
1699  }
1700
1701  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
1702}
1703
1704void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
1705                                  JDWP::ExpandBuf* pReply) {
1706  mirror::ArtMethod* m = FromMethodId(method_id);
1707  JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
1708  OutputJValue(tag, return_value, pReply);
1709}
1710
1711void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
1712                           JDWP::ExpandBuf* pReply) {
1713  mirror::ArtField* f = FromFieldId(field_id);
1714  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1715  OutputJValue(tag, field_value, pReply);
1716}
1717
1718JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
1719                                  std::vector<uint8_t>& bytecodes)
1720    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1721  mirror::ArtMethod* m = FromMethodId(method_id);
1722  if (m == NULL) {
1723    return JDWP::ERR_INVALID_METHODID;
1724  }
1725  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // Methods without code (native or abstract) have no bytecodes to report.
    // Replying with ERR_ABSENT_INFORMATION is a conservative choice that
    // avoids dereferencing a null code item below.
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  // Dex instructions are encoded as 16-bit code units, so the bytecode size
  // in bytes is twice the code unit count.
1726  size_t byte_count = code_item->insns_size_in_code_units_ * 2;
1727  const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
1728  const uint8_t* end = begin + byte_count;
1729  for (const uint8_t* p = begin; p != end; ++p) {
1730    bytecodes.push_back(*p);
1731  }
1732  return JDWP::ERR_NONE;
1733}
1734
1735JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
1736  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1737}
1738
1739JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
1740  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1741}
1742
1743static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
1744                                         JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
1745                                         bool is_static)
1746    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1747  JDWP::JdwpError status;
1748  mirror::Class* c = DecodeClass(ref_type_id, status);
1749  if (ref_type_id != 0 && c == NULL) {
1750    return status;
1751  }
1752
1753  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1754  if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
1755    return JDWP::ERR_INVALID_OBJECT;
1756  }
1757  mirror::ArtField* f = FromFieldId(field_id);
1758
1759  mirror::Class* receiver_class = c;
1760  if (receiver_class == NULL && o != NULL) {
1761    receiver_class = o->GetClass();
1762  }
1763  // TODO: should we give up now if receiver_class is NULL?
1764  if (receiver_class != NULL && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
1765    LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
1766    return JDWP::ERR_INVALID_FIELDID;
1767  }
1768
1769  // The RI only enforces the static/non-static mismatch in one direction.
1770  // TODO: should we change the tests and check both?
1771  if (is_static) {
1772    if (!f->IsStatic()) {
1773      return JDWP::ERR_INVALID_FIELDID;
1774    }
1775  } else {
1776    if (f->IsStatic()) {
1777      LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.GetValues on static field " << PrettyField(f);
1778    }
1779  }
1780  if (f->IsStatic()) {
1781    o = f->GetDeclaringClass();
1782  }
1783
1784  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1785  JValue field_value;
1786  if (tag == JDWP::JT_VOID) {
1787    LOG(FATAL) << "Unexpected void tag for field " << PrettyField(f);
1788  } else if (!IsPrimitiveTag(tag)) {
1789    field_value.SetL(f->GetObject(o));
1790  } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1791    field_value.SetJ(f->Get64(o));
1792  } else {
1793    field_value.SetI(f->Get32(o));
1794  }
1795  Dbg::OutputJValue(tag, &field_value, pReply);
1796
1797  return JDWP::ERR_NONE;
1798}
1799
1800JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1801                                   JDWP::ExpandBuf* pReply) {
1802  return GetFieldValueImpl(0, object_id, field_id, pReply, false);
1803}
1804
1805JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) {
1806  return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
1807}
1808
1809static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1810                                         uint64_t value, int width, bool is_static)
1811    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1812  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1813  if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
1814    return JDWP::ERR_INVALID_OBJECT;
1815  }
1816  mirror::ArtField* f = FromFieldId(field_id);
1817
1818  // The RI only enforces the static/non-static mismatch in one direction.
1819  // TODO: should we change the tests and check both?
1820  if (is_static) {
1821    if (!f->IsStatic()) {
1822      return JDWP::ERR_INVALID_FIELDID;
1823    }
1824  } else {
1825    if (f->IsStatic()) {
1826      LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.SetValues on static field " << PrettyField(f);
1827    }
1828  }
1829  if (f->IsStatic()) {
1830    o = f->GetDeclaringClass();
1831  }
1832
1833  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1834
1835  if (IsPrimitiveTag(tag)) {
1836    if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1837      CHECK_EQ(width, 8);
1838      // Debugging can't use transactional mode (runtime only).
1839      f->Set64<false>(o, value);
1840    } else {
1841      CHECK_LE(width, 4);
1842      // Debugging can't use transactional mode (runtime only).
1843      f->Set32<false>(o, value);
1844    }
1845  } else {
1846    mirror::Object* v = gRegistry->Get<mirror::Object*>(value);
1847    if (v == ObjectRegistry::kInvalidObject) {
1848      return JDWP::ERR_INVALID_OBJECT;
1849    }
1850    if (v != NULL) {
1851      mirror::Class* field_type;
1852      {
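        // Resolving the field's type may allocate and thus trigger GC, so wrap
        // the raw pointers in handles; that way they are updated if the GC
        // moves the underlying objects.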
1853        StackHandleScope<3> hs(Thread::Current());
1854        HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
1855        HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f));
1856        HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
1857        field_type = FieldHelper(h_f).GetType();
1858      }
1859      if (!field_type->IsAssignableFrom(v->GetClass())) {
1860        return JDWP::ERR_INVALID_OBJECT;
1861      }
1862    }
1863    // Debugging can't use transactional mode (runtime only).
1864    f->SetObject<false>(o, v);
1865  }
1866
1867  return JDWP::ERR_NONE;
1868}
1869
1870JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
1871                                   int width) {
1872  return SetFieldValueImpl(object_id, field_id, value, width, false);
1873}
1874
1875JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
1876  return SetFieldValueImpl(0, field_id, value, width, true);
1877}
1878
1879std::string Dbg::StringToUtf8(JDWP::ObjectId string_id) {
1880  mirror::String* s = gRegistry->Get<mirror::String*>(string_id);
1881  return s->ToModifiedUtf8();
1882}
1883
1884void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
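  // Primitive values are written inline after a one-byte tag, using the width
  // JDWP mandates for that tag; references are written as a (possibly refined)
  // tag followed by an ObjectId.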
1885  if (IsPrimitiveTag(tag)) {
1886    expandBufAdd1(pReply, tag);
1887    if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
1888      expandBufAdd1(pReply, return_value->GetI());
1889    } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
1890      expandBufAdd2BE(pReply, return_value->GetI());
1891    } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
1892      expandBufAdd4BE(pReply, return_value->GetI());
1893    } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1894      expandBufAdd8BE(pReply, return_value->GetJ());
1895    } else {
1896      CHECK_EQ(tag, JDWP::JT_VOID);
1897    }
1898  } else {
1899    ScopedObjectAccessUnchecked soa(Thread::Current());
1900    mirror::Object* value = return_value->GetL();
1901    expandBufAdd1(pReply, TagFromObject(soa, value));
1902    expandBufAddObjectId(pReply, gRegistry->Add(value));
1903  }
1904}
1905
1906JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string& name) {
1907  ScopedObjectAccessUnchecked soa(Thread::Current());
1908  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1909  Thread* thread;
1910  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1911  if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
1912    return error;
1913  }
1914
1915  // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
1916  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
1917  mirror::ArtField* java_lang_Thread_name_field =
1918      soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
1919  mirror::String* s =
1920      reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
1921  if (s != NULL) {
1922    name = s->ToModifiedUtf8();
1923  }
1924  return JDWP::ERR_NONE;
1925}
1926
1927JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
1928  ScopedObjectAccess soa(Thread::Current());
1929  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
1930  if (thread_object == ObjectRegistry::kInvalidObject) {
1931    return JDWP::ERR_INVALID_OBJECT;
1932  }
1933  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroup");
1934  // Okay, so it's an object, but is it actually a thread?
1935  JDWP::JdwpError error;
1936  {
1937    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1938    Thread* thread;
1939    error = DecodeThread(soa, thread_id, thread);
1940  }
1941  if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
1942    // Zombie threads are in the null group.
1943    expandBufAddObjectId(pReply, JDWP::ObjectId(0));
1944    error = JDWP::ERR_NONE;
1945  } else if (error == JDWP::ERR_NONE) {
1946    mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
1947    CHECK(c != nullptr);
1948    mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
1949    CHECK(f != nullptr);
1950    mirror::Object* group = f->GetObject(thread_object);
1951    CHECK(group != nullptr);
1952    JDWP::ObjectId thread_group_id = gRegistry->Add(group);
1953    expandBufAddObjectId(pReply, thread_group_id);
1954  }
1955  soa.Self()->EndAssertNoThreadSuspension(old_cause);
1956  return error;
1957}
1958
1959std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) {
1960  ScopedObjectAccess soa(Thread::Current());
1961  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
1962  CHECK(thread_group != nullptr);
1963  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
1964  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
1965  CHECK(c != nullptr);
1966  mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
1967  CHECK(f != nullptr);
1968  mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
1969  soa.Self()->EndAssertNoThreadSuspension(old_cause);
1970  return s->ToModifiedUtf8();
1971}
1972
1973JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) {
1974  ScopedObjectAccessUnchecked soa(Thread::Current());
1975  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
1976  CHECK(thread_group != nullptr);
1977  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent");
1978  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
1979  CHECK(c != nullptr);
1980  mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
1981  CHECK(f != nullptr);
1982  mirror::Object* parent = f->GetObject(thread_group);
1983  soa.Self()->EndAssertNoThreadSuspension(old_cause);
1984  return gRegistry->Add(parent);
1985}
1986
1987JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
1988  ScopedObjectAccessUnchecked soa(Thread::Current());
1989  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
1990  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
1991  return gRegistry->Add(group);
1992}
1993
1994JDWP::ObjectId Dbg::GetMainThreadGroupId() {
1995  ScopedObjectAccess soa(Thread::Current());
1996  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup);
1997  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
1998  return gRegistry->Add(group);
1999}
2000
2001JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
2002  switch (state) {
2003    case kBlocked:
2004      return JDWP::TS_MONITOR;
2005    case kNative:
2006    case kRunnable:
2007    case kSuspended:
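      // A thread suspended by the debugger still reports TS_RUNNING; its
      // suspension is conveyed separately via the JDWP suspend status.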
2008      return JDWP::TS_RUNNING;
2009    case kSleeping:
2010      return JDWP::TS_SLEEPING;
2011    case kStarting:
2012    case kTerminated:
2013      return JDWP::TS_ZOMBIE;
2014    case kTimedWaiting:
2015    case kWaitingForCheckPointsToRun:
2016    case kWaitingForDebuggerSend:
2017    case kWaitingForDebuggerSuspension:
2018    case kWaitingForDebuggerToAttach:
2019    case kWaitingForDeoptimization:
2020    case kWaitingForGcToComplete:
2021    case kWaitingForJniOnLoad:
2022    case kWaitingForMethodTracingStart:
2023    case kWaitingForSignalCatcherOutput:
2024    case kWaitingInMainDebuggerLoop:
2025    case kWaitingInMainSignalCatcherLoop:
2026    case kWaitingPerformingGc:
2027    case kWaiting:
2028      return JDWP::TS_WAIT;
2029      // Don't add a 'default' here so the compiler can spot incompatible enum changes.
2030  }
2031  LOG(FATAL) << "Unknown thread state: " << state;
2032  return JDWP::TS_ZOMBIE;
2033}
2034
2035JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
2036                                     JDWP::JdwpSuspendStatus* pSuspendStatus) {
2037  ScopedObjectAccess soa(Thread::Current());
2038
2039  *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
2040
2041  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2042  Thread* thread;
2043  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2044  if (error != JDWP::ERR_NONE) {
2045    if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
2046      *pThreadStatus = JDWP::TS_ZOMBIE;
2047      return JDWP::ERR_NONE;
2048    }
2049    return error;
2050  }
2051
2052  if (IsSuspendedForDebugger(soa, thread)) {
2053    *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
2054  }
2055
2056  *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
2057  return JDWP::ERR_NONE;
2058}
2059
2060JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
2061  ScopedObjectAccess soa(Thread::Current());
2062  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2063  Thread* thread;
2064  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2065  if (error != JDWP::ERR_NONE) {
2066    return error;
2067  }
2068  MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2069  expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
2070  return JDWP::ERR_NONE;
2071}
2072
2073JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
2074  ScopedObjectAccess soa(Thread::Current());
2075  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2076  Thread* thread;
2077  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2078  if (error != JDWP::ERR_NONE) {
2079    return error;
2080  }
2081  thread->Interrupt(soa.Self());
2082  return JDWP::ERR_NONE;
2083}
2084
2085static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
2086                                   mirror::Object* desired_thread_group, mirror::Object* peer)
2087    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2088  // Do we want threads from all thread groups?
2089  if (desired_thread_group == nullptr) {
2090    return true;
2091  }
2092  mirror::ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
2093  DCHECK(thread_group_field != nullptr);
2094  mirror::Object* group = thread_group_field->GetObject(peer);
2095  return (group == desired_thread_group);
2096}
2097
2098void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& thread_ids) {
2099  ScopedObjectAccessUnchecked soa(Thread::Current());
2100  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
2101  std::list<Thread*> all_threads_list;
2102  {
2103    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
2104    all_threads_list = Runtime::Current()->GetThreadList()->GetList();
2105  }
2106  for (Thread* t : all_threads_list) {
2107    if (t == Dbg::GetDebugThread()) {
2108      // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
2109      // query all threads, so it's easier if we just don't tell them about this thread.
2110      continue;
2111    }
2112    if (t->IsStillStarting()) {
2113      // This thread is being started (and has been registered in the thread list). However, it is
2114      // not completely started yet so we must ignore it.
2115      continue;
2116    }
2117    mirror::Object* peer = t->GetPeer();
2118    if (peer == nullptr) {
2119      // peer might be NULL if the thread is still starting up. We can't tell the debugger about
2120      // this thread yet.
2121      // TODO: if we identified threads to the debugger by their Thread*
2122      // rather than their peer's mirror::Object*, we could fix this.
2123      // Doing so might help us report ZOMBIE threads too.
2124      continue;
2125    }
2126    if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
2127      thread_ids.push_back(gRegistry->Add(peer));
2128    }
2129  }
2130}
2131
2132void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& child_thread_group_ids) {
2133  ScopedObjectAccess soa(Thread::Current());
2134  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
2135
2136  // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
2137  mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
2138  mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
2139
2140  // Get the array and size out of the ArrayList<ThreadGroup>...
2141  mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
2142  mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
2143  mirror::ObjectArray<mirror::Object>* groups_array =
2144      array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
2145  const int32_t size = size_field->GetInt(groups_array_list);
2146
2147  // Copy the first 'size' elements out of the array into the result.
2148  for (int32_t i = 0; i < size; ++i) {
2149    child_thread_group_ids.push_back(gRegistry->Add(groups_array->Get(i)));
2150  }
2151}
2152
2153static int GetStackDepth(Thread* thread)
2154    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2155  struct CountStackDepthVisitor : public StackVisitor {
2156    explicit CountStackDepthVisitor(Thread* thread)
2157        : StackVisitor(thread, NULL), depth(0) {}
2158
2159    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2160    // annotalysis.
2161    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
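      // Runtime (trampoline) frames have no Method* the debugger can use, so
      // they are not counted toward the reported frame depth.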
2162      if (!GetMethod()->IsRuntimeMethod()) {
2163        ++depth;
2164      }
2165      return true;
2166    }
2167    size_t depth;
2168  };
2169
2170  CountStackDepthVisitor visitor(thread);
2171  visitor.WalkStack();
2172  return visitor.depth;
2173}
2174
2175JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t& result) {
2176  ScopedObjectAccess soa(Thread::Current());
2177  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2178  Thread* thread;
2179  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2180  if (error != JDWP::ERR_NONE) {
2181    return error;
2182  }
2183  if (!IsSuspendedForDebugger(soa, thread)) {
2184    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2185  }
2186  result = GetStackDepth(thread);
2187  return JDWP::ERR_NONE;
2188}
2189
2190JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
2191                                     size_t frame_count, JDWP::ExpandBuf* buf) {
2192  class GetFrameVisitor : public StackVisitor {
2193   public:
2194    GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf)
2195        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2196        : StackVisitor(thread, NULL), depth_(0),
2197          start_frame_(start_frame), frame_count_(frame_count), buf_(buf) {
2198      expandBufAdd4BE(buf_, frame_count_);
2199    }
2200
2201    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2202    // annotalysis.
2203    virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2204      if (GetMethod()->IsRuntimeMethod()) {
2205        return true;  // The debugger can't do anything useful with a frame that has no Method*.
2206      }
2207      if (depth_ >= start_frame_ + frame_count_) {
2208        return false;
2209      }
2210      if (depth_ >= start_frame_) {
2211        JDWP::FrameId frame_id(GetFrameId());
2212        JDWP::JdwpLocation location;
2213        SetLocation(location, GetMethod(), GetDexPc());
2214        VLOG(jdwp) << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
2215        expandBufAdd8BE(buf_, frame_id);
2216        expandBufAddLocation(buf_, location);
2217      }
2218      ++depth_;
2219      return true;
2220    }
2221
2222   private:
2223    size_t depth_;
2224    const size_t start_frame_;
2225    const size_t frame_count_;
2226    JDWP::ExpandBuf* buf_;
2227  };
2228
2229  ScopedObjectAccessUnchecked soa(Thread::Current());
2230  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2231  Thread* thread;
2232  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2233  if (error != JDWP::ERR_NONE) {
2234    return error;
2235  }
2236  if (!IsSuspendedForDebugger(soa, thread)) {
2237    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2238  }
2239  GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
2240  visitor.WalkStack();
2241  return JDWP::ERR_NONE;
2242}
2243
2244JDWP::ObjectId Dbg::GetThreadSelfId() {
2245  ScopedObjectAccessUnchecked soa(Thread::Current());
2246  return gRegistry->Add(soa.Self()->GetPeer());
2247}
2248
2249void Dbg::SuspendVM() {
2250  Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
2251}
2252
2253void Dbg::ResumeVM() {
2254  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
2255}
2256
2257JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
2258  Thread* self = Thread::Current();
2259  ScopedLocalRef<jobject> peer(self->GetJniEnv(), NULL);
2260  {
2261    ScopedObjectAccess soa(self);
2262    peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id)));
2263  }
2264  if (peer.get() == NULL) {
2265    return JDWP::ERR_THREAD_NOT_ALIVE;
2266  }
2267  // Suspend the thread. Take the suspend-thread lock to avoid races with other
2268  // threads trying to suspend this one.
2269  MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
2270  bool timed_out;
2271  ThreadList* thread_list = Runtime::Current()->GetThreadList();
2272  Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
2273                                                    &timed_out);
2274  if (thread != NULL) {
2275    return JDWP::ERR_NONE;
2276  } else if (timed_out) {
2277    return JDWP::ERR_INTERNAL;
2278  } else {
2279    return JDWP::ERR_THREAD_NOT_ALIVE;
2280  }
2281}
2282
2283void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
2284  ScopedObjectAccessUnchecked soa(Thread::Current());
2285  mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id);
2286  Thread* thread;
2287  {
2288    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2289    thread = Thread::FromManagedThread(soa, peer);
2290  }
2291  if (thread == NULL) {
2292    LOG(WARNING) << "No such thread for resume: " << peer;
2293    return;
2294  }
2295  bool needs_resume;
2296  {
2297    MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2298    needs_resume = thread->GetSuspendCount() > 0;
2299  }
2300  if (needs_resume) {
2301    Runtime::Current()->GetThreadList()->Resume(thread, true);
2302  }
2303}
2304
2305void Dbg::SuspendSelf() {
2306  Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
2307}
2308
2309struct GetThisVisitor : public StackVisitor {
2310  GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
2311      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2312      : StackVisitor(thread, context), this_object(NULL), frame_id(frame_id) {}
2313
2314  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2315  // annotalysis.
2316  virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2317    if (frame_id != GetFrameId()) {
2318      return true;  // continue
2319    } else {
2320      this_object = GetThisObject();
2321      return false;
2322    }
2323  }
2324
2325  mirror::Object* this_object;
2326  JDWP::FrameId frame_id;
2327};
2328
2329JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
2330                                   JDWP::ObjectId* result) {
2331  ScopedObjectAccessUnchecked soa(Thread::Current());
2332  Thread* thread;
2333  {
2334    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2335    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2336    if (error != JDWP::ERR_NONE) {
2337      return error;
2338    }
2339    if (!IsSuspendedForDebugger(soa, thread)) {
2340      return JDWP::ERR_THREAD_NOT_SUSPENDED;
2341    }
2342  }
2343  std::unique_ptr<Context> context(Context::Create());
2344  GetThisVisitor visitor(thread, context.get(), frame_id);
2345  visitor.WalkStack();
2346  *result = gRegistry->Add(visitor.this_object);
2347  return JDWP::ERR_NONE;
2348}
2349
2350JDWP::JdwpError Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
2351                                   JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
2352  struct GetLocalVisitor : public StackVisitor {
2353    GetLocalVisitor(const ScopedObjectAccessUnchecked& soa, Thread* thread, Context* context,
2354                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width)
2355        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2356        : StackVisitor(thread, context), soa_(soa), frame_id_(frame_id), slot_(slot), tag_(tag),
2357          buf_(buf), width_(width), error_(JDWP::ERR_NONE) {}
2358
2359    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2360    // annotalysis.
2361    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2362      if (GetFrameId() != frame_id_) {
2363        return true;  // Not our frame, carry on.
2364      }
2365      // TODO: check that the tag is compatible with the actual type of the slot!
2366      // TODO: check slot is valid for this method or return INVALID_SLOT error.
2367      mirror::ArtMethod* m = GetMethod();
2368      if (m->IsNative()) {
2369        // We can't read a local value from a native method.
2370        error_ = JDWP::ERR_OPAQUE_FRAME;
2371        return false;
2372      }
2373      uint16_t reg = DemangleSlot(slot_, m);
2374      constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
2375      switch (tag_) {
2376        case JDWP::JT_BOOLEAN: {
2377          CHECK_EQ(width_, 1U);
2378          uint32_t intVal;
2379          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2380            VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
2381            JDWP::Set1(buf_+1, intVal != 0);
2382          } else {
2383            VLOG(jdwp) << "failed to get boolean local " << reg;
2384            error_ = kFailureErrorCode;
2385          }
2386          break;
2387        }
2388        case JDWP::JT_BYTE: {
2389          CHECK_EQ(width_, 1U);
2390          uint32_t intVal;
2391          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2392            VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
2393            JDWP::Set1(buf_+1, intVal);
2394          } else {
2395            VLOG(jdwp) << "failed to get byte local " << reg;
2396            error_ = kFailureErrorCode;
2397          }
2398          break;
2399        }
2400        case JDWP::JT_SHORT:
2401        case JDWP::JT_CHAR: {
2402          CHECK_EQ(width_, 2U);
2403          uint32_t intVal;
2404          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2405            VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
2406            JDWP::Set2BE(buf_+1, intVal);
2407          } else {
2408            VLOG(jdwp) << "failed to get short/char local " << reg;
2409            error_ = kFailureErrorCode;
2410          }
2411          break;
2412        }
2413        case JDWP::JT_INT: {
2414          CHECK_EQ(width_, 4U);
2415          uint32_t intVal;
2416          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2417            VLOG(jdwp) << "get int local " << reg << " = " << intVal;
2418            JDWP::Set4BE(buf_+1, intVal);
2419          } else {
2420            VLOG(jdwp) << "failed to get int local " << reg;
2421            error_ = kFailureErrorCode;
2422          }
2423          break;
2424        }
2425        case JDWP::JT_FLOAT: {
2426          CHECK_EQ(width_, 4U);
2427          uint32_t intVal;
2428          if (GetVReg(m, reg, kFloatVReg, &intVal)) {
2429            VLOG(jdwp) << "get float local " << reg << " = " << intVal;
2430            JDWP::Set4BE(buf_+1, intVal);
2431          } else {
2432            VLOG(jdwp) << "failed to get float local " << reg;
2433            error_ = kFailureErrorCode;
2434          }
2435          break;
2436        }
2437        case JDWP::JT_ARRAY:
2438        case JDWP::JT_CLASS_LOADER:
2439        case JDWP::JT_CLASS_OBJECT:
2440        case JDWP::JT_OBJECT:
2441        case JDWP::JT_STRING:
2442        case JDWP::JT_THREAD:
2443        case JDWP::JT_THREAD_GROUP: {
2444          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
2445          uint32_t intVal;
2446          if (GetVReg(m, reg, kReferenceVReg, &intVal)) {
2447            mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
2448            VLOG(jdwp) << "get " << tag_ << " object local " << reg << " = " << o;
2449            if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
2450              LOG(FATAL) << "Register " << reg << " expected to hold " << tag_ << " object: " << o;
2451            }
2452            tag_ = TagFromObject(soa_, o);
2453            JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
2454          } else {
2455            VLOG(jdwp) << "failed to get " << tag_ << " object local " << reg;
2456            error_ = kFailureErrorCode;
2457          }
2458          break;
2459        }
2460        case JDWP::JT_DOUBLE: {
2461          CHECK_EQ(width_, 8U);
2462          uint64_t longVal;
2463          if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
2464            VLOG(jdwp) << "get double local " << reg << " = " << longVal;
2465            JDWP::Set8BE(buf_+1, longVal);
2466          } else {
2467            VLOG(jdwp) << "failed to get double local " << reg;
2468            error_ = kFailureErrorCode;
2469          }
2470          break;
2471        }
2472        case JDWP::JT_LONG: {
2473          CHECK_EQ(width_, 8U);
2474          uint64_t longVal;
2475          if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) {
2476            VLOG(jdwp) << "get long local " << reg << " = " << longVal;
2477            JDWP::Set8BE(buf_+1, longVal);
2478          } else {
2479            VLOG(jdwp) << "failed to get long local " << reg;
2480            error_ = kFailureErrorCode;
2481          }
2482          break;
2483        }
2484        default:
2485          LOG(FATAL) << "Unknown tag " << tag_;
2486          break;
2487      }
2488
2489      // Prepend tag, which may have been updated.
2490      JDWP::Set1(buf_, tag_);
2491      return false;
2492    }
2493    const ScopedObjectAccessUnchecked& soa_;
2494    const JDWP::FrameId frame_id_;
2495    const int slot_;
2496    JDWP::JdwpTag tag_;
2497    uint8_t* const buf_;
2498    const size_t width_;
2499    JDWP::JdwpError error_;
2500  };
2501
2502  ScopedObjectAccessUnchecked soa(Thread::Current());
2503  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2504  Thread* thread;
2505  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2506  if (error != JDWP::ERR_NONE) {
2507    return error;
2508  }
2509  // TODO: check that the thread is suspended by the debugger?
2510  std::unique_ptr<Context> context(Context::Create());
2511  GetLocalVisitor visitor(soa, thread, context.get(), frame_id, slot, tag, buf, width);
2512  visitor.WalkStack();
2513  return visitor.error_;
2514}
2515
2516JDWP::JdwpError Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
2517                                   JDWP::JdwpTag tag, uint64_t value, size_t width) {
2518  struct SetLocalVisitor : public StackVisitor {
2519    SetLocalVisitor(Thread* thread, Context* context,
2520                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value,
2521                    size_t width)
2522        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2523        : StackVisitor(thread, context),
2524          frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width),
2525          error_(JDWP::ERR_NONE) {}
2526
2527    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2528    // annotalysis.
2529    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2530      if (GetFrameId() != frame_id_) {
2531        return true;  // Not our frame, carry on.
2532      }
2533      // TODO: check that the tag is compatible with the actual type of the slot!
2534      // TODO: check slot is valid for this method or return INVALID_SLOT error.
2535      mirror::ArtMethod* m = GetMethod();
2536      if (m->IsNative()) {
2537        // We can't write a local value into a native method's frame.
2538        error_ = JDWP::ERR_OPAQUE_FRAME;
2539        return false;
2540      }
2541      uint16_t reg = DemangleSlot(slot_, m);
2542      constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
2543      switch (tag_) {
2544        case JDWP::JT_BOOLEAN:
2545        case JDWP::JT_BYTE:
2546          CHECK_EQ(width_, 1U);
2547          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2548            VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = "
2549                       << static_cast<uint32_t>(value_);
2550            error_ = kFailureErrorCode;
2551          }
2552          break;
2553        case JDWP::JT_SHORT:
2554        case JDWP::JT_CHAR:
2555          CHECK_EQ(width_, 2U);
2556          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2557            VLOG(jdwp) << "failed to set short/char local " << reg << " = "
2558                       << static_cast<uint32_t>(value_);
2559            error_ = kFailureErrorCode;
2560          }
2561          break;
2562        case JDWP::JT_INT:
2563          CHECK_EQ(width_, 4U);
2564          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2565            VLOG(jdwp) << "failed to set int local " << reg << " = "
2566                       << static_cast<uint32_t>(value_);
2567            error_ = kFailureErrorCode;
2568          }
2569          break;
2570        case JDWP::JT_FLOAT:
2571          CHECK_EQ(width_, 4U);
2572          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kFloatVReg)) {
2573            VLOG(jdwp) << "failed to set float local " << reg << " = "
2574                       << static_cast<uint32_t>(value_);
2575            error_ = kFailureErrorCode;
2576          }
2577          break;
2578        case JDWP::JT_ARRAY:
2579        case JDWP::JT_CLASS_LOADER:
2580        case JDWP::JT_CLASS_OBJECT:
2581        case JDWP::JT_OBJECT:
2582        case JDWP::JT_STRING:
2583        case JDWP::JT_THREAD:
2584        case JDWP::JT_THREAD_GROUP: {
2585          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
2586          mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value_));
2587          if (o == ObjectRegistry::kInvalidObject) {
2588            VLOG(jdwp) << tag_ << " object " << o << " is an invalid object";
2589            error_ = JDWP::ERR_INVALID_OBJECT;
2590          } else if (!SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
2591                              kReferenceVReg)) {
2592            VLOG(jdwp) << "failed to set " << tag_ << " object local " << reg << " = " << o;
2593            error_ = kFailureErrorCode;
2594          }
2595          break;
2596        }
2597        case JDWP::JT_DOUBLE: {
2598          CHECK_EQ(width_, 8U);
2599          bool success = SetVRegPair(m, reg, value_, kDoubleLoVReg, kDoubleHiVReg);
2600          if (!success) {
2601            VLOG(jdwp) << "failed to set double local " << reg << " = " << value_;
2602            error_ = kFailureErrorCode;
2603          }
2604          break;
2605        }
2606        case JDWP::JT_LONG: {
2607          CHECK_EQ(width_, 8U);
2608          bool success = SetVRegPair(m, reg, value_, kLongLoVReg, kLongHiVReg);
2609          if (!success) {
2610            VLOG(jdwp) << "failed to set long local " << reg << " = " << value_;
2611            error_ = kFailureErrorCode;
2612          }
2613          break;
2614        }
2615        default:
2616          LOG(FATAL) << "Unknown tag " << tag_;
2617          break;
2618      }
2619      return false;
2620    }
2621
2622    const JDWP::FrameId frame_id_;
2623    const int slot_;
2624    const JDWP::JdwpTag tag_;
2625    const uint64_t value_;
2626    const size_t width_;
2627    JDWP::JdwpError error_;
2628  };
2629
2630  ScopedObjectAccessUnchecked soa(Thread::Current());
2631  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2632  Thread* thread;
2633  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2634  if (error != JDWP::ERR_NONE) {
2635    return error;
2636  }
2637  // TODO: check that the thread is suspended by the debugger?
2638  std::unique_ptr<Context> context(Context::Create());
2639  SetLocalVisitor visitor(thread, context.get(), frame_id, slot, tag, value, width);
2640  visitor.WalkStack();
2641  return visitor.error_;
2642}
2643
2644JDWP::ObjectId Dbg::GetThisObjectIdForEvent(mirror::Object* this_object) {
2645  // If 'this_object' isn't already in the registry, we know that we're not looking for it, so
2646  // there's no point adding it to the registry and burning through ids.
2647  // When registering an event request with an instance filter, we've been given an existing object
2648  // id so it must already be present in the registry when the event fires.
2649  JDWP::ObjectId this_id = 0;
2650  if (this_object != nullptr && gRegistry->Contains(this_object)) {
2651    this_id = gRegistry->Add(this_object);
2652  }
2653  return this_id;
2654}
2655
2656void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
2657                            int event_flags, const JValue* return_value) {
2658  if (!IsDebuggerActive()) {
2659    return;
2660  }
2661  DCHECK(m != nullptr);
2662  DCHECK_EQ(m->IsStatic(), this_object == nullptr);
2663  JDWP::JdwpLocation location;
2664  SetLocation(location, m, dex_pc);
2665
2666  // We need 'this' for InstanceOnly filters only.
2667  JDWP::ObjectId this_id = GetThisObjectIdForEvent(this_object);
2668  gJdwpState->PostLocationEvent(&location, this_id, event_flags, return_value);
2669}
2670
2671void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc,
2672                               mirror::Object* this_object, mirror::ArtField* f) {
2673  if (!IsDebuggerActive()) {
2674    return;
2675  }
2676  DCHECK(m != nullptr);
2677  DCHECK(f != nullptr);
2678  JDWP::JdwpLocation location;
2679  SetLocation(location, m, dex_pc);
2680
2681  JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass());
2682  JDWP::FieldId field_id = ToFieldId(f);
2683  JDWP::ObjectId this_id = gRegistry->Add(this_object);
2684
2685  gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, nullptr, false);
2686}
2687
2688void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
2689                                     mirror::Object* this_object, mirror::ArtField* f,
2690                                     const JValue* field_value) {
2691  if (!IsDebuggerActive()) {
2692    return;
2693  }
2694  DCHECK(m != nullptr);
2695  DCHECK(f != nullptr);
2696  DCHECK(field_value != nullptr);
2697  JDWP::JdwpLocation location;
2698  SetLocation(location, m, dex_pc);
2699
2700  JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass());
2701  JDWP::FieldId field_id = ToFieldId(f);
2702  JDWP::ObjectId this_id = gRegistry->Add(this_object);
2703
2704  gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, field_value, true);
2705}
2706
2707void Dbg::PostException(const ThrowLocation& throw_location,
2708                        mirror::ArtMethod* catch_method,
2709                        uint32_t catch_dex_pc, mirror::Throwable* exception_object) {
2710  if (!IsDebuggerActive()) {
2711    return;
2712  }
2713
2714  JDWP::JdwpLocation jdwp_throw_location;
2715  SetLocation(jdwp_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
2716  JDWP::JdwpLocation catch_location;
2717  SetLocation(catch_location, catch_method, catch_dex_pc);
2718
2719  // We need 'this' for InstanceOnly filters only.
2720  JDWP::ObjectId this_id = GetThisObjectIdForEvent(throw_location.GetThis());
2721  JDWP::ObjectId exception_id = gRegistry->Add(exception_object);
2722  JDWP::RefTypeId exception_class_id = gRegistry->AddRefType(exception_object->GetClass());
2723
2724  gJdwpState->PostException(&jdwp_throw_location, exception_id, exception_class_id, &catch_location,
2725                            this_id);
2726}
2727
2728void Dbg::PostClassPrepare(mirror::Class* c) {
2729  if (!IsDebuggerActive()) {
2730    return;
2731  }
2732
2733  // OLD-TODO - we currently always send both "verified" and "prepared" since
2734  // debuggers seem to like that.  There might be some advantage to honesty,
2735  // since the class may not yet be verified.
2736  int state = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
2737  JDWP::JdwpTypeTag tag = GetTypeTag(c);
2738  std::string temp;
2739  gJdwpState->PostClassPrepare(tag, gRegistry->Add(c), c->GetDescriptor(&temp), state);
2740}
2741
2742void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
2743                         mirror::ArtMethod* m, uint32_t dex_pc,
2744                         int event_flags, const JValue* return_value) {
2745  if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
2746    return;
2747  }
2748
2749  if (IsBreakpoint(m, dex_pc)) {
2750    event_flags |= kBreakpoint;
2751  }
2752
2753  // If the debugger is single-stepping one of our threads, check to
2754  // see if we're that thread and we've reached a step point.
2755  const SingleStepControl* single_step_control = thread->GetSingleStepControl();
2756  DCHECK(single_step_control != nullptr);
2757  if (single_step_control->is_active) {
2758    CHECK(!m->IsNative());
2759    if (single_step_control->step_depth == JDWP::SD_INTO) {
2760      // Step into method calls.  We break when the line number
2761      // or method pointer changes.  If we're in SS_MIN mode, we
2762      // always stop.
2763      if (single_step_control->method != m) {
2764        event_flags |= kSingleStep;
2765        VLOG(jdwp) << "SS new method";
2766      } else if (single_step_control->step_size == JDWP::SS_MIN) {
2767        event_flags |= kSingleStep;
2768        VLOG(jdwp) << "SS new instruction";
2769      } else if (single_step_control->ContainsDexPc(dex_pc)) {
2770        event_flags |= kSingleStep;
2771        VLOG(jdwp) << "SS new line";
2772      }
2773    } else if (single_step_control->step_depth == JDWP::SD_OVER) {
2774      // Step over method calls.  We break when the line number is
2775      // different and the frame depth is <= the original frame
2776      // depth.  (We can't just compare on the method, because we
2777      // might get unrolled past it by an exception, and it's tricky
2778      // to identify recursion.)
2779
2780      int stack_depth = GetStackDepth(thread);
2781
2782      if (stack_depth < single_step_control->stack_depth) {
2783        // Popped up one or more frames, always trigger.
2784        event_flags |= kSingleStep;
2785        VLOG(jdwp) << "SS method pop";
2786      } else if (stack_depth == single_step_control->stack_depth) {
2787        // Same depth, see if we moved.
2788        if (single_step_control->step_size == JDWP::SS_MIN) {
2789          event_flags |= kSingleStep;
2790          VLOG(jdwp) << "SS new instruction";
2791        } else if (single_step_control->ContainsDexPc(dex_pc)) {
2792          event_flags |= kSingleStep;
2793          VLOG(jdwp) << "SS new line";
2794        }
2795      }
2796    } else {
2797      CHECK_EQ(single_step_control->step_depth, JDWP::SD_OUT);
2798      // Return from the current method.  We break when the frame
2799      // depth pops up.
2800
2801      // This differs from the "method exit" break in that it stops
2802      // with the PC at the next instruction in the returned-to
2803      // function, rather than the end of the returning function.
2804
2805      int stack_depth = GetStackDepth(thread);
2806      if (stack_depth < single_step_control->stack_depth) {
2807        event_flags |= kSingleStep;
2808        VLOG(jdwp) << "SS method pop";
2809      }
2810    }
2811  }
2812
2813  // If there's something interesting going on, see if it matches one
2814  // of the debugger filters.
2815  if (event_flags != 0) {
2816    Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
2817  }
2818}
2819
2820size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
2821  switch (instrumentation_event) {
2822    case instrumentation::Instrumentation::kMethodEntered:
2823      return &method_enter_event_ref_count_;
2824    case instrumentation::Instrumentation::kMethodExited:
2825      return &method_exit_event_ref_count_;
2826    case instrumentation::Instrumentation::kDexPcMoved:
2827      return &dex_pc_change_event_ref_count_;
2828    case instrumentation::Instrumentation::kFieldRead:
2829      return &field_read_event_ref_count_;
2830    case instrumentation::Instrumentation::kFieldWritten:
2831      return &field_write_event_ref_count_;
2832    case instrumentation::Instrumentation::kExceptionCaught:
2833      return &exception_catch_event_ref_count_;
2834    default:
2835      return nullptr;
2836  }
2837}
2838
2839// Process request while all mutator threads are suspended.
2840void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
2841  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
2842  switch (request.GetKind()) {
2843    case DeoptimizationRequest::kNothing:
2844      LOG(WARNING) << "Ignoring empty deoptimization request.";
2845      break;
2846    case DeoptimizationRequest::kRegisterForEvent:
2847      VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
2848                                 request.InstrumentationEvent());
2849      instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
2850      instrumentation_events_ |= request.InstrumentationEvent();
2851      break;
2852    case DeoptimizationRequest::kUnregisterForEvent:
2853      VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
2854                                 request.InstrumentationEvent());
2855      instrumentation->RemoveListener(&gDebugInstrumentationListener,
2856                                      request.InstrumentationEvent());
2857      instrumentation_events_ &= ~request.InstrumentationEvent();
2858      break;
2859    case DeoptimizationRequest::kFullDeoptimization:
2860      VLOG(jdwp) << "Deoptimize the world ...";
2861      instrumentation->DeoptimizeEverything();
2862      VLOG(jdwp) << "Deoptimize the world DONE";
2863      break;
2864    case DeoptimizationRequest::kFullUndeoptimization:
2865      VLOG(jdwp) << "Undeoptimize the world ...";
2866      instrumentation->UndeoptimizeEverything();
2867      VLOG(jdwp) << "Undeoptimize the world DONE";
2868      break;
2869    case DeoptimizationRequest::kSelectiveDeoptimization:
2870      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
2871      instrumentation->Deoptimize(request.Method());
2872      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
2873      break;
2874    case DeoptimizationRequest::kSelectiveUndeoptimization:
2875      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
2876      instrumentation->Undeoptimize(request.Method());
2877      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
2878      break;
2879    default:
2880      LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
2881      break;
2882  }
2883}
2884
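// Records that a full undeoptimization must be postponed; it is performed later by
// ProcessDelayedFullUndeoptimizations().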
2885void Dbg::DelayFullUndeoptimization() {
2886  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2887  ++delayed_full_undeoptimization_count_;
2888  DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
2889}
2890
2891void Dbg::ProcessDelayedFullUndeoptimizations() {
2892  // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization).
2893  {
2894    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2895    while (delayed_full_undeoptimization_count_ > 0) {
2896      DeoptimizationRequest req;
2897      req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
2898      req.SetMethod(nullptr);
2899      RequestDeoptimizationLocked(req);
2900      --delayed_full_undeoptimization_count_;
2901    }
2902  }
2903  ManageDeoptimization();
2904}
2905
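// Queues a deoptimization request to be applied by the next call to ManageDeoptimization().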
2906void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
2907  if (req.GetKind() == DeoptimizationRequest::kNothing) {
2908    // Nothing to do.
2909    return;
2910  }
2911  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2912  RequestDeoptimizationLocked(req);
2913}
2914
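// Queues a deoptimization request, using reference counting so we only actually deoptimize (or
// undeoptimize) when the first listener registers or the last one unregisters.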
2915void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
2916  switch (req.GetKind()) {
2917    case DeoptimizationRequest::kRegisterForEvent: {
2918      DCHECK_NE(req.InstrumentationEvent(), 0u);
2919      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
2920      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
2921                                                req.InstrumentationEvent());
2922      if (*counter == 0) {
        VLOG(jdwp) << StringPrintf("Queue request #%zu to start listening to instrumentation event 0x%x",
                                   deoptimization_requests_.size(), req.InstrumentationEvent());
2925        deoptimization_requests_.push_back(req);
2926      }
2927      *counter = *counter + 1;
2928      break;
2929    }
2930    case DeoptimizationRequest::kUnregisterForEvent: {
2931      DCHECK_NE(req.InstrumentationEvent(), 0u);
2932      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
2933      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
2934                                                req.InstrumentationEvent());
2935      *counter = *counter - 1;
2936      if (*counter == 0) {
        VLOG(jdwp) << StringPrintf("Queue request #%zu to stop listening to instrumentation event 0x%x",
                                   deoptimization_requests_.size(), req.InstrumentationEvent());
2939        deoptimization_requests_.push_back(req);
2940      }
2941      break;
2942    }
2943    case DeoptimizationRequest::kFullDeoptimization: {
2944      DCHECK(req.Method() == nullptr);
2945      if (full_deoptimization_event_count_ == 0) {
2946        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2947                   << " for full deoptimization";
2948        deoptimization_requests_.push_back(req);
2949      }
2950      ++full_deoptimization_event_count_;
2951      break;
2952    }
2953    case DeoptimizationRequest::kFullUndeoptimization: {
2954      DCHECK(req.Method() == nullptr);
2955      DCHECK_GT(full_deoptimization_event_count_, 0U);
2956      --full_deoptimization_event_count_;
2957      if (full_deoptimization_event_count_ == 0) {
2958        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2959                   << " for full undeoptimization";
2960        deoptimization_requests_.push_back(req);
2961      }
2962      break;
2963    }
2964    case DeoptimizationRequest::kSelectiveDeoptimization: {
2965      DCHECK(req.Method() != nullptr);
2966      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2967                 << " for deoptimization of " << PrettyMethod(req.Method());
2968      deoptimization_requests_.push_back(req);
2969      break;
2970    }
2971    case DeoptimizationRequest::kSelectiveUndeoptimization: {
2972      DCHECK(req.Method() != nullptr);
2973      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2974                 << " for undeoptimization of " << PrettyMethod(req.Method());
2975      deoptimization_requests_.push_back(req);
2976      break;
2977    }
2978    default: {
2979      LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
2980      break;
2981    }
2982  }
2983}
2984
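// Applies all pending deoptimization requests with every mutator thread suspended, then clears
// the queue.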
2985void Dbg::ManageDeoptimization() {
2986  Thread* const self = Thread::Current();
2987  {
2988    // Avoid suspend/resume if there is no pending request.
2989    MutexLock mu(self, *Locks::deoptimization_lock_);
2990    if (deoptimization_requests_.empty()) {
2991      return;
2992    }
2993  }
2994  CHECK_EQ(self->GetState(), kRunnable);
2995  self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
2996  // We need to suspend mutator threads first.
2997  Runtime* const runtime = Runtime::Current();
2998  runtime->GetThreadList()->SuspendAll();
2999  const ThreadState old_state = self->SetStateUnsafe(kRunnable);
3000  {
3001    MutexLock mu(self, *Locks::deoptimization_lock_);
3002    size_t req_index = 0;
3003    for (DeoptimizationRequest& request : deoptimization_requests_) {
3004      VLOG(jdwp) << "Process deoptimization request #" << req_index++;
3005      ProcessDeoptimizationRequest(request);
3006    }
3007    deoptimization_requests_.clear();
3008  }
3009  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
3010  runtime->GetThreadList()->ResumeAll();
3011  self->TransitionFromSuspendedToRunnable();
3012}
3013
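// Returns true if the given method may have been inlined by the compiler, in which case setting
// a breakpoint on it requires deoptimizing everything rather than just that method.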
3014static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
3015    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3016  const DexFile::CodeItem* code_item = m->GetCodeItem();
3017  if (code_item == nullptr) {
    // TODO: We should never be asked to watch a location in a native or abstract method, so the
    // code item should never be null. We could simply check that this case never happens.
3020    return false;
3021  }
3022  StackHandleScope<2> hs(self);
3023  mirror::Class* declaring_class = m->GetDeclaringClass();
3024  Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
3025  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
3026  verifier::MethodVerifier verifier(dex_cache->GetDexFile(), &dex_cache, &class_loader,
3027                                    &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
3028                                    m->GetAccessFlags(), false, true, false);
3029  // Note: we don't need to verify the method.
3030  return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
3031}
3032
3033static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
3034    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3035  for (Breakpoint& breakpoint : gBreakpoints) {
3036    if (breakpoint.Method() == m) {
3037      return &breakpoint;
3038    }
3039  }
3040  return nullptr;
3041}
3042
3043// Sanity checks all existing breakpoints on the same method.
3044static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
3045    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3046  if (kIsDebugBuild) {
3047    for (const Breakpoint& breakpoint : gBreakpoints) {
3048      CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
3049    }
3050    if (need_full_deoptimization) {
3051      // We should have deoptimized everything but not "selectively" deoptimized this method.
3052      CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized());
3053      CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3054    } else {
3055      // We should have "selectively" deoptimized this method.
3056      // Note: while we have not deoptimized everything for this method, we may have done it for
3057      // another event.
3058      CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3059    }
3060  }
3061}
3062
3063// Installs a breakpoint at the specified location. Also indicates through the deoptimization
3064// request if we need to deoptimize.
3065void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3066  Thread* const self = Thread::Current();
3067  mirror::ArtMethod* m = FromMethodId(location->method_id);
3068  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3069
3070  WriterMutexLock mu(self, *Locks::breakpoint_lock_);
3071  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3072  bool need_full_deoptimization;
3073  if (existing_breakpoint == nullptr) {
3074    // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
3075    // inlined, we deoptimize everything; otherwise we deoptimize only this method.
3076    need_full_deoptimization = IsMethodPossiblyInlined(self, m);
3077    if (need_full_deoptimization) {
3078      req->SetKind(DeoptimizationRequest::kFullDeoptimization);
3079      req->SetMethod(nullptr);
3080    } else {
3081      req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization);
3082      req->SetMethod(m);
3083    }
3084  } else {
3085    // There is at least one breakpoint for this method: we don't need to deoptimize.
3086    req->SetKind(DeoptimizationRequest::kNothing);
3087    req->SetMethod(nullptr);
3088
3089    need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization();
3090    SanityCheckExistingBreakpoints(m, need_full_deoptimization);
3091  }
3092
3093  gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization));
3094  VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
3095             << gBreakpoints[gBreakpoints.size() - 1];
3096}
3097
3098// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
3099// request if we need to undeoptimize.
3100void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3101  WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
3102  mirror::ArtMethod* m = FromMethodId(location->method_id);
3103  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3104  bool need_full_deoptimization = false;
3105  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
3106    if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
3107      VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
3108      need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization();
3109      DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3110      gBreakpoints.erase(gBreakpoints.begin() + i);
3111      break;
3112    }
3113  }
3114  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3115  if (existing_breakpoint == nullptr) {
3116    // There is no more breakpoint on this method: we need to undeoptimize.
3117    if (need_full_deoptimization) {
3118      // This method required full deoptimization: we need to undeoptimize everything.
3119      req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
3120      req->SetMethod(nullptr);
3121    } else {
3122      // This method required selective deoptimization: we need to undeoptimize only that method.
3123      req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
3124      req->SetMethod(m);
3125    }
3126  } else {
3127    // There is at least one breakpoint for this method: we don't need to undeoptimize.
3128    req->SetKind(DeoptimizationRequest::kNothing);
3129    req->SetMethod(nullptr);
3130    SanityCheckExistingBreakpoints(m, need_full_deoptimization);
3131  }
3132}
3133
3134// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
3135// cause suspension if the thread is the current thread.
3136class ScopedThreadSuspension {
3137 public:
3138  ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
3139      LOCKS_EXCLUDED(Locks::thread_list_lock_)
3140      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
3141      thread_(nullptr),
3142      error_(JDWP::ERR_NONE),
3143      self_suspend_(false),
3144      other_suspend_(false) {
3145    ScopedObjectAccessUnchecked soa(self);
3146    {
3147      MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3148      error_ = DecodeThread(soa, thread_id, thread_);
3149    }
3150    if (error_ == JDWP::ERR_NONE) {
3151      if (thread_ == soa.Self()) {
3152        self_suspend_ = true;
3153      } else {
3154        soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3155        jobject thread_peer = gRegistry->GetJObject(thread_id);
3156        bool timed_out;
3157        Thread* suspended_thread;
3158        {
3159          // Take suspend thread lock to avoid races with threads trying to suspend this one.
3160          MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
3161          ThreadList* thread_list = Runtime::Current()->GetThreadList();
3162          suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
3163        }
3164        CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
3165        if (suspended_thread == nullptr) {
3166          // Thread terminated from under us while suspending.
3167          error_ = JDWP::ERR_INVALID_THREAD;
3168        } else {
3169          CHECK_EQ(suspended_thread, thread_);
3170          other_suspend_ = true;
3171        }
3172      }
3173    }
3174  }
3175
3176  Thread* GetThread() const {
3177    return thread_;
3178  }
3179
3180  JDWP::JdwpError GetError() const {
3181    return error_;
3182  }
3183
3184  ~ScopedThreadSuspension() {
3185    if (other_suspend_) {
3186      Runtime::Current()->GetThreadList()->Resume(thread_, true);
3187    }
3188  }
3189
3190 private:
3191  Thread* thread_;
3192  JDWP::JdwpError error_;
3193  bool self_suspend_;
3194  bool other_suspend_;
3195};
3196
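// Prepares the given thread for single-stepping: records the current method, line number and
// stack depth, and computes the set of dex pcs belonging to the current source line.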
3197JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
3198                                   JDWP::JdwpStepDepth step_depth) {
3199  Thread* self = Thread::Current();
3200  ScopedThreadSuspension sts(self, thread_id);
3201  if (sts.GetError() != JDWP::ERR_NONE) {
3202    return sts.GetError();
3203  }
3204
3205  //
3206  // Work out what Method* we're in, the current line number, and how deep the stack currently
3207  // is for step-out.
3208  //
3209
3210  struct SingleStepStackVisitor : public StackVisitor {
3211    explicit SingleStepStackVisitor(Thread* thread, SingleStepControl* single_step_control,
3212                                    int32_t* line_number)
3213        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
3214        : StackVisitor(thread, NULL), single_step_control_(single_step_control),
3215          line_number_(line_number) {
3216      DCHECK_EQ(single_step_control_, thread->GetSingleStepControl());
3217      single_step_control_->method = NULL;
3218      single_step_control_->stack_depth = 0;
3219    }
3220
3221    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
3222    // annotalysis.
3223    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
3224      mirror::ArtMethod* m = GetMethod();
3225      if (!m->IsRuntimeMethod()) {
3226        ++single_step_control_->stack_depth;
3227        if (single_step_control_->method == NULL) {
3228          mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
3229          single_step_control_->method = m;
3230          *line_number_ = -1;
3231          if (dex_cache != NULL) {
3232            const DexFile& dex_file = *dex_cache->GetDexFile();
3233            *line_number_ = dex_file.GetLineNumFromPC(m, GetDexPc());
3234          }
3235        }
3236      }
3237      return true;
3238    }
3239
3240    SingleStepControl* const single_step_control_;
3241    int32_t* const line_number_;
3242  };
3243
3244  Thread* const thread = sts.GetThread();
3245  SingleStepControl* const single_step_control = thread->GetSingleStepControl();
3246  DCHECK(single_step_control != nullptr);
3247  int32_t line_number = -1;
3248  SingleStepStackVisitor visitor(thread, single_step_control, &line_number);
3249  visitor.WalkStack();
3250
3251  //
3252  // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
3253  //
3254
3255  struct DebugCallbackContext {
3256    explicit DebugCallbackContext(SingleStepControl* single_step_control, int32_t line_number,
3257                                  const DexFile::CodeItem* code_item)
3258      : single_step_control_(single_step_control), line_number_(line_number), code_item_(code_item),
3259        last_pc_valid(false), last_pc(0) {
3260    }
3261
3262    static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) {
3263      DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
3264      if (static_cast<int32_t>(line_number) == context->line_number_) {
3265        if (!context->last_pc_valid) {
3266          // Everything from this address until the next line change is ours.
3267          context->last_pc = address;
3268          context->last_pc_valid = true;
3269        }
3270        // Otherwise, if we're already in a valid range for this line,
3271        // just keep going (shouldn't really happen)...
3272      } else if (context->last_pc_valid) {  // and the line number is new
3273        // Add everything from the last entry up until here to the set
3274        for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) {
3275          context->single_step_control_->dex_pcs.insert(dex_pc);
3276        }
3277        context->last_pc_valid = false;
3278      }
3279      return false;  // There may be multiple entries for any given line.
3280    }
3281
3282    ~DebugCallbackContext() {
3283      // If the line number was the last in the position table...
3284      if (last_pc_valid) {
3285        size_t end = code_item_->insns_size_in_code_units_;
3286        for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
3287          single_step_control_->dex_pcs.insert(dex_pc);
3288        }
3289      }
3290    }
3291
3292    SingleStepControl* const single_step_control_;
3293    const int32_t line_number_;
3294    const DexFile::CodeItem* const code_item_;
3295    bool last_pc_valid;
3296    uint32_t last_pc;
3297  };
3298  single_step_control->dex_pcs.clear();
3299  mirror::ArtMethod* m = single_step_control->method;
3300  if (!m->IsNative()) {
3301    const DexFile::CodeItem* const code_item = m->GetCodeItem();
3302    DebugCallbackContext context(single_step_control, line_number, code_item);
3303    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
3304                                     DebugCallbackContext::Callback, NULL, &context);
3305  }
3306
3307  //
3308  // Everything else...
3309  //
3310
3311  single_step_control->step_size = step_size;
3312  single_step_control->step_depth = step_depth;
3313  single_step_control->is_active = true;
3314
3315  if (VLOG_IS_ON(jdwp)) {
3316    VLOG(jdwp) << "Single-step thread: " << *thread;
3317    VLOG(jdwp) << "Single-step step size: " << single_step_control->step_size;
3318    VLOG(jdwp) << "Single-step step depth: " << single_step_control->step_depth;
3319    VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->method);
3320    VLOG(jdwp) << "Single-step current line: " << line_number;
3321    VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->stack_depth;
3322    VLOG(jdwp) << "Single-step dex_pc values:";
3323    for (uint32_t dex_pc : single_step_control->dex_pcs) {
3324      VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
3325    }
3326  }
3327
3328  return JDWP::ERR_NONE;
3329}
3330
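// Tears down the single-step state for the given thread once stepping is complete or cancelled.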
3331void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
3332  ScopedObjectAccessUnchecked soa(Thread::Current());
3333  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3334  Thread* thread;
3335  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
3336  if (error == JDWP::ERR_NONE) {
3337    SingleStepControl* single_step_control = thread->GetSingleStepControl();
3338    DCHECK(single_step_control != nullptr);
3339    single_step_control->Clear();
3340  }
3341}
3342
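// Maps a JDWP type tag to the corresponding shorty descriptor character.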
3343static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
3344  switch (tag) {
3345    default:
3346      LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
3347
3348    // Primitives.
3349    case JDWP::JT_BYTE:    return 'B';
3350    case JDWP::JT_CHAR:    return 'C';
3351    case JDWP::JT_FLOAT:   return 'F';
3352    case JDWP::JT_DOUBLE:  return 'D';
3353    case JDWP::JT_INT:     return 'I';
3354    case JDWP::JT_LONG:    return 'J';
3355    case JDWP::JT_SHORT:   return 'S';
3356    case JDWP::JT_VOID:    return 'V';
3357    case JDWP::JT_BOOLEAN: return 'Z';
3358
3359    // Reference types.
3360    case JDWP::JT_ARRAY:
3361    case JDWP::JT_OBJECT:
3362    case JDWP::JT_STRING:
3363    case JDWP::JT_THREAD:
3364    case JDWP::JT_THREAD_GROUP:
3365    case JDWP::JT_CLASS_LOADER:
3366    case JDWP::JT_CLASS_OBJECT:
3367      return 'L';
3368  }
3369}
3370
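// Prepares a method invocation on behalf of the debugger: validates the target thread, receiver,
// class and arguments, fills in the thread's DebugInvokeReq, then resumes the target thread and
// waits for it to execute the request (see ExecuteMethod below).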
3371JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
3372                                  JDWP::RefTypeId class_id, JDWP::MethodId method_id,
3373                                  uint32_t arg_count, uint64_t* arg_values,
3374                                  JDWP::JdwpTag* arg_types, uint32_t options,
3375                                  JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
3376                                  JDWP::ObjectId* pExceptionId) {
3377  ThreadList* thread_list = Runtime::Current()->GetThreadList();
3378
3379  Thread* targetThread = NULL;
3380  DebugInvokeReq* req = NULL;
3381  Thread* self = Thread::Current();
3382  {
3383    ScopedObjectAccessUnchecked soa(self);
3384    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3385    JDWP::JdwpError error = DecodeThread(soa, thread_id, targetThread);
3386    if (error != JDWP::ERR_NONE) {
3387      LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
3388      return error;
3389    }
3390    req = targetThread->GetInvokeReq();
3391    if (!req->ready) {
3392      LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
3393      return JDWP::ERR_INVALID_THREAD;
3394    }
3395
3396    /*
3397     * We currently have a bug where we don't successfully resume the
3398     * target thread if the suspend count is too deep.  We're expected to
3399     * require one "resume" for each "suspend", but when asked to execute
3400     * a method we have to resume fully and then re-suspend it back to the
3401     * same level.  (The easiest way to cause this is to type "suspend"
3402     * multiple times in jdb.)
3403     *
3404     * It's unclear what this means when the event specifies "resume all"
3405     * and some threads are suspended more deeply than others.  This is
3406     * a rare problem, so for now we just prevent it from hanging forever
3407     * by rejecting the method invocation request.  Without this, we will
3408     * be stuck waiting on a suspended thread.
3409     */
3410    int suspend_count;
3411    {
3412      MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
3413      suspend_count = targetThread->GetSuspendCount();
3414    }
3415    if (suspend_count > 1) {
3416      LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count;
3417      return JDWP::ERR_THREAD_SUSPENDED;  // Probably not expected here.
3418    }
3419
3420    JDWP::JdwpError status;
3421    mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id);
3422    if (receiver == ObjectRegistry::kInvalidObject) {
3423      return JDWP::ERR_INVALID_OBJECT;
3424    }
3425
3426    mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id);
3427    if (thread == ObjectRegistry::kInvalidObject) {
3428      return JDWP::ERR_INVALID_OBJECT;
3429    }
3430    // TODO: check that 'thread' is actually a java.lang.Thread!
3431
3432    mirror::Class* c = DecodeClass(class_id, status);
3433    if (c == NULL) {
3434      return status;
3435    }
3436
3437    mirror::ArtMethod* m = FromMethodId(method_id);
3438    if (m->IsStatic() != (receiver == NULL)) {
3439      return JDWP::ERR_INVALID_METHODID;
3440    }
3441    if (m->IsStatic()) {
3442      if (m->GetDeclaringClass() != c) {
3443        return JDWP::ERR_INVALID_METHODID;
3444      }
3445    } else {
3446      if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
3447        return JDWP::ERR_INVALID_METHODID;
3448      }
3449    }
3450
3451    // Check the argument list matches the method.
3452    uint32_t shorty_len = 0;
3453    const char* shorty = m->GetShorty(&shorty_len);
3454    if (shorty_len - 1 != arg_count) {
3455      return JDWP::ERR_ILLEGAL_ARGUMENT;
3456    }
3457
3458    {
3459      StackHandleScope<3> hs(soa.Self());
3460      MethodHelper mh(hs.NewHandle(m));
3461      HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
3462      HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
3463      const DexFile::TypeList* types = m->GetParameterTypeList();
3464      for (size_t i = 0; i < arg_count; ++i) {
3465        if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
3466          return JDWP::ERR_ILLEGAL_ARGUMENT;
3467        }
3468
3469        if (shorty[i + 1] == 'L') {
3470          // Did we really get an argument of an appropriate reference type?
3471          mirror::Class* parameter_type = mh.GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_);
3472          mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i]);
3473          if (argument == ObjectRegistry::kInvalidObject) {
3474            return JDWP::ERR_INVALID_OBJECT;
3475          }
3476          if (argument != NULL && !argument->InstanceOf(parameter_type)) {
3477            return JDWP::ERR_ILLEGAL_ARGUMENT;
3478          }
3479
3480          // Turn the on-the-wire ObjectId into a jobject.
3481          jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
3482          v.l = gRegistry->GetJObject(arg_values[i]);
3483        }
3484      }
3485      // Update in case it moved.
3486      m = mh.GetMethod();
3487    }
3488
3489    req->receiver = receiver;
3490    req->thread = thread;
3491    req->klass = c;
3492    req->method = m;
3493    req->arg_count = arg_count;
3494    req->arg_values = arg_values;
3495    req->options = options;
3496    req->invoke_needed = true;
3497  }
3498
  // The fact that we've released the thread list lock is a bit risky: if the thread goes away
  // we're sitting high and dry. But we must release it before resuming the other threads, and
  // it's unwise to hold it while waiting for the invoke to complete.
3502
3503  {
3504    /*
3505     * We change our (JDWP thread) status, which should be THREAD_RUNNING,
3506     * so we can suspend for a GC if the invoke request causes us to
3507     * run out of memory.  It's also a good idea to change it before locking
3508     * the invokeReq mutex, although that should never be held for long.
3509     */
3510    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
3511
3512    VLOG(jdwp) << "    Transferring control to event thread";
3513    {
3514      MutexLock mu(self, req->lock);
3515
3516      if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3517        VLOG(jdwp) << "      Resuming all threads";
3518        thread_list->UndoDebuggerSuspensions();
3519      } else {
3520        VLOG(jdwp) << "      Resuming event thread only";
3521        thread_list->Resume(targetThread, true);
3522      }
3523
3524      // Wait for the request to finish executing.
3525      while (req->invoke_needed) {
3526        req->cond.Wait(self);
3527      }
3528    }
3529    VLOG(jdwp) << "    Control has returned from event thread";
3530
3531    /* wait for thread to re-suspend itself */
3532    SuspendThread(thread_id, false /* request_suspension */);
3533    self->TransitionFromSuspendedToRunnable();
3534  }
3535
  /*
   * Suspend the threads.  We waited for the target thread to suspend
   * itself, so all we need to do is suspend the others.
   *
   * The SuspendAllForDebugger() call will double-suspend the event thread,
   * so we resume the target thread once afterwards to keep the books straight.
   */
3543  if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3544    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3545    VLOG(jdwp) << "      Suspending all threads";
3546    thread_list->SuspendAllForDebugger();
3547    self->TransitionFromSuspendedToRunnable();
3548    VLOG(jdwp) << "      Resuming event thread to balance the count";
3549    thread_list->Resume(targetThread, true);
3550  }
3551
3552  // Copy the result.
3553  *pResultTag = req->result_tag;
3554  if (IsPrimitiveTag(req->result_tag)) {
3555    *pResultValue = req->result_value.GetJ();
3556  } else {
3557    *pResultValue = gRegistry->Add(req->result_value.GetL());
3558  }
3559  *pExceptionId = req->exception;
3560  return req->error;
3561}
3562
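// Runs on the event (target) thread to perform the invocation described by its DebugInvokeReq,
// preserving any pending exception across the call.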
3563void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
3564  ScopedObjectAccess soa(Thread::Current());
3565
3566  // We can be called while an exception is pending. We need
3567  // to preserve that across the method invocation.
3568  StackHandleScope<4> hs(soa.Self());
3569  auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr);
3570  auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
3571  auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr);
3572  uint32_t old_throw_dex_pc;
3573  bool old_exception_report_flag;
3574  {
3575    ThrowLocation old_throw_location;
3576    mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
3577    old_throw_this_object.Assign(old_throw_location.GetThis());
3578    old_throw_method.Assign(old_throw_location.GetMethod());
3579    old_exception.Assign(old_exception_obj);
3580    old_throw_dex_pc = old_throw_location.GetDexPc();
3581    old_exception_report_flag = soa.Self()->IsExceptionReportedToInstrumentation();
3582    soa.Self()->ClearException();
3583  }
3584
3585  // Translate the method through the vtable, unless the debugger wants to suppress it.
3586  Handle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
3587  if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != NULL) {
3588    mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get());
3589    if (actual_method != m.Get()) {
3590      VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method);
3591      m.Assign(actual_method);
3592    }
3593  }
3594  VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get())
3595             << " receiver=" << pReq->receiver
3596             << " arg_count=" << pReq->arg_count;
3597  CHECK(m.Get() != nullptr);
3598
3599  CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
3600
3601  pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()),
3602                                         reinterpret_cast<jvalue*>(pReq->arg_values));
3603
3604  mirror::Throwable* exception = soa.Self()->GetException(NULL);
3605  soa.Self()->ClearException();
3606  pReq->exception = gRegistry->Add(exception);
3607  pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty());
3608  if (pReq->exception != 0) {
3609    VLOG(jdwp) << "  JDWP invocation returning with exception=" << exception
3610        << " " << exception->Dump();
3611    pReq->result_value.SetJ(0);
3612  } else if (pReq->result_tag == JDWP::JT_OBJECT) {
3613    /* if no exception thrown, examine object result more closely */
3614    JDWP::JdwpTag new_tag = TagFromObject(soa, pReq->result_value.GetL());
3615    if (new_tag != pReq->result_tag) {
3616      VLOG(jdwp) << "  JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
3617      pReq->result_tag = new_tag;
3618    }
3619
3620    /*
3621     * Register the object.  We don't actually need an ObjectId yet,
3622     * but we do need to be sure that the GC won't move or discard the
3623     * object when we switch out of RUNNING.  The ObjectId conversion
3624     * will add the object to the "do not touch" list.
3625     *
3626     * We can't use the "tracked allocation" mechanism here because
3627     * the object is going to be handed off to a different thread.
3628     */
3629    gRegistry->Add(pReq->result_value.GetL());
3630  }
3631
3632  if (old_exception.Get() != NULL) {
3633    ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
3634                                         old_throw_dex_pc);
3635    soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
3636    soa.Self()->SetExceptionReportedToInstrumentation(old_exception_report_flag);
3637  }
3638}
3639
3640/*
3641 * "request" contains a full JDWP packet, possibly with multiple chunks.  We
3642 * need to process each, accumulate the replies, and ship the whole thing
3643 * back.
3644 *
3645 * Returns "true" if we have a reply.  The reply buffer is newly allocated,
3646 * and includes the chunk type/length, followed by the data.
3647 *
3648 * OLD-TODO: we currently assume that the request and reply include a single
3649 * chunk.  If this becomes inconvenient we will need to adapt.
3650 */
3651bool Dbg::DdmHandlePacket(JDWP::Request& request, uint8_t** pReplyBuf, int* pReplyLen) {
3652  Thread* self = Thread::Current();
3653  JNIEnv* env = self->GetJniEnv();
3654
3655  uint32_t type = request.ReadUnsigned32("type");
3656  uint32_t length = request.ReadUnsigned32("length");
3657
3658  // Create a byte[] corresponding to 'request'.
3659  size_t request_length = request.size();
3660  ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
3661  if (dataArray.get() == NULL) {
3662    LOG(WARNING) << "byte[] allocation failed: " << request_length;
3663    env->ExceptionClear();
3664    return false;
3665  }
3666  env->SetByteArrayRegion(dataArray.get(), 0, request_length, reinterpret_cast<const jbyte*>(request.data()));
3667  request.Skip(request_length);
3668
3669  // Run through and find all chunks.  [Currently just find the first.]
3670  ScopedByteArrayRO contents(env, dataArray.get());
3671  if (length != request_length) {
    LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zu)", length, request_length);
3673    return false;
3674  }
3675
3676  // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
3677  ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3678                                                                 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
3679                                                                 type, dataArray.get(), 0, length));
3680  if (env->ExceptionCheck()) {
3681    LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
3682    env->ExceptionDescribe();
3683    env->ExceptionClear();
3684    return false;
3685  }
3686
3687  if (chunk.get() == NULL) {
3688    return false;
3689  }
3690
3691  /*
3692   * Pull the pieces out of the chunk.  We copy the results into a
3693   * newly-allocated buffer that the caller can free.  We don't want to
3694   * continue using the Chunk object because nothing has a reference to it.
3695   *
3696   * We could avoid this by returning type/data/offset/length and having
3697   * the caller be aware of the object lifetime issues, but that
3698   * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
3699   * if we have responses for multiple chunks.
3700   *
3701   * So we're pretty much stuck with copying data around multiple times.
3702   */
3703  ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
3704  jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
3705  length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
3706  type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);
3707
3708  VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
3709  if (length == 0 || replyData.get() == NULL) {
3710    return false;
3711  }
3712
3713  const int kChunkHdrLen = 8;
3714  uint8_t* reply = new uint8_t[length + kChunkHdrLen];
3715  if (reply == NULL) {
    LOG(WARNING) << "allocation failed: " << (length + kChunkHdrLen);
3717    return false;
3718  }
3719  JDWP::Set4BE(reply + 0, type);
3720  JDWP::Set4BE(reply + 4, length);
3721  env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
3722
3723  *pReplyBuf = reply;
3724  *pReplyLen = length + kChunkHdrLen;
3725
  VLOG(jdwp) << StringPrintf("DdmHandlePacket returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
3727  return true;
3728}
3729
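// Notifies the DdmServer Java class that a DDM client has connected or disconnected.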
3730void Dbg::DdmBroadcast(bool connect) {
3731  VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
3732
3733  Thread* self = Thread::Current();
3734  if (self->GetState() != kRunnable) {
3735    LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
3736    /* try anyway? */
3737  }
3738
3739  JNIEnv* env = self->GetJniEnv();
3740  jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
3741  env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3742                            WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
3743                            event);
3744  if (env->ExceptionCheck()) {
3745    LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
3746    env->ExceptionDescribe();
3747    env->ExceptionClear();
3748  }
3749}
3750
3751void Dbg::DdmConnected() {
3752  Dbg::DdmBroadcast(true);
3753}
3754
3755void Dbg::DdmDisconnected() {
3756  Dbg::DdmBroadcast(false);
3757  gDdmThreadNotification = false;
3758}
3759
3760/*
3761 * Send a notification when a thread starts, stops, or changes its name.
3762 *
3763 * Because we broadcast the full set of threads when the notifications are
3764 * first enabled, it's possible for "thread" to be actively executing.
3765 */
3766void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
3767  if (!gDdmThreadNotification) {
3768    return;
3769  }
3770
3771  if (type == CHUNK_TYPE("THDE")) {
3772    uint8_t buf[4];
3773    JDWP::Set4BE(&buf[0], t->GetThreadId());
3774    Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
3775  } else {
3776    CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
3777    ScopedObjectAccessUnchecked soa(Thread::Current());
3778    StackHandleScope<1> hs(soa.Self());
3779    Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
3780    size_t char_count = (name.Get() != NULL) ? name->GetLength() : 0;
3781    const jchar* chars = (name.Get() != NULL) ? name->GetCharArray()->GetData() : NULL;
3782
3783    std::vector<uint8_t> bytes;
3784    JDWP::Append4BE(bytes, t->GetThreadId());
3785    JDWP::AppendUtf16BE(bytes, chars, char_count);
3786    CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
3787    Dbg::DdmSendChunk(type, bytes);
3788  }
3789}
3790
3791void Dbg::DdmSetThreadNotification(bool enable) {
3792  // Enable/disable thread notifications.
3793  gDdmThreadNotification = enable;
3794  if (enable) {
3795    // Suspend the VM then post thread start notifications for all threads. Threads attaching will
3796    // see a suspension in progress and block until that ends. They then post their own start
3797    // notification.
3798    SuspendVM();
3799    std::list<Thread*> threads;
3800    Thread* self = Thread::Current();
3801    {
3802      MutexLock mu(self, *Locks::thread_list_lock_);
3803      threads = Runtime::Current()->GetThreadList()->GetList();
3804    }
3805    {
3806      ScopedObjectAccess soa(self);
3807      for (Thread* thread : threads) {
3808        Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
3809      }
3810    }
3811    ResumeVM();
3812  }
3813}
3814
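// Reports a thread start or death both to an attached debugger (as a JDWP thread event) and to
// DDMS.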
3815void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
3816  if (IsDebuggerActive()) {
3817    ScopedObjectAccessUnchecked soa(Thread::Current());
3818    JDWP::ObjectId id = gRegistry->Add(t->GetPeer());
3819    gJdwpState->PostThreadChange(id, type == CHUNK_TYPE("THCR"));
3820  }
3821  Dbg::DdmSendThreadNotification(t, type);
3822}
3823
3824void Dbg::PostThreadStart(Thread* t) {
3825  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
3826}
3827
3828void Dbg::PostThreadDeath(Thread* t) {
3829  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
3830}
3831
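// Sends a single DDM chunk, wrapping the raw buffer in a one-element iovec.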
3832void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
3833  CHECK(buf != NULL);
3834  iovec vec[1];
3835  vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
3836  vec[0].iov_len = byte_count;
3837  Dbg::DdmSendChunkV(type, vec, 1);
3838}
3839
3840void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
3841  DdmSendChunk(type, bytes.size(), &bytes[0]);
3842}
3843
3844void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
3845  if (gJdwpState == NULL) {
3846    VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
3847  } else {
3848    gJdwpState->DdmSendChunkV(type, iov, iov_count);
3849  }
3850}
3851
3852int Dbg::DdmHandleHpifChunk(HpifWhen when) {
3853  if (when == HPIF_WHEN_NOW) {
3854    DdmSendHeapInfo(when);
3855    return true;
3856  }
3857
3858  if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
3859    LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
3860    return false;
3861  }
3862
3863  gDdmHpifWhen = when;
3864  return true;
3865}
3866
3867bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
3868  if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
3869    LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
3870    return false;
3871  }
3872
3873  if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
3874    LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
3875    return false;
3876  }
3877
3878  if (native) {
3879    gDdmNhsgWhen = when;
3880    gDdmNhsgWhat = what;
3881  } else {
3882    gDdmHpsgWhen = when;
3883    gDdmHpsgWhat = what;
3884  }
3885  return true;
3886}
3887
3888void Dbg::DdmSendHeapInfo(HpifWhen reason) {
3889  // If there's a one-shot 'when', reset it.
3890  if (reason == gDdmHpifWhen) {
3891    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
3892      gDdmHpifWhen = HPIF_WHEN_NEVER;
3893    }
3894  }
3895
3896  /*
3897   * Chunk HPIF (client --> server)
3898   *
3899   * Heap Info. General information about the heap,
3900   * suitable for a summary display.
3901   *
3902   *   [u4]: number of heaps
3903   *
3904   *   For each heap:
3905   *     [u4]: heap ID
3906   *     [u8]: timestamp in ms since Unix epoch
3907   *     [u1]: capture reason (same as 'when' value from server)
3908   *     [u4]: max heap size in bytes (-Xmx)
3909   *     [u4]: current heap size in bytes
3910   *     [u4]: current number of bytes allocated
3911   *     [u4]: current number of objects allocated
3912   */
3913  uint8_t heap_count = 1;
3914  gc::Heap* heap = Runtime::Current()->GetHeap();
3915  std::vector<uint8_t> bytes;
3916  JDWP::Append4BE(bytes, heap_count);
3917  JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
3918  JDWP::Append8BE(bytes, MilliTime());
3919  JDWP::Append1BE(bytes, reason);
3920  JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
3921  JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
3922  JDWP::Append4BE(bytes, heap->GetBytesAllocated());
3923  JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
3924  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
3925  Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
3926}
3927
3928enum HpsgSolidity {
3929  SOLIDITY_FREE = 0,
3930  SOLIDITY_HARD = 1,
3931  SOLIDITY_SOFT = 2,
3932  SOLIDITY_WEAK = 3,
3933  SOLIDITY_PHANTOM = 4,
3934  SOLIDITY_FINALIZABLE = 5,
3935  SOLIDITY_SWEEP = 6,
3936};
3937
3938enum HpsgKind {
3939  KIND_OBJECT = 0,
3940  KIND_CLASS_OBJECT = 1,
3941  KIND_ARRAY_1 = 2,
3942  KIND_ARRAY_2 = 3,
3943  KIND_ARRAY_4 = 4,
3944  KIND_ARRAY_8 = 5,
3945  KIND_UNKNOWN = 6,
3946  KIND_NATIVE = 7,
3947};
3948
3949#define HPSG_PARTIAL (1<<7)
3950#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
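// The state byte packs a kind in bits 3-5 and a solidity in bits 0-2, e.g.
// HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT) == (1 << 3) | 1 == 0x09.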
3951
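// Accumulates heap segment data for a HPSG/HPSO/NHSG chunk and flushes it to the debugger via
// DdmSendChunk whenever the buffer fills up or a new segment starts.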
3952class HeapChunkContext {
3953 public:
3954  // Maximum chunk size.  Obtain this from the formula:
3955  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
3956  HeapChunkContext(bool merge, bool native)
3957      : buf_(16384 - 16),
3958        type_(0),
3959        merge_(merge),
3960        chunk_overhead_(0) {
3961    Reset();
3962    if (native) {
3963      type_ = CHUNK_TYPE("NHSG");
3964    } else {
3965      type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
3966    }
3967  }
3968
3969  ~HeapChunkContext() {
3970    if (p_ > &buf_[0]) {
3971      Flush();
3972    }
3973  }
3974
3975  void SetChunkOverhead(size_t chunk_overhead) {
3976    chunk_overhead_ = chunk_overhead;
3977  }
3978
3979  void ResetStartOfNextChunk() {
3980    startOfNextMemoryChunk_ = nullptr;
3981  }
3982
3983  void EnsureHeader(const void* chunk_ptr) {
3984    if (!needHeader_) {
3985      return;
3986    }
3987
3988    // Start a new HPSx chunk.
3989    JDWP::Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
3990    JDWP::Write1BE(&p_, 8);  // Size of allocation unit, in bytes.
3991
3992    JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
3993    JDWP::Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
3994    // [u4]: length of piece, in allocation units
3995    // We won't know this until we're done, so save the offset and stuff in a dummy value.
3996    pieceLenField_ = p_;
3997    JDWP::Write4BE(&p_, 0x55555555);
3998    needHeader_ = false;
3999  }
4000
4001  void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4002    if (pieceLenField_ == NULL) {
4003      // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
4004      CHECK(needHeader_);
4005      return;
4006    }
4007    // Patch the "length of piece" field.
4008    CHECK_LE(&buf_[0], pieceLenField_);
4009    CHECK_LE(pieceLenField_, p_);
4010    JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
4011
4012    Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
4013    Reset();
4014  }
4015
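  // C-style trampoline used by the heap walk routines; forwards to the private member function
  // below.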
4016  static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg)
4017      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4018                            Locks::mutator_lock_) {
4019    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(start, end, used_bytes);
4020  }
4021
4022 private:
4023  enum { ALLOCATION_UNIT_SIZE = 8 };
4024
4025  void Reset() {
4026    p_ = &buf_[0];
4027    ResetStartOfNextChunk();
4028    totalAllocationUnits_ = 0;
4029    needHeader_ = true;
4030    pieceLenField_ = NULL;
4031  }
4032
4033  void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes)
4034      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4035                            Locks::mutator_lock_) {
    // Note: heap callbacks cannot manipulate the heap they are crawling. Care is taken in the
    // following code not to allocate memory, by ensuring buf_ is of the correct size.
    if (used_bytes == 0) {
      if (start == NULL) {
        // Reset for the start of a new heap.
        startOfNextMemoryChunk_ = NULL;
        Flush();
      }
      // Only process in-use memory so that free-region information also includes dlmalloc
      // bookkeeping.
      return;
    }
4048
    // If we're looking at the native heap, we'll just return (SOLIDITY_HARD, KIND_NATIVE)
    // for all allocated chunks.
    bool native = type_ == CHUNK_TYPE("NHSG");

    // TODO: I'm not sure using the start of the next chunk works well with multiple spaces. We
    // shouldn't count gaps in between spaces as free memory.
    if (startOfNextMemoryChunk_ != NULL) {
      // Transmit any pending free memory. Native free memory over kMaxFreeLen is likely due to
      // the use of mmaps, so don't report it. If it is not free memory, start a new segment.
      bool flush = true;
      if (start > startOfNextMemoryChunk_) {
        const size_t kMaxFreeLen = 2 * kPageSize;
        void* freeStart = startOfNextMemoryChunk_;
        void* freeEnd = start;
        size_t freeLen = reinterpret_cast<char*>(freeEnd) - reinterpret_cast<char*>(freeStart);
        if (!native || freeLen < kMaxFreeLen) {
          AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen);
          flush = false;
        }
      }
      if (flush) {
        startOfNextMemoryChunk_ = NULL;
        Flush();
      }
    }
4076    mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
4077
4078    // Determine the type of this chunk.
4079    // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
4080    // If it's the same, we should combine them.
4081    uint8_t state = ExamineObject(obj, native);
4082    AppendChunk(state, start, used_bytes + chunk_overhead_);
4083    startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4084  }
4085
4086  void AppendChunk(uint8_t state, void* ptr, size_t length)
4087      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4088    // Make sure there's enough room left in the buffer.
4089    // We need to use two bytes for every fractional 256 allocation units used by the chunk plus
4090    // 17 bytes for any header.
4091    size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
4092    size_t bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4093    if (bytesLeft < needed) {
4094      Flush();
4095    }
4096
4097    bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4098    if (bytesLeft < needed) {
      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", needs "
          << needed << " bytes)";
4101      return;
4102    }
4103    EnsureHeader(ptr);
4104    // Write out the chunk description.
4105    length /= ALLOCATION_UNIT_SIZE;   // Convert to allocation units.
4106    totalAllocationUnits_ += length;
4107    while (length > 256) {
4108      *p_++ = state | HPSG_PARTIAL;
4109      *p_++ = 255;     // length - 1
4110      length -= 256;
4111    }
4112    *p_++ = state;
4113    *p_++ = length - 1;
4114  }
4115
4116  uint8_t ExamineObject(mirror::Object* o, bool is_native_heap)
4117      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
4118    if (o == NULL) {
4119      return HPSG_STATE(SOLIDITY_FREE, 0);
4120    }
4121
4122    // It's an allocated chunk. Figure out what it is.
4123
4124    // If we're looking at the native heap, we'll just return
4125    // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
4126    if (is_native_heap) {
4127      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4128    }
4129
4130    if (!Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) {
4131      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4132    }
4133
4134    mirror::Class* c = o->GetClass();
4135    if (c == NULL) {
4136      // The object was probably just created but hasn't been initialized yet.
4137      return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4138    }
4139
4140    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
4141      LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
4142      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4143    }
4144
4145    if (c->IsClassClass()) {
4146      return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
4147    }
4148
4149    if (c->IsArrayClass()) {
4150      if (o->IsObjectArray()) {
4151        return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4152      }
4153      switch (c->GetComponentSize()) {
4154      case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
4155      case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
4156      case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4157      case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
4158      }
4159    }
4160
4161    return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4162  }
4163
4164  std::vector<uint8_t> buf_;
4165  uint8_t* p_;
4166  uint8_t* pieceLenField_;
4167  void* startOfNextMemoryChunk_;
4168  size_t totalAllocationUnits_;
4169  uint32_t type_;
4170  bool merge_;
4171  bool needHeader_;
4172  size_t chunk_overhead_;
4173
4174  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
4175};
4176
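// Bump pointer spaces do not track chunks, so treat each object as its own chunk, using its
// aligned size, before forwarding to the common callback.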
4177static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
4178    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
4179  const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
4180  HeapChunkContext::HeapChunkCallback(
4181      obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
4182}
4183
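// Sends the heap segment chunks describing either the managed heap or the native heap: a start
// chunk, one or more segment chunks produced by walking the spaces, and an end chunk.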
4184void Dbg::DdmSendHeapSegments(bool native) {
4185  Dbg::HpsgWhen when;
4186  Dbg::HpsgWhat what;
4187  if (!native) {
4188    when = gDdmHpsgWhen;
4189    what = gDdmHpsgWhat;
4190  } else {
4191    when = gDdmNhsgWhen;
4192    what = gDdmNhsgWhat;
4193  }
4194  if (when == HPSG_WHEN_NEVER) {
4195    return;
4196  }
4197
4198  // Figure out what kind of chunks we'll be sending.
4199  CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS) << static_cast<int>(what);

  // First, send a heap start chunk.
  uint8_t heap_id[4];
  JDWP::Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);

  Thread* self = Thread::Current();

  // To allow the Walk/InspectAll() calls below to exclusively lock the
  // mutator lock, temporarily release our shared access to it by
  // transitioning to the suspended state.
  Locks::mutator_lock_->AssertSharedHeld(self);
  self->TransitionFromRunnableToSuspended(kSuspended);

  // Send a series of heap segment chunks.
  HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
  if (native) {
#ifdef USE_DLMALLOC
    dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
#else
    UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
#endif
  } else {
    gc::Heap* heap = Runtime::Current()->GetHeap();
    for (const auto& space : heap->GetContinuousSpaces()) {
      if (space->IsDlMallocSpace()) {
        // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
        // allocation then the first sizeof(size_t) may belong to it.
        context.SetChunkOverhead(sizeof(size_t));
        space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
      } else if (space->IsRosAllocSpace()) {
        context.SetChunkOverhead(0);
        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
      } else if (space->IsBumpPointerSpace()) {
        context.SetChunkOverhead(0);
        ReaderMutexLock mu(self, *Locks::mutator_lock_);
        WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
        space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
      } else {
        UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
      }
      context.ResetStartOfNextChunk();
    }
    // Walk the large objects; these are not in the AllocSpace.
    context.SetChunkOverhead(0);
    heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
  }

  // Resume and re-assert our shared access to the mutator lock.
  self->TransitionFromSuspendedToRunnable();
  Locks::mutator_lock_->AssertSharedHeld(self);

  // Finally, send a heap end chunk.
  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
}
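
// A managed-heap pass thus emits a DDM "HPST" start chunk, one or more
// heap-segment data chunks produced by HeapChunkContext, and a closing
// "HPEN" chunk ("NHST"/"NHEN" for the native heap).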

static size_t GetAllocTrackerMax() {
#ifdef HAVE_ANDROID_OS
  // Check whether there's a system property overriding the number of records.
  const char* propertyName = "dalvik.vm.allocTrackerMax";
  char allocRecordMaxString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, allocRecordMaxString, "") > 0) {
    char* end;
    size_t value = strtoul(allocRecordMaxString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
                 << "' --- invalid";
      return kDefaultNumAllocRecords;
    }
    if (!IsPowerOfTwo(value)) {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
                 << "' --- not a power of two";
      return kDefaultNumAllocRecords;
    }
    return value;
  }
#endif
  return kDefaultNumAllocRecords;
}
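
// For example (illustrative), the record count can be overridden on-device
// with:
//   adb shell setprop dalvik.vm.allocTrackerMax 16384
// Values that fail to parse, or that are not powers of two, are rejected
// above and the default is used instead.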

void Dbg::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (recent_allocation_records_ != nullptr) {
        return;  // Already enabled, bail.
      }
      alloc_record_max_ = GetAllocTrackerMax();
      LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
                << kMaxAllocRecordStackDepth << " frames, taking "
                << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
      DCHECK_EQ(alloc_record_head_, 0U);
      DCHECK_EQ(alloc_record_count_, 0U);
      recent_allocation_records_ = new AllocRecord[alloc_record_max_];
      CHECK(recent_allocation_records_ != nullptr);
    }
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  } else {
    {
      ScopedObjectAccess soa(self);  // For type_cache_.Clear();
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (recent_allocation_records_ == nullptr) {
        return;  // Already disabled, bail.
      }
      LOG(INFO) << "Disabling alloc tracker";
      delete[] recent_allocation_records_;
      recent_allocation_records_ = nullptr;
      alloc_record_head_ = 0;
      alloc_record_count_ = 0;
      type_cache_.Clear();
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}

struct AllocRecordStackVisitor : public StackVisitor {
  AllocRecordStackVisitor(Thread* thread, AllocRecord* record)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr), record(record), depth(0) {}

  // TODO: Enable annotalysis. We know the lock is held in the constructor, but
  // the abstraction confuses annotalysis.
  bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
    if (depth >= kMaxAllocRecordStackDepth) {
      return false;
    }
    mirror::ArtMethod* m = GetMethod();
    if (!m->IsRuntimeMethod()) {
      record->StackElement(depth)->SetMethod(m);
      record->StackElement(depth)->SetDexPc(GetDexPc());
      ++depth;
    }
    return true;
  }

  ~AllocRecordStackVisitor() {
    // Clear out any unused stack trace elements.
    for (; depth < kMaxAllocRecordStackDepth; ++depth) {
      record->StackElement(depth)->SetMethod(nullptr);
      record->StackElement(depth)->SetDexPc(0);
    }
  }

  AllocRecord* record;
  size_t depth;
};
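
// StackVisitor walks from the innermost frame outward, so StackElement(0)
// holds the allocation site itself and higher indexes hold its callers.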

void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) {
  Thread* self = Thread::Current();
  CHECK(self != nullptr);

  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  if (recent_allocation_records_ == nullptr) {
    // We're in the process of shutting down recording; bail.
    return;
  }

  // Advance and clip.
  if (++alloc_record_head_ == alloc_record_max_) {
    alloc_record_head_ = 0;
  }

  // Fill in the basics.
  AllocRecord* record = &recent_allocation_records_[alloc_record_head_];
  record->SetType(type);
  record->SetByteCount(byte_count);
  record->SetThinLockId(self->GetThreadId());

  // Fill in the stack trace.
  AllocRecordStackVisitor visitor(self, record);
  visitor.WalkStack();

  if (alloc_record_count_ < alloc_record_max_) {
    ++alloc_record_count_;
  }
}

// Returns the index of the oldest element in the circular buffer, i.e. where
// a forward scan of the records should begin.
//
// alloc_record_head_ points at the most-recently-written record, so if
// alloc_record_count_ is 1 we want to use the current element. Take "head+1"
// and subtract count from it.
//
// We need to handle underflow in our circular buffer, so we add
// alloc_record_max_ and then mask it back down.
size_t Dbg::HeadIndex() {
  return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) &
      (Dbg::alloc_record_max_ - 1);
}
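
// Worked example (illustrative): with alloc_record_max_ == 8,
// alloc_record_head_ == 2 and alloc_record_count_ == 3, HeadIndex() is
// (2 + 1 + 8 - 3) & 7 == 0, so a scan visits indexes 0, 1 and 2, ending on
// the most recent record at index 2.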

void Dbg::DumpRecentAllocations() {
  ScopedObjectAccess soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
  if (recent_allocation_records_ == nullptr) {
    LOG(INFO) << "Not recording tracked allocations";
    return;
  }

  // "i" starts at the oldest record and moves forward to the most recent one.
  size_t i = HeadIndex();
  const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
  uint16_t count = capped_count;

  LOG(INFO) << "Tracked allocations (head=" << alloc_record_head_ << " count=" << count << ")";
  while (count--) {
    AllocRecord* record = &recent_allocation_records_[i];

    LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->ThinLockId(), record->ByteCount())
              << PrettyClass(record->Type());

    for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
      AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame);
      mirror::ArtMethod* m = stack_element->Method();
      if (m == nullptr) {
        break;
      }
      LOG(INFO) << "    " << PrettyMethod(m) << " line " << stack_element->LineNumber();
    }

    // Pause periodically to help logcat catch up.
    if ((count % 5) == 0) {
      usleep(40000);
    }

    i = (i + 1) & (alloc_record_max_ - 1);
  }
}
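
// Sample output (illustrative only; see the format strings above):
//   Tracked allocations (head=2 count=2)
//    Thread 12     24 bytes java.lang.String
//       java.lang.String java.lang.StringBuilder.toString() line 417
//    Thread 12     64 bytes int[]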

class StringTable {
 public:
  StringTable() {
  }

  void Add(const std::string& str) {
    table_.insert(str);
  }

  void Add(const char* str) {
    table_.insert(str);
  }

  size_t IndexOf(const char* s) const {
    auto it = table_.find(s);
    if (it == table_.end()) {
      LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
    }
    return std::distance(table_.begin(), it);
  }

  size_t Size() const {
    return table_.size();
  }

  void WriteTo(std::vector<uint8_t>& bytes) const {
    for (const std::string& str : table_) {
      const char* s = str.c_str();
      size_t s_len = CountModifiedUtf8Chars(s);
      // Use the array form of std::unique_ptr so the buffer is released with
      // delete[] rather than delete.
      std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
      ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
      JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
    }
  }

 private:
  std::set<std::string> table_;
  DISALLOW_COPY_AND_ASSIGN(StringTable);
};
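
// Usage note (illustrative): IndexOf() returns the string's position in the
// sorted std::set, so indexes are only stable once every Add() has happened:
//   StringTable t;
//   t.Add("LFoo;");
//   t.Add("LBar;");
//   size_t i = t.IndexOf("LFoo;");  // 1, because "LBar;" sorts first.
// GetRecentAllocations() below relies on this by fully populating all three
// tables before emitting any indexes.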

static const char* GetMethodSourceFile(mirror::ArtMethod* method)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(method != nullptr);
  const char* source_file = method->GetDeclaringClassSourceFile();
  return (source_file != nullptr) ? source_file : "";
}

/*
 * The data we send to DDMS contains everything we have recorded.
 *
 * Message header (all values big-endian):
 * (1b) message header len (to allow future expansion); includes itself
 * (1b) entry header len
 * (1b) stack frame len
 * (2b) number of entries
 * (4b) offset to string table from start of message
 * (2b) number of class name strings
 * (2b) number of method name strings
 * (2b) number of source file name strings
 * For each entry:
 *   (4b) total allocation size
 *   (2b) thread id
 *   (2b) allocated object's class name index
 *   (1b) stack depth
 *   For each stack frame:
 *     (2b) method's class name
 *     (2b) method name
 *     (2b) method source file
 *     (2b) line number, clipped to 32767; -2 if native; -1 if no source
 * (xb) class name strings
 * (xb) method name strings
 * (xb) source file strings
 *
 * As with other DDM traffic, strings are sent as a 4-byte length
 * followed by UTF-16 data.
 *
 * We send up 16-bit unsigned indexes into string tables.  In theory there
 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
 * each table, but in practice there should be far fewer.
 *
 * The chief reason for using a string table here is to keep the size of
 * the DDMS message to a minimum.  This is partly to make the protocol
 * efficient, but also because we have to form the whole thing up all at
 * once in a memory buffer.
 *
 * We use separate string tables for class names, method names, and source
 * files to keep the indexes small.  There will generally be no overlap
 * between the contents of these tables.
 */
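// Worked example of the fixed-size part (illustrative): kMessageHeaderLen is
// 15 == 1+1+1+2+4+2+2+2, kEntryHeaderLen is 9 == 4+2+2+1, and kStackFrameLen
// is 8 == 2+2+2+2, so a message with two entries begins:
//   0x0f 0x09 0x08        header/entry/frame lengths
//   0x00 0x02             number of entries
//   0x?? 0x?? 0x?? 0x??   string table offset (patched in at the end)
// followed by the three 2-byte string-table counts.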
jbyteArray Dbg::GetRecentAllocations() {
  if (false) {  // Flip to true to log the raw records while debugging.
    DumpRecentAllocations();
  }

  Thread* self = Thread::Current();
  std::vector<uint8_t> bytes;
  {
    MutexLock mu(self, *Locks::alloc_tracker_lock_);
    //
    // Part 1: Generate string tables.
    //
    StringTable class_names;
    StringTable method_names;
    StringTable filenames;

    const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
    uint16_t count = capped_count;
    size_t idx = HeadIndex();
    while (count--) {
      AllocRecord* record = &recent_allocation_records_[idx];
      std::string temp;
      class_names.Add(record->Type()->GetDescriptor(&temp));
      for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
        mirror::ArtMethod* m = record->StackElement(i)->Method();
        if (m != nullptr) {
          class_names.Add(m->GetDeclaringClassDescriptor());
          method_names.Add(m->GetName());
          filenames.Add(GetMethodSourceFile(m));
        }
      }

      idx = (idx + 1) & (alloc_record_max_ - 1);
    }

    LOG(INFO) << "allocation records: " << capped_count;

    //
    // Part 2: Generate the output and store it in the buffer.
    //

    // (1b) message header len (to allow future expansion); includes itself
    // (1b) entry header len
    // (1b) stack frame len
    const int kMessageHeaderLen = 15;
    const int kEntryHeaderLen = 9;
    const int kStackFrameLen = 8;
    JDWP::Append1BE(bytes, kMessageHeaderLen);
    JDWP::Append1BE(bytes, kEntryHeaderLen);
    JDWP::Append1BE(bytes, kStackFrameLen);

    // (2b) number of entries
    // (4b) offset to string table from start of message
    // (2b) number of class name strings
    // (2b) number of method name strings
    // (2b) number of source file name strings
    JDWP::Append2BE(bytes, capped_count);
    size_t string_table_offset = bytes.size();
    JDWP::Append4BE(bytes, 0);  // We'll patch this later...
    JDWP::Append2BE(bytes, class_names.Size());
    JDWP::Append2BE(bytes, method_names.Size());
    JDWP::Append2BE(bytes, filenames.Size());

    idx = HeadIndex();
    std::string temp;
    for (count = capped_count; count != 0; --count) {
      // For each entry:
      // (4b) total allocation size
      // (2b) thread id
      // (2b) allocated object's class name index
      // (1b) stack depth
      AllocRecord* record = &recent_allocation_records_[idx];
      size_t stack_depth = record->GetDepth();
      size_t allocated_object_class_name_index =
          class_names.IndexOf(record->Type()->GetDescriptor(&temp));
      JDWP::Append4BE(bytes, record->ByteCount());
      JDWP::Append2BE(bytes, record->ThinLockId());
      JDWP::Append2BE(bytes, allocated_object_class_name_index);
      JDWP::Append1BE(bytes, stack_depth);

      for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
        // For each stack frame:
        // (2b) method's class name
        // (2b) method name
        // (2b) method source file
        // (2b) line number, clipped to 32767; -2 if native; -1 if no source
        mirror::ArtMethod* m = record->StackElement(stack_frame)->Method();
        size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
        size_t method_name_index = method_names.IndexOf(m->GetName());
        size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
        JDWP::Append2BE(bytes, class_name_index);
        JDWP::Append2BE(bytes, method_name_index);
        JDWP::Append2BE(bytes, file_name_index);
        JDWP::Append2BE(bytes, record->StackElement(stack_frame)->LineNumber());
      }
      idx = (idx + 1) & (alloc_record_max_ - 1);
    }

    // (xb) class name strings
    // (xb) method name strings
    // (xb) source file strings
    JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
    class_names.WriteTo(bytes);
    method_names.WriteTo(bytes);
    filenames.WriteTo(bytes);
  }
  JNIEnv* env = self->GetJniEnv();
  jbyteArray result = env->NewByteArray(bytes.size());
  if (result != nullptr) {
    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
  }
  return result;
}

mirror::ArtMethod* DeoptimizationRequest::Method() const {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  return soa.DecodeMethod(method_);
}

void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  method_ = soa.EncodeMethod(m);
}

}  // namespace art
