debugger.cc revision d539167b7f11136fe570a77aff2ee4935842007a
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "debugger.h"
18
19#include <sys/uio.h>
20
21#include <set>
22
23#include "arch/context.h"
24#include "class_linker.h"
25#include "class_linker-inl.h"
26#include "dex_file-inl.h"
27#include "dex_instruction.h"
28#include "field_helper.h"
29#include "gc/accounting/card_table-inl.h"
30#include "gc/space/large_object_space.h"
31#include "gc/space/space-inl.h"
32#include "handle_scope.h"
33#include "jdwp/object_registry.h"
34#include "method_helper.h"
35#include "mirror/art_field-inl.h"
36#include "mirror/art_method-inl.h"
37#include "mirror/class.h"
38#include "mirror/class-inl.h"
39#include "mirror/class_loader.h"
40#include "mirror/object-inl.h"
41#include "mirror/object_array-inl.h"
42#include "mirror/string-inl.h"
43#include "mirror/throwable.h"
44#include "quick/inline_method_analyser.h"
45#include "reflection.h"
46#include "safe_map.h"
47#include "scoped_thread_state_change.h"
48#include "ScopedLocalRef.h"
49#include "ScopedPrimitiveArray.h"
50#include "handle_scope-inl.h"
51#include "thread_list.h"
52#include "throw_location.h"
53#include "utf.h"
54#include "verifier/method_verifier-inl.h"
55#include "well_known_classes.h"
56
57#ifdef HAVE_ANDROID_OS
58#include "cutils/properties.h"
59#endif
60
61namespace art {
62
63static const size_t kMaxAllocRecordStackDepth = 16;  // Max 255.
64static const size_t kDefaultNumAllocRecords = 64*1024;  // Must be a power of 2. 2BE can hold 64k-1.
65
66// Limit alloc_record_count to the 2BE value that is the limit of the current protocol.
67static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
68  if (alloc_record_count > 0xffff) {
69    return 0xffff;
70  }
71  return alloc_record_count;
72}
73
74class AllocRecordStackTraceElement {
75 public:
76  AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {
77  }
78
79  int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
80    mirror::ArtMethod* method = Method();
81    DCHECK(method != nullptr);
82    return method->GetLineNumFromDexPC(DexPc());
83  }
84
85  mirror::ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
86    ScopedObjectAccessUnchecked soa(Thread::Current());
87    return soa.DecodeMethod(method_);
88  }
89
90  void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
91    ScopedObjectAccessUnchecked soa(Thread::Current());
92    method_ = soa.EncodeMethod(m);
93  }
94
95  uint32_t DexPc() const {
96    return dex_pc_;
97  }
98
99  void SetDexPc(uint32_t pc) {
100    dex_pc_ = pc;
101  }
102
103 private:
104  jmethodID method_;
105  uint32_t dex_pc_;
106};
107
108jobject Dbg::TypeCache::Add(mirror::Class* t) {
109  ScopedObjectAccessUnchecked soa(Thread::Current());
110  int32_t hash_code = t->IdentityHashCode();
111  auto range = objects_.equal_range(hash_code);
112  for (auto it = range.first; it != range.second; ++it) {
113    if (soa.Decode<mirror::Class*>(it->second) == t) {
114      // Found a matching weak global, return it.
115      return it->second;
116    }
117  }
118  JNIEnv* env = soa.Env();
119  const jobject local_ref = soa.AddLocalReference<jobject>(t);
120  const jobject weak_global = env->NewWeakGlobalRef(local_ref);
121  env->DeleteLocalRef(local_ref);
122  objects_.insert(std::make_pair(hash_code, weak_global));
123  return weak_global;
124}
125
126void Dbg::TypeCache::Clear() {
127  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
128  Thread* self = Thread::Current();
129  for (const auto& p : objects_) {
130    vm->DeleteWeakGlobalRef(self, p.second);
131  }
132  objects_.clear();
133}
134
135class AllocRecord {
136 public:
137  AllocRecord() : type_(nullptr), byte_count_(0), thin_lock_id_(0) {}
138
139  mirror::Class* Type() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
140    return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_));
141  }
142
143  void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
144                                                       Locks::alloc_tracker_lock_) {
145    type_ = Dbg::type_cache_.Add(t);
146  }
147
148  size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
149    size_t depth = 0;
150    while (depth < kMaxAllocRecordStackDepth && stack_[depth].Method() != NULL) {
151      ++depth;
152    }
153    return depth;
154  }
155
156  size_t ByteCount() const {
157    return byte_count_;
158  }
159
160  void SetByteCount(size_t count) {
161    byte_count_ = count;
162  }
163
164  uint16_t ThinLockId() const {
165    return thin_lock_id_;
166  }
167
168  void SetThinLockId(uint16_t id) {
169    thin_lock_id_ = id;
170  }
171
172  AllocRecordStackTraceElement* StackElement(size_t index) {
173    DCHECK_LT(index, kMaxAllocRecordStackDepth);
174    return &stack_[index];
175  }
176
177 private:
178  jobject type_;  // This is a weak global.
179  size_t byte_count_;
180  uint16_t thin_lock_id_;
181  AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];  // Unused entries have NULL method.
182};
183
184class Breakpoint {
185 public:
186  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
187    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
188    : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) {
189    ScopedObjectAccessUnchecked soa(Thread::Current());
190    method_ = soa.EncodeMethod(method);
191  }
192
193  Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
194    : method_(nullptr), dex_pc_(other.dex_pc_),
195      need_full_deoptimization_(other.need_full_deoptimization_) {
196    ScopedObjectAccessUnchecked soa(Thread::Current());
197    method_ = soa.EncodeMethod(other.Method());
198  }
199
200  mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
201    ScopedObjectAccessUnchecked soa(Thread::Current());
202    return soa.DecodeMethod(method_);
203  }
204
205  uint32_t DexPc() const {
206    return dex_pc_;
207  }
208
209  bool NeedFullDeoptimization() const {
210    return need_full_deoptimization_;
211  }
212
213 private:
214  // The location of this breakpoint.
215  jmethodID method_;
216  uint32_t dex_pc_;
217
218  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
219  bool need_full_deoptimization_;
220};
221
222static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
223    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
224  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
225  return os;
226}
227
228class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
229 public:
230  DebugInstrumentationListener() {}
231  virtual ~DebugInstrumentationListener() {}
232
233  void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
234                     uint32_t dex_pc)
235      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
236    if (method->IsNative()) {
237      // TODO: posting location events is a suspension point, but native method entry stubs aren't.
238      return;
239    }
240    Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
241  }
242
243  void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
244                    uint32_t dex_pc, const JValue& return_value)
245      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
246    if (method->IsNative()) {
247      // TODO: posting location events is a suspension point, but native method entry stubs aren't.
248      return;
249    }
250    Dbg::UpdateDebugger(thread, this_object, method, dex_pc, Dbg::kMethodExit, &return_value);
251  }
252
253  void MethodUnwind(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
254                    uint32_t dex_pc)
255      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
256    // We're not registered to listen to this kind of event, so complain.
257    LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
258               << " " << dex_pc;
259  }
260
261  void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
262                  uint32_t new_dex_pc)
263      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
264    Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, 0, nullptr);
265  }
266
267  void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
268                 uint32_t dex_pc, mirror::ArtField* field)
269      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
270    Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
271  }
272
273  void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
274                    uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value)
275      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
276    Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
277  }
278
279  void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
280                       mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
281                       mirror::Throwable* exception_object)
282      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
283    Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
284  }
285
286 private:
287  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
288} gDebugInstrumentationListener;
289
290// JDWP is allowed unless the Zygote forbids it.
291static bool gJdwpAllowed = true;
292
293// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
294static bool gJdwpConfigured = false;
295
296// Broken-down JDWP options. (Only valid if IsJdwpConfigured() is true.)
297static JDWP::JdwpOptions gJdwpOptions;
298
299// Runtime JDWP state.
300static JDWP::JdwpState* gJdwpState = NULL;
301static bool gDebuggerConnected;  // debugger or DDMS is connected.
302static bool gDebuggerActive;     // debugger is making requests.
303static bool gDisposed;           // debugger called VirtualMachine.Dispose, so we should drop the connection.
304
305static bool gDdmThreadNotification = false;
306
307// DDMS GC-related settings.
308static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
309static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
310static Dbg::HpsgWhat gDdmHpsgWhat;
311static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
312static Dbg::HpsgWhat gDdmNhsgWhat;
313
314ObjectRegistry* Dbg::gRegistry = nullptr;
315
316// Recent allocation tracking.
317AllocRecord* Dbg::recent_allocation_records_ = nullptr;  // TODO: CircularBuffer<AllocRecord>
318size_t Dbg::alloc_record_max_ = 0;
319size_t Dbg::alloc_record_head_ = 0;
320size_t Dbg::alloc_record_count_ = 0;
321Dbg::TypeCache Dbg::type_cache_;
322
323// Deoptimization support.
324std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
325size_t Dbg::full_deoptimization_event_count_ = 0;
326size_t Dbg::delayed_full_undeoptimization_count_ = 0;
327
328// Instrumentation event reference counters.
329size_t Dbg::dex_pc_change_event_ref_count_ = 0;
330size_t Dbg::method_enter_event_ref_count_ = 0;
331size_t Dbg::method_exit_event_ref_count_ = 0;
332size_t Dbg::field_read_event_ref_count_ = 0;
333size_t Dbg::field_write_event_ref_count_ = 0;
334size_t Dbg::exception_catch_event_ref_count_ = 0;
335uint32_t Dbg::instrumentation_events_ = 0;
336
337// Breakpoints.
338static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
339
340void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
341                                RootType root_type) {
342  if (receiver != nullptr) {
343    callback(&receiver, arg, tid, root_type);
344  }
345  if (thread != nullptr) {
346    callback(&thread, arg, tid, root_type);
347  }
348  if (klass != nullptr) {
349    callback(reinterpret_cast<mirror::Object**>(&klass), arg, tid, root_type);
350  }
351  if (method != nullptr) {
352    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
353  }
354}
355
356void DebugInvokeReq::Clear() {
357  invoke_needed = false;
358  receiver = nullptr;
359  thread = nullptr;
360  klass = nullptr;
361  method = nullptr;
362}
363
364void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
365                                   RootType root_type) {
366  if (method != nullptr) {
367    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
368  }
369}
370
371bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
372  return dex_pcs.find(dex_pc) != dex_pcs.end();
373}
374
375void SingleStepControl::Clear() {
376  is_active = false;
377  method = nullptr;
378  dex_pcs.clear();
379}
380
381static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
382    LOCKS_EXCLUDED(Locks::breakpoint_lock_)
383    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
384  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
385  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
386    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
387      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
388      return true;
389    }
390  }
391  return false;
392}
393
394static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
395    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
396  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
397  // A thread may be suspended for GC; in this code, we really want to know whether
398  // there's a debugger suspension active.
399  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
400}
401
402static mirror::Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status)
403    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
404  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id);
405  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
406    status = JDWP::ERR_INVALID_OBJECT;
407    return NULL;
408  }
409  if (!o->IsArrayInstance()) {
410    status = JDWP::ERR_INVALID_ARRAY;
411    return NULL;
412  }
413  status = JDWP::ERR_NONE;
414  return o->AsArray();
415}
416
417static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status)
418    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
419  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id);
420  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
421    status = JDWP::ERR_INVALID_OBJECT;
422    return NULL;
423  }
424  if (!o->IsClass()) {
425    status = JDWP::ERR_INVALID_CLASS;
426    return NULL;
427  }
428  status = JDWP::ERR_NONE;
429  return o->AsClass();
430}
431
432static JDWP::JdwpError DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id, Thread*& thread)
433    EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
434    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
435    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
436  mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id);
437  if (thread_peer == NULL || thread_peer == ObjectRegistry::kInvalidObject) {
438    // This isn't even an object.
439    return JDWP::ERR_INVALID_OBJECT;
440  }
441
442  mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
443  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
444    // This isn't a thread.
445    return JDWP::ERR_INVALID_THREAD;
446  }
447
448  thread = Thread::FromManagedThread(soa, thread_peer);
449  if (thread == NULL) {
450    // This is a java.lang.Thread without a Thread*. Must be a zombie.
451    return JDWP::ERR_THREAD_NOT_ALIVE;
452  }
453  return JDWP::ERR_NONE;
454}
455
456static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
457  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
458  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
459  return static_cast<JDWP::JdwpTag>(descriptor[0]);
460}
461
462static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
463    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
464  std::string temp;
465  const char* descriptor = klass->GetDescriptor(&temp);
466  return BasicTagFromDescriptor(descriptor);
467}
468
469static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
470    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
471  CHECK(c != NULL);
472  if (c->IsArrayClass()) {
473    return JDWP::JT_ARRAY;
474  }
475  if (c->IsStringClass()) {
476    return JDWP::JT_STRING;
477  }
478  if (c->IsClassClass()) {
479    return JDWP::JT_CLASS_OBJECT;
480  }
481  {
482    mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
483    if (thread_class->IsAssignableFrom(c)) {
484      return JDWP::JT_THREAD;
485    }
486  }
487  {
488    mirror::Class* thread_group_class =
489        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
490    if (thread_group_class->IsAssignableFrom(c)) {
491      return JDWP::JT_THREAD_GROUP;
492    }
493  }
494  {
495    mirror::Class* class_loader_class =
496        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader);
497    if (class_loader_class->IsAssignableFrom(c)) {
498      return JDWP::JT_CLASS_LOADER;
499    }
500  }
501  return JDWP::JT_OBJECT;
502}
503
504/*
505 * Objects declared to hold Object might actually hold a more specific
506 * type.  The debugger may take a special interest in these (e.g. it
507 * wants to display the contents of Strings), so we want to return an
508 * appropriate tag.
509 *
510 * Null objects are tagged JT_OBJECT.
511 */
512JDWP::JdwpTag Dbg::TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) {
513  return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
514}
515
516static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
517  switch (tag) {
518  case JDWP::JT_BOOLEAN:
519  case JDWP::JT_BYTE:
520  case JDWP::JT_CHAR:
521  case JDWP::JT_FLOAT:
522  case JDWP::JT_DOUBLE:
523  case JDWP::JT_INT:
524  case JDWP::JT_LONG:
525  case JDWP::JT_SHORT:
526  case JDWP::JT_VOID:
527    return true;
528  default:
529    return false;
530  }
531}
532
533/*
534 * Handle one of the JDWP name/value pairs.
535 *
536 * JDWP options are:
537 *  help: if specified, show help message and bail
538 *  transport: may be dt_socket or dt_shmem
539 *  address: for dt_socket, "host:port", or just "port" when listening
540 *  server: if "y", wait for debugger to attach; if "n", attach to debugger
541 *  timeout: how long to wait for debugger to connect / listen
542 *
543 * Useful with server=n (these aren't supported yet):
544 *  onthrow=<exception-name>: connect to debugger when exception thrown
545 *  onuncaught=y|n: connect to debugger when uncaught exception thrown
546 *  launch=<command-line>: launch the debugger itself
547 *
548 * The "transport" option is required, as is "address" if server=n.
549 */
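// For example, a command line of
//   -agentlib:jdwp=transport=dt_socket,address=8000,server=y,suspend=n
// makes the runtime listen for a debugger on TCP port 8000 without suspending;
// each name=value pair after "jdwp=" is handed to ParseJdwpOption below.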
550static bool ParseJdwpOption(const std::string& name, const std::string& value) {
551  if (name == "transport") {
552    if (value == "dt_socket") {
553      gJdwpOptions.transport = JDWP::kJdwpTransportSocket;
554    } else if (value == "dt_android_adb") {
555      gJdwpOptions.transport = JDWP::kJdwpTransportAndroidAdb;
556    } else {
557      LOG(ERROR) << "JDWP transport not supported: " << value;
558      return false;
559    }
560  } else if (name == "server") {
561    if (value == "n") {
562      gJdwpOptions.server = false;
563    } else if (value == "y") {
564      gJdwpOptions.server = true;
565    } else {
566      LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'";
567      return false;
568    }
569  } else if (name == "suspend") {
570    if (value == "n") {
571      gJdwpOptions.suspend = false;
572    } else if (value == "y") {
573      gJdwpOptions.suspend = true;
574    } else {
575      LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'";
576      return false;
577    }
578  } else if (name == "address") {
579    /* this is either <port> or <host>:<port> */
580    std::string port_string;
581    gJdwpOptions.host.clear();
582    std::string::size_type colon = value.find(':');
583    if (colon != std::string::npos) {
584      gJdwpOptions.host = value.substr(0, colon);
585      port_string = value.substr(colon + 1);
586    } else {
587      port_string = value;
588    }
589    if (port_string.empty()) {
590      LOG(ERROR) << "JDWP address missing port: " << value;
591      return false;
592    }
593    char* end;
594    uint64_t port = strtoul(port_string.c_str(), &end, 10);
595    if (*end != '\0' || port > 0xffff) {
596      LOG(ERROR) << "JDWP address has junk in port field: " << value;
597      return false;
598    }
599    gJdwpOptions.port = port;
600  } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") {
601    /* valid but unsupported */
602    LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'";
603  } else {
604    LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'";
605  }
606
607  return true;
608}
609
610/*
611 * Parse the latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.:
612 * "transport=dt_socket,address=8000,server=y,suspend=n"
613 */
614bool Dbg::ParseJdwpOptions(const std::string& options) {
615  VLOG(jdwp) << "ParseJdwpOptions: " << options;
616
617  std::vector<std::string> pairs;
618  Split(options, ',', pairs);
619
620  for (size_t i = 0; i < pairs.size(); ++i) {
621    std::string::size_type equals = pairs[i].find('=');
622    if (equals == std::string::npos) {
623      LOG(ERROR) << "Can't parse JDWP option '" << pairs[i] << "' in '" << options << "'";
624      return false;
625    }
626    ParseJdwpOption(pairs[i].substr(0, equals), pairs[i].substr(equals + 1));
627  }
628
629  if (gJdwpOptions.transport == JDWP::kJdwpTransportUnknown) {
630    LOG(ERROR) << "Must specify JDWP transport: " << options;
631  }
632  if (!gJdwpOptions.server && (gJdwpOptions.host.empty() || gJdwpOptions.port == 0)) {
633    LOG(ERROR) << "Must specify JDWP host and port when server=n: " << options;
634    return false;
635  }
636
637  gJdwpConfigured = true;
638  return true;
639}
640
641void Dbg::StartJdwp() {
642  if (!gJdwpAllowed || !IsJdwpConfigured()) {
643    // No JDWP for you!
644    return;
645  }
646
647  CHECK(gRegistry == nullptr);
648  gRegistry = new ObjectRegistry;
649
650  // Init JDWP if the debugger is enabled. This may connect out to a
651  // debugger, passively listen for a debugger, or block waiting for a
652  // debugger.
653  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
654  if (gJdwpState == NULL) {
655    // We probably failed because some other process has the port already, which means that
656    // if we don't abort the user is likely to think they're talking to us when they're actually
657    // talking to that other process.
658    LOG(FATAL) << "Debugger thread failed to initialize";
659  }
660
661  // If a debugger has already attached, send the "welcome" message.
662  // This may cause us to suspend all threads.
663  if (gJdwpState->IsActive()) {
664    ScopedObjectAccess soa(Thread::Current());
665    if (!gJdwpState->PostVMStart()) {
666      LOG(WARNING) << "Failed to post 'start' message to debugger";
667    }
668  }
669}
670
671void Dbg::StopJdwp() {
672  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
673  // destruction of gJdwpState).
674  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
675    gJdwpState->PostVMDeath();
676  }
677  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
678  Disposed();
679  delete gJdwpState;
680  gJdwpState = nullptr;
681  delete gRegistry;
682  gRegistry = nullptr;
683}
684
685void Dbg::GcDidFinish() {
686  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
687    ScopedObjectAccess soa(Thread::Current());
688    VLOG(jdwp) << "Sending heap info to DDM";
689    DdmSendHeapInfo(gDdmHpifWhen);
690  }
691  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
692    ScopedObjectAccess soa(Thread::Current());
693    VLOG(jdwp) << "Dumping heap to DDM";
694    DdmSendHeapSegments(false);
695  }
696  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
697    ScopedObjectAccess soa(Thread::Current());
698    VLOG(jdwp) << "Dumping native heap to DDM";
699    DdmSendHeapSegments(true);
700  }
701}
702
703void Dbg::SetJdwpAllowed(bool allowed) {
704  gJdwpAllowed = allowed;
705}
706
707DebugInvokeReq* Dbg::GetInvokeReq() {
708  return Thread::Current()->GetInvokeReq();
709}
710
711Thread* Dbg::GetDebugThread() {
712  return (gJdwpState != NULL) ? gJdwpState->GetDebugThread() : NULL;
713}
714
715void Dbg::ClearWaitForEventThread() {
716  gJdwpState->ClearWaitForEventThread();
717}
718
719void Dbg::Connected() {
720  CHECK(!gDebuggerConnected);
721  VLOG(jdwp) << "JDWP has attached";
722  gDebuggerConnected = true;
723  gDisposed = false;
724}
725
726void Dbg::Disposed() {
727  gDisposed = true;
728}
729
730bool Dbg::IsDisposed() {
731  return gDisposed;
732}
733
734void Dbg::GoActive() {
735  // Enable all debugging features, including scans for breakpoints.
736  // This is a no-op if we're already active.
737  // Only called from the JDWP handler thread.
738  if (gDebuggerActive) {
739    return;
740  }
741
742  {
743    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
744    ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
745    CHECK_EQ(gBreakpoints.size(), 0U);
746  }
747
748  {
749    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
750    CHECK_EQ(deoptimization_requests_.size(), 0U);
751    CHECK_EQ(full_deoptimization_event_count_, 0U);
752    CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
753    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
754    CHECK_EQ(method_enter_event_ref_count_, 0U);
755    CHECK_EQ(method_exit_event_ref_count_, 0U);
756    CHECK_EQ(field_read_event_ref_count_, 0U);
757    CHECK_EQ(field_write_event_ref_count_, 0U);
758    CHECK_EQ(exception_catch_event_ref_count_, 0U);
759  }
760
761  Runtime* runtime = Runtime::Current();
762  runtime->GetThreadList()->SuspendAll();
763  Thread* self = Thread::Current();
764  ThreadState old_state = self->SetStateUnsafe(kRunnable);
765  CHECK_NE(old_state, kRunnable);
766  runtime->GetInstrumentation()->EnableDeoptimization();
767  instrumentation_events_ = 0;
768  gDebuggerActive = true;
769  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
770  runtime->GetThreadList()->ResumeAll();
771
772  LOG(INFO) << "Debugger is active";
773}
774
775void Dbg::Disconnected() {
776  CHECK(gDebuggerConnected);
777
778  LOG(INFO) << "Debugger is no longer active";
779
780  // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
781  // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
782  // and clear the object registry.
783  Runtime* runtime = Runtime::Current();
784  runtime->GetThreadList()->SuspendAll();
785  Thread* self = Thread::Current();
786  ThreadState old_state = self->SetStateUnsafe(kRunnable);
787
788  // Debugger may not be active at this point.
789  if (gDebuggerActive) {
790    {
791      // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
792      // This prevents us from having any pending deoptimization request when the debugger attaches
793      // to us again while no event has been requested yet.
794      MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
795      deoptimization_requests_.clear();
796      full_deoptimization_event_count_ = 0U;
797      delayed_full_undeoptimization_count_ = 0U;
798    }
799    if (instrumentation_events_ != 0) {
800      runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
801                                                    instrumentation_events_);
802      instrumentation_events_ = 0;
803    }
804    runtime->GetInstrumentation()->DisableDeoptimization();
805    gDebuggerActive = false;
806  }
807  gRegistry->Clear();
808  gDebuggerConnected = false;
809  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
810  runtime->GetThreadList()->ResumeAll();
811}
812
813bool Dbg::IsDebuggerActive() {
814  return gDebuggerActive;
815}
816
817bool Dbg::IsJdwpConfigured() {
818  return gJdwpConfigured;
819}
820
821int64_t Dbg::LastDebuggerActivity() {
822  return gJdwpState->LastDebuggerActivity();
823}
824
825void Dbg::UndoDebuggerSuspensions() {
826  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
827}
828
829std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
830  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id);
831  if (o == NULL) {
832    return "NULL";
833  }
834  if (o == ObjectRegistry::kInvalidObject) {
835    return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
836  }
837  if (!o->IsClass()) {
838    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
839  }
840  return GetClassName(o->AsClass());
841}
842
843std::string Dbg::GetClassName(mirror::Class* klass) {
844  DCHECK(klass != nullptr);
845  std::string temp;
846  return DescriptorToName(klass->GetDescriptor(&temp));
847}
848
849JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_object_id) {
850  JDWP::JdwpError status;
851  mirror::Class* c = DecodeClass(id, status);
852  if (c == NULL) {
853    return status;
854  }
855  class_object_id = gRegistry->Add(c);
856  return JDWP::ERR_NONE;
857}
858
859JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclass_id) {
860  JDWP::JdwpError status;
861  mirror::Class* c = DecodeClass(id, status);
862  if (c == NULL) {
863    return status;
864  }
865  if (c->IsInterface()) {
866    // http://code.google.com/p/android/issues/detail?id=20856
867    superclass_id = 0;
868  } else {
869    superclass_id = gRegistry->Add(c->GetSuperClass());
870  }
871  return JDWP::ERR_NONE;
872}
873
874JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
875  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
876  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
877    return JDWP::ERR_INVALID_OBJECT;
878  }
879  expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader()));
880  return JDWP::ERR_NONE;
881}
882
883JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
884  JDWP::JdwpError status;
885  mirror::Class* c = DecodeClass(id, status);
886  if (c == NULL) {
887    return status;
888  }
889
890  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;
891
892  // Set ACC_SUPER. Dex files don't contain this flag but only classes are supposed to have it set,
893  // not interfaces.
894  // Class.getModifiers doesn't return it, but JDWP does, so we set it here.
895  if ((access_flags & kAccInterface) == 0) {
896    access_flags |= kAccSuper;
897  }
898
899  expandBufAdd4BE(pReply, access_flags);
900
901  return JDWP::ERR_NONE;
902}
903
904JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
905    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
906  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
907  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
908    return JDWP::ERR_INVALID_OBJECT;
909  }
910
911  // Ensure all threads are suspended while we read objects' lock words.
912  Thread* self = Thread::Current();
913  CHECK_EQ(self->GetState(), kRunnable);
914  self->TransitionFromRunnableToSuspended(kSuspended);
915  Runtime::Current()->GetThreadList()->SuspendAll();
916
917  MonitorInfo monitor_info(o);
918
919  Runtime::Current()->GetThreadList()->ResumeAll();
920  self->TransitionFromSuspendedToRunnable();
921
922  if (monitor_info.owner_ != NULL) {
923    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
924  } else {
925    expandBufAddObjectId(reply, gRegistry->Add(NULL));
926  }
927  expandBufAdd4BE(reply, monitor_info.entry_count_);
928  expandBufAdd4BE(reply, monitor_info.waiters_.size());
929  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
930    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
931  }
932  return JDWP::ERR_NONE;
933}
934
935JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
936                                      std::vector<JDWP::ObjectId>& monitors,
937                                      std::vector<uint32_t>& stack_depths) {
938  struct OwnedMonitorVisitor : public StackVisitor {
939    OwnedMonitorVisitor(Thread* thread, Context* context,
940                        std::vector<JDWP::ObjectId>* monitor_vector,
941                        std::vector<uint32_t>* stack_depth_vector)
942        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
943      : StackVisitor(thread, context), current_stack_depth(0),
944        monitors(monitor_vector), stack_depths(stack_depth_vector) {}
945
946    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
947    // annotalysis.
948    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
949      if (!GetMethod()->IsRuntimeMethod()) {
950        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
951        ++current_stack_depth;
952      }
953      return true;
954    }
955
956    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
957        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
958      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
959      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
960      visitor->stack_depths->push_back(visitor->current_stack_depth);
961    }
962
963    size_t current_stack_depth;
964    std::vector<JDWP::ObjectId>* monitors;
965    std::vector<uint32_t>* stack_depths;
966  };
967
968  ScopedObjectAccessUnchecked soa(Thread::Current());
969  Thread* thread;
970  {
971    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
972    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
973    if (error != JDWP::ERR_NONE) {
974      return error;
975    }
976    if (!IsSuspendedForDebugger(soa, thread)) {
977      return JDWP::ERR_THREAD_NOT_SUSPENDED;
978    }
979  }
980  std::unique_ptr<Context> context(Context::Create());
981  OwnedMonitorVisitor visitor(thread, context.get(), &monitors, &stack_depths);
982  visitor.WalkStack();
983  return JDWP::ERR_NONE;
984}
985
986JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
987                                         JDWP::ObjectId& contended_monitor) {
988  mirror::Object* contended_monitor_obj;
989  ScopedObjectAccessUnchecked soa(Thread::Current());
990  {
991    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
992    Thread* thread;
993    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
994    if (error != JDWP::ERR_NONE) {
995      return error;
996    }
997    if (!IsSuspendedForDebugger(soa, thread)) {
998      return JDWP::ERR_THREAD_NOT_SUSPENDED;
999    }
1000    contended_monitor_obj = Monitor::GetContendedMonitor(thread);
1001  }
1002  // Add() requires the thread_list_lock_ not held to avoid the lock
1003  // level violation.
1004  contended_monitor = gRegistry->Add(contended_monitor_obj);
1005  return JDWP::ERR_NONE;
1006}
1007
1008JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
1009                                       std::vector<uint64_t>& counts)
1010    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1011  gc::Heap* heap = Runtime::Current()->GetHeap();
1012  heap->CollectGarbage(false);
1013  std::vector<mirror::Class*> classes;
1014  counts.clear();
1015  for (size_t i = 0; i < class_ids.size(); ++i) {
1016    JDWP::JdwpError status;
1017    mirror::Class* c = DecodeClass(class_ids[i], status);
1018    if (c == NULL) {
1019      return status;
1020    }
1021    classes.push_back(c);
1022    counts.push_back(0);
1023  }
1024  heap->CountInstances(classes, false, &counts[0]);
1025  return JDWP::ERR_NONE;
1026}
1027
1028JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>& instances)
1029    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1030  gc::Heap* heap = Runtime::Current()->GetHeap();
1031  // We only want reachable instances, so do a GC.
1032  heap->CollectGarbage(false);
1033  JDWP::JdwpError status;
1034  mirror::Class* c = DecodeClass(class_id, status);
1035  if (c == nullptr) {
1036    return status;
1037  }
1038  std::vector<mirror::Object*> raw_instances;
1039  Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
1040  for (size_t i = 0; i < raw_instances.size(); ++i) {
1041    instances.push_back(gRegistry->Add(raw_instances[i]));
1042  }
1043  return JDWP::ERR_NONE;
1044}
1045
1046JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
1047                                         std::vector<JDWP::ObjectId>& referring_objects)
1048    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1049  gc::Heap* heap = Runtime::Current()->GetHeap();
1050  heap->CollectGarbage(false);
1051  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1052  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1053    return JDWP::ERR_INVALID_OBJECT;
1054  }
1055  std::vector<mirror::Object*> raw_instances;
1056  heap->GetReferringObjects(o, max_count, raw_instances);
1057  for (size_t i = 0; i < raw_instances.size(); ++i) {
1058    referring_objects.push_back(gRegistry->Add(raw_instances[i]));
1059  }
1060  return JDWP::ERR_NONE;
1061}
1062
1063JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id)
1064    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1065  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1066  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1067    return JDWP::ERR_INVALID_OBJECT;
1068  }
1069  gRegistry->DisableCollection(object_id);
1070  return JDWP::ERR_NONE;
1071}
1072
1073JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id)
1074    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1075  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1076  // Unlike DisableCollection, the JDWP spec does not state that an invalid object causes an
1077  // error. The RI also ignores these cases and never returns an error. However, it's not obvious
1078  // why this command should behave differently from the DisableCollection and IsCollected
1079  // commands. So let's be more strict and return an error if this happens.
1080  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1081    return JDWP::ERR_INVALID_OBJECT;
1082  }
1083  gRegistry->EnableCollection(object_id);
1084  return JDWP::ERR_NONE;
1085}
1086
1087JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool& is_collected)
1088    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1089  if (object_id == 0) {
1090    // Null object id is invalid.
1091    return JDWP::ERR_INVALID_OBJECT;
1092  }
1093  // The JDWP spec states an INVALID_OBJECT error is returned if the object ID is not valid. However,
1094  // the RI seems to ignore this and assumes the object has been collected.
1095  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1096  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1097    is_collected = true;
1098  } else {
1099    is_collected = gRegistry->IsCollected(object_id);
1100  }
1101  return JDWP::ERR_NONE;
1102}
1103
1104void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
1105    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1106  gRegistry->DisposeObject(object_id, reference_count);
1107}
1108
1109JDWP::JdwpTypeTag Dbg::GetTypeTag(mirror::Class* klass) {
1110  DCHECK(klass != nullptr);
1111  if (klass->IsArrayClass()) {
1112    return JDWP::TT_ARRAY;
1113  } else if (klass->IsInterface()) {
1114    return JDWP::TT_INTERFACE;
1115  } else {
1116    return JDWP::TT_CLASS;
1117  }
1118}
1119
1120JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1121  JDWP::JdwpError status;
1122  mirror::Class* c = DecodeClass(class_id, status);
1123  if (c == NULL) {
1124    return status;
1125  }
1126
1127  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
1128  expandBufAdd1(pReply, type_tag);
1129  expandBufAddRefTypeId(pReply, class_id);
1130  return JDWP::ERR_NONE;
1131}
1132
1133void Dbg::GetClassList(std::vector<JDWP::RefTypeId>& classes) {
1134  // Get the complete list of reference classes (i.e. all classes except
1135  // the primitive types).
1136  // Returns a newly-allocated buffer full of RefTypeId values.
1137  struct ClassListCreator {
1138    explicit ClassListCreator(std::vector<JDWP::RefTypeId>& classes) : classes(classes) {
1139    }
1140
1141    static bool Visit(mirror::Class* c, void* arg) {
1142      return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
1143    }
1144
1145    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
1146    // annotalysis.
1147    bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS {
1148      if (!c->IsPrimitive()) {
1149        classes.push_back(gRegistry->AddRefType(c));
1150      }
1151      return true;
1152    }
1153
1154    std::vector<JDWP::RefTypeId>& classes;
1155  };
1156
1157  ClassListCreator clc(classes);
1158  Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(ClassListCreator::Visit,
1159                                                                       &clc);
1160}
1161
1162JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
1163                                  uint32_t* pStatus, std::string* pDescriptor) {
1164  JDWP::JdwpError status;
1165  mirror::Class* c = DecodeClass(class_id, status);
1166  if (c == NULL) {
1167    return status;
1168  }
1169
1170  if (c->IsArrayClass()) {
1171    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
1172    *pTypeTag = JDWP::TT_ARRAY;
1173  } else {
1174    if (c->IsErroneous()) {
1175      *pStatus = JDWP::CS_ERROR;
1176    } else {
1177      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
1178    }
1179    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
1180  }
1181
1182  if (pDescriptor != NULL) {
1183    std::string temp;
1184    *pDescriptor = c->GetDescriptor(&temp);
1185  }
1186  return JDWP::ERR_NONE;
1187}
1188
1189void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>& ids) {
1190  std::vector<mirror::Class*> classes;
1191  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
1192  ids.clear();
1193  for (size_t i = 0; i < classes.size(); ++i) {
1194    ids.push_back(gRegistry->Add(classes[i]));
1195  }
1196}
1197
1198JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
1199    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1200  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1201  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1202    return JDWP::ERR_INVALID_OBJECT;
1203  }
1204
1205  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
1206  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());
1207
1208  expandBufAdd1(pReply, type_tag);
1209  expandBufAddRefTypeId(pReply, type_id);
1210
1211  return JDWP::ERR_NONE;
1212}
1213
1214JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
1215  JDWP::JdwpError status;
1216  mirror::Class* c = DecodeClass(class_id, status);
1217  if (c == NULL) {
1218    return status;
1219  }
1220  std::string temp;
1221  *signature = c->GetDescriptor(&temp);
1222  return JDWP::ERR_NONE;
1223}
1224
1225JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result) {
1226  JDWP::JdwpError status;
1227  mirror::Class* c = DecodeClass(class_id, status);
1228  if (c == nullptr) {
1229    return status;
1230  }
1231  const char* source_file = c->GetSourceFile();
1232  if (source_file == nullptr) {
1233    return JDWP::ERR_ABSENT_INFORMATION;
1234  }
1235  result = source_file;
1236  return JDWP::ERR_NONE;
1237}
1238
1239JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t& tag) {
1240  ScopedObjectAccessUnchecked soa(Thread::Current());
1241  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1242  if (o == ObjectRegistry::kInvalidObject) {
1243    return JDWP::ERR_INVALID_OBJECT;
1244  }
1245  tag = TagFromObject(soa, o);
1246  return JDWP::ERR_NONE;
1247}
1248
1249size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
1250  switch (tag) {
1251  case JDWP::JT_VOID:
1252    return 0;
1253  case JDWP::JT_BYTE:
1254  case JDWP::JT_BOOLEAN:
1255    return 1;
1256  case JDWP::JT_CHAR:
1257  case JDWP::JT_SHORT:
1258    return 2;
1259  case JDWP::JT_FLOAT:
1260  case JDWP::JT_INT:
1261    return 4;
1262  case JDWP::JT_ARRAY:
1263  case JDWP::JT_OBJECT:
1264  case JDWP::JT_STRING:
1265  case JDWP::JT_THREAD:
1266  case JDWP::JT_THREAD_GROUP:
1267  case JDWP::JT_CLASS_LOADER:
1268  case JDWP::JT_CLASS_OBJECT:
1269    return sizeof(JDWP::ObjectId);
1270  case JDWP::JT_DOUBLE:
1271  case JDWP::JT_LONG:
1272    return 8;
1273  default:
1274    LOG(FATAL) << "Unknown tag " << tag;
1275    return -1;
1276  }
1277}
1278
1279JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int& length) {
1280  JDWP::JdwpError status;
1281  mirror::Array* a = DecodeArray(array_id, status);
1282  if (a == NULL) {
1283    return status;
1284  }
1285  length = a->GetLength();
1286  return JDWP::ERR_NONE;
1287}
1288
1289JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
1290  JDWP::JdwpError status;
1291  mirror::Array* a = DecodeArray(array_id, status);
1292  if (a == nullptr) {
1293    return status;
1294  }
1295
1296  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
1297    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
1298    return JDWP::ERR_INVALID_LENGTH;
1299  }
1300  JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
1301  expandBufAdd1(pReply, element_tag);
1302  expandBufAdd4BE(pReply, count);
1303
1304  if (IsPrimitiveTag(element_tag)) {
1305    size_t width = GetTagWidth(element_tag);
1306    uint8_t* dst = expandBufAddSpace(pReply, count * width);
1307    if (width == 8) {
1308      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
1309      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
1310    } else if (width == 4) {
1311      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
1312      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
1313    } else if (width == 2) {
1314      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
1315      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
1316    } else {
1317      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
1318      memcpy(dst, &src[offset * width], count * width);
1319    }
1320  } else {
1321    ScopedObjectAccessUnchecked soa(Thread::Current());
1322    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
1323    for (int i = 0; i < count; ++i) {
1324      mirror::Object* element = oa->Get(offset + i);
1325      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
1326                                                        : element_tag;
1327      expandBufAdd1(pReply, specific_tag);
1328      expandBufAddObjectId(pReply, gRegistry->Add(element));
1329    }
1330  }
1331
1332  return JDWP::ERR_NONE;
1333}
1334
1335template <typename T>
1336static void CopyArrayData(mirror::Array* a, JDWP::Request& src, int offset, int count)
1337    NO_THREAD_SAFETY_ANALYSIS {
1338  // TODO: fix when annotalysis correctly handles non-member functions.
1339  DCHECK(a->GetClass()->IsPrimitiveArray());
1340
1341  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
1342  for (int i = 0; i < count; ++i) {
1343    *dst++ = src.ReadValue(sizeof(T));
1344  }
1345}
1346
1347JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
1348                                      JDWP::Request& request)
1349    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1350  JDWP::JdwpError status;
1351  mirror::Array* dst = DecodeArray(array_id, status);
1352  if (dst == NULL) {
1353    return status;
1354  }
1355
1356  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
1357    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
1358    return JDWP::ERR_INVALID_LENGTH;
1359  }
1360  JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());
1361
1362  if (IsPrimitiveTag(element_tag)) {
1363    size_t width = GetTagWidth(element_tag);
1364    if (width == 8) {
1365      CopyArrayData<uint64_t>(dst, request, offset, count);
1366    } else if (width == 4) {
1367      CopyArrayData<uint32_t>(dst, request, offset, count);
1368    } else if (width == 2) {
1369      CopyArrayData<uint16_t>(dst, request, offset, count);
1370    } else {
1371      CopyArrayData<uint8_t>(dst, request, offset, count);
1372    }
1373  } else {
1374    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
1375    for (int i = 0; i < count; ++i) {
1376      JDWP::ObjectId id = request.ReadObjectId();
1377      mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
1378      if (o == ObjectRegistry::kInvalidObject) {
1379        return JDWP::ERR_INVALID_OBJECT;
1380      }
1381      oa->Set<false>(offset + i, o);
1382    }
1383  }
1384
1385  return JDWP::ERR_NONE;
1386}
1387
1388JDWP::ObjectId Dbg::CreateString(const std::string& str) {
1389  return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str()));
1390}
1391
1392JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId& new_object) {
1393  JDWP::JdwpError status;
1394  mirror::Class* c = DecodeClass(class_id, status);
1395  if (c == NULL) {
1396    return status;
1397  }
1398  new_object = gRegistry->Add(c->AllocObject(Thread::Current()));
1399  return JDWP::ERR_NONE;
1400}
1401
1402/*
1403 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
1404 */
1405JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
1406                                       JDWP::ObjectId& new_array) {
1407  JDWP::JdwpError status;
1408  mirror::Class* c = DecodeClass(array_class_id, status);
1409  if (c == NULL) {
1410    return status;
1411  }
1412  new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length,
1413                                                        c->GetComponentSize(),
1414                                                        Runtime::Current()->GetHeap()->GetCurrentAllocator()));
1415  return JDWP::ERR_NONE;
1416}
1417
1418JDWP::FieldId Dbg::ToFieldId(const mirror::ArtField* f) {
1419  CHECK(!kMovingFields);
1420  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
1421}
1422
1423static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m)
1424    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1425  CHECK(!kMovingMethods);
1426  return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
1427}
1428
1429static mirror::ArtField* FromFieldId(JDWP::FieldId fid)
1430    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1431  CHECK(!kMovingFields);
1432  return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid));
1433}
1434
1435static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid)
1436    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1437  CHECK(!kMovingMethods);
1438  return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid));
1439}
1440
1441bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) {
1442  CHECK(event_thread != nullptr);
1443  mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(expected_thread_id);
1444  return expected_thread_peer == event_thread->GetPeer();
1445}
1446
1447bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location,
1448                        const JDWP::EventLocation& event_location) {
1449  if (expected_location.dex_pc != event_location.dex_pc) {
1450    return false;
1451  }
1452  mirror::ArtMethod* m = FromMethodId(expected_location.method_id);
1453  return m == event_location.method;
1454}
1455
1456bool Dbg::MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id) {
1457  JDWP::JdwpError status;
1458  mirror::Class* expected_class = DecodeClass(class_id, status);
1459  CHECK(expected_class != nullptr);
1460  return expected_class->IsAssignableFrom(event_class);
1461}
1462
1463bool Dbg::MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
1464                     mirror::ArtField* event_field) {
1465  mirror::ArtField* expected_field = FromFieldId(expected_field_id);
1466  if (expected_field != event_field) {
1467    return false;
1468  }
1469  return Dbg::MatchType(event_field->GetDeclaringClass(), expected_type_id);
1470}
1471
1472bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) {
1473  mirror::Object* modifier_instance = gRegistry->Get<mirror::Object*>(expected_instance_id);
1474  return modifier_instance == event_instance;
1475}
1476
1477void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
1478    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1479  if (m == nullptr) {
1480    memset(location, 0, sizeof(*location));
1481  } else {
1482    mirror::Class* c = m->GetDeclaringClass();
1483    location->type_tag = GetTypeTag(c);
1484    location->class_id = gRegistry->AddRefType(c);
1485    location->method_id = ToMethodId(m);
1486    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
1487  }
1488}
1489
1490std::string Dbg::GetMethodName(JDWP::MethodId method_id)
1491    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1492  mirror::ArtMethod* m = FromMethodId(method_id);
1493  return m->GetName();
1494}
1495
1496std::string Dbg::GetFieldName(JDWP::FieldId field_id)
1497    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1498  return FromFieldId(field_id)->GetName();
1499}
1500
1501/*
1502 * Augment the access flags for synthetic methods and fields by setting
1503 * the (as described by the spec) "0xf0000000 bit".  Also, strip out any
1504 * flags not specified by the Java programming language.
1505 */
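// For example, assuming kAccJavaFlagsMask keeps only the low 16 bits, a
// synthetic private field with dex flags kAccPrivate | kAccSynthetic (0x1002)
// is reported to the debugger as 0xf0001002.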
1506static uint32_t MangleAccessFlags(uint32_t accessFlags) {
1507  accessFlags &= kAccJavaFlagsMask;
1508  if ((accessFlags & kAccSynthetic) != 0) {
1509    accessFlags |= 0xf0000000;
1510  }
1511  return accessFlags;
1512}
1513
1514/*
1515 * Circularly shifts registers so that arguments come first. Debuggers
1516 * expect slots to begin with arguments, but dex code places them at
1517 * the end.
1518 */
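// For example, with registers_size_ == 5 and ins_size_ == 2, the argument
// registers v3 and v4 map to debugger slots 0 and 1, while the locals v0-v2
// map to slots 2-4; DemangleSlot below performs the inverse mapping.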
1519static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m)
1520    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1521  const DexFile::CodeItem* code_item = m->GetCodeItem();
1522  if (code_item == nullptr) {
1523    // We should not get here for a method without code (native, proxy or abstract). Log it and
1524    // return the slot as is since all registers are arguments.
1525    LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m);
1526    return slot;
1527  }
1528  uint16_t ins_size = code_item->ins_size_;
1529  uint16_t locals_size = code_item->registers_size_ - ins_size;
1530  if (slot >= locals_size) {
1531    return slot - locals_size;
1532  } else {
1533    return slot + ins_size;
1534  }
1535}
1536
1537/*
1538 * Circularly shifts registers so that arguments come last. Reverts
1539 * slots to dex style argument placement.
1540 */
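// Continuing the example above (registers_size=5, ins_size=2): debugger slots 0,1 map back
// to dex registers v3,v4 and slots 2-4 map back to v0-v2.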
1541static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m)
1542    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1543  const DexFile::CodeItem* code_item = m->GetCodeItem();
1544  if (code_item == nullptr) {
1545    // We should not get here for a method without code (native, proxy or abstract). Log it and
1546    // return the slot as is since all registers are arguments.
1547    LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
1548    return slot;
1549  }
1550  uint16_t ins_size = code_item->ins_size_;
1551  uint16_t locals_size = code_item->registers_size_ - ins_size;
1552  if (slot < ins_size) {
1553    return slot + locals_size;
1554  } else {
1555    return slot - ins_size;
1556  }
1557}
1558
1559JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
1560  JDWP::JdwpError status;
1561  mirror::Class* c = DecodeClass(class_id, status);
1562  if (c == NULL) {
1563    return status;
1564  }
1565
1566  size_t instance_field_count = c->NumInstanceFields();
1567  size_t static_field_count = c->NumStaticFields();
1568
1569  expandBufAdd4BE(pReply, instance_field_count + static_field_count);
1570
1571  for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
1572    mirror::ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
1573    expandBufAddFieldId(pReply, ToFieldId(f));
1574    expandBufAddUtf8String(pReply, f->GetName());
1575    expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
1576    if (with_generic) {
1577      static const char genericSignature[1] = "";
1578      expandBufAddUtf8String(pReply, genericSignature);
1579    }
1580    expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
1581  }
1582  return JDWP::ERR_NONE;
1583}
1584
1585JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
1586                                           JDWP::ExpandBuf* pReply) {
1587  JDWP::JdwpError status;
1588  mirror::Class* c = DecodeClass(class_id, status);
1589  if (c == NULL) {
1590    return status;
1591  }
1592
1593  size_t direct_method_count = c->NumDirectMethods();
1594  size_t virtual_method_count = c->NumVirtualMethods();
1595
1596  expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);
1597
1598  for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
1599    mirror::ArtMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count);
1600    expandBufAddMethodId(pReply, ToMethodId(m));
1601    expandBufAddUtf8String(pReply, m->GetName());
1602    expandBufAddUtf8String(pReply, m->GetSignature().ToString());
1603    if (with_generic) {
1604      static const char genericSignature[1] = "";
1605      expandBufAddUtf8String(pReply, genericSignature);
1606    }
1607    expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
1608  }
1609  return JDWP::ERR_NONE;
1610}
1611
1612JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1613  JDWP::JdwpError status;
1614  Thread* self = Thread::Current();
1615  StackHandleScope<1> hs(self);
1616  Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, status)));
1617  if (c.Get() == nullptr) {
1618    return status;
1619  }
1620  size_t interface_count = c->NumDirectInterfaces();
1621  expandBufAdd4BE(pReply, interface_count);
1622  for (size_t i = 0; i < interface_count; ++i) {
1623    expandBufAddRefTypeId(pReply,
1624                          gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i)));
1625  }
1626  return JDWP::ERR_NONE;
1627}
1628
1629void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply)
1630    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1631  struct DebugCallbackContext {
1632    int numItems;
1633    JDWP::ExpandBuf* pReply;
1634
1635    static bool Callback(void* context, uint32_t address, uint32_t line_number) {
1636      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1637      expandBufAdd8BE(pContext->pReply, address);
1638      expandBufAdd4BE(pContext->pReply, line_number);
1639      pContext->numItems++;
1640      return false;
1641    }
1642  };
1643  mirror::ArtMethod* m = FromMethodId(method_id);
1644  const DexFile::CodeItem* code_item = m->GetCodeItem();
1645  uint64_t start, end;
1646  if (code_item == nullptr) {
1647    DCHECK(m->IsNative() || m->IsProxyMethod());
1648    start = -1;
1649    end = -1;
1650  } else {
1651    start = 0;
1652    // Return the index of the last instruction
1653    end = code_item->insns_size_in_code_units_ - 1;
1654  }
1655
1656  expandBufAdd8BE(pReply, start);
1657  expandBufAdd8BE(pReply, end);
1658
1659  // Add numLines later
1660  size_t numLinesOffset = expandBufGetLength(pReply);
1661  expandBufAdd4BE(pReply, 0);
1662
1663  DebugCallbackContext context;
1664  context.numItems = 0;
1665  context.pReply = pReply;
1666
1667  if (code_item != nullptr) {
1668    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
1669                                     DebugCallbackContext::Callback, NULL, &context);
1670  }
1671
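  // Back-patch the line count now that all entries have been written.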
1672  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
1673}
1674
1675void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
1676                              JDWP::ExpandBuf* pReply) {
1677  struct DebugCallbackContext {
1678    mirror::ArtMethod* method;
1679    JDWP::ExpandBuf* pReply;
1680    size_t variable_count;
1681    bool with_generic;
1682
1683    static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
1684                         const char* name, const char* descriptor, const char* signature)
1685        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1686      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1687
1688      VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
1689                                 pContext->variable_count, startAddress, endAddress - startAddress,
1690                                 name, descriptor, signature, slot,
1691                                 MangleSlot(slot, pContext->method));
1692
1693      slot = MangleSlot(slot, pContext->method);
1694
1695      expandBufAdd8BE(pContext->pReply, startAddress);
1696      expandBufAddUtf8String(pContext->pReply, name);
1697      expandBufAddUtf8String(pContext->pReply, descriptor);
1698      if (pContext->with_generic) {
1699        expandBufAddUtf8String(pContext->pReply, signature);
1700      }
1701      expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
1702      expandBufAdd4BE(pContext->pReply, slot);
1703
1704      ++pContext->variable_count;
1705    }
1706  };
1707  mirror::ArtMethod* m = FromMethodId(method_id);
1708
1709  // arg_count considers doubles and longs to take 2 units.
1710  // variable_count considers everything to take 1 unit.
1711  std::string shorty(m->GetShorty());
1712  expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty));
1713
1714  // We don't know the total number of variables yet, so leave a blank and update it later.
1715  size_t variable_count_offset = expandBufGetLength(pReply);
1716  expandBufAdd4BE(pReply, 0);
1717
1718  DebugCallbackContext context;
1719  context.method = m;
1720  context.pReply = pReply;
1721  context.variable_count = 0;
1722  context.with_generic = with_generic;
1723
1724  const DexFile::CodeItem* code_item = m->GetCodeItem();
1725  if (code_item != nullptr) {
1726    m->GetDexFile()->DecodeDebugInfo(
1727        code_item, m->IsStatic(), m->GetDexMethodIndex(), NULL, DebugCallbackContext::Callback,
1728        &context);
1729  }
1730
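  // Back-patch the variable count now that the local variable debug info has been decoded.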
1731  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
1732}
1733
1734void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
1735                                  JDWP::ExpandBuf* pReply) {
1736  mirror::ArtMethod* m = FromMethodId(method_id);
1737  JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
1738  OutputJValue(tag, return_value, pReply);
1739}
1740
1741void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
1742                           JDWP::ExpandBuf* pReply) {
1743  mirror::ArtField* f = FromFieldId(field_id);
1744  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1745  OutputJValue(tag, field_value, pReply);
1746}
1747
1748JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
1749                                  std::vector<uint8_t>& bytecodes)
1750    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1751  mirror::ArtMethod* m = FromMethodId(method_id);
1752  if (m == NULL) {
1753    return JDWP::ERR_INVALID_METHODID;
1754  }
1755  const DexFile::CodeItem* code_item = m->GetCodeItem();
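  // Dex instructions are stored as 16-bit code units, so the byte count is twice the code unit count.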
1756  size_t byte_count = code_item->insns_size_in_code_units_ * 2;
1757  const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
1758  const uint8_t* end = begin + byte_count;
1759  for (const uint8_t* p = begin; p != end; ++p) {
1760    bytecodes.push_back(*p);
1761  }
1762  return JDWP::ERR_NONE;
1763}
1764
1765JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
1766  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1767}
1768
1769JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
1770  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1771}
1772
1773static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
1774                                         JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
1775                                         bool is_static)
1776    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1777  JDWP::JdwpError status;
1778  mirror::Class* c = DecodeClass(ref_type_id, status);
1779  if (ref_type_id != 0 && c == NULL) {
1780    return status;
1781  }
1782
1783  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id);
1784  if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
1785    return JDWP::ERR_INVALID_OBJECT;
1786  }
1787  mirror::ArtField* f = FromFieldId(field_id);
1788
1789  mirror::Class* receiver_class = c;
1790  if (receiver_class == NULL && o != NULL) {
1791    receiver_class = o->GetClass();
1792  }
1793  // TODO: should we give up now if receiver_class is NULL?
1794  if (receiver_class != NULL && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
1795    LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
1796    return JDWP::ERR_INVALID_FIELDID;
1797  }
1798
1799  // The RI only enforces the static/non-static mismatch in one direction.
1800  // TODO: should we change the tests and check both?
1801  if (is_static) {
1802    if (!f->IsStatic()) {
1803      return JDWP::ERR_INVALID_FIELDID;
1804    }
1805  } else {
1806    if (f->IsStatic()) {
1807      LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.GetValues on static field " << PrettyField(f);
1808    }
1809  }
1810  if (f->IsStatic()) {
1811    o = f->GetDeclaringClass();
1812  }
1813
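  // Read the field according to its basic type: references via GetObject, 64-bit primitives
  // via Get64, and all narrower primitives via Get32.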
1814  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1815  JValue field_value;
1816  if (tag == JDWP::JT_VOID) {
1817    LOG(FATAL) << "Unknown tag: " << tag;
1818  } else if (!IsPrimitiveTag(tag)) {
1819    field_value.SetL(f->GetObject(o));
1820  } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1821    field_value.SetJ(f->Get64(o));
1822  } else {
1823    field_value.SetI(f->Get32(o));
1824  }
1825  Dbg::OutputJValue(tag, &field_value, pReply);
1826
1827  return JDWP::ERR_NONE;
1828}
1829
1830JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1831                                   JDWP::ExpandBuf* pReply) {
1832  return GetFieldValueImpl(0, object_id, field_id, pReply, false);
1833}
1834
1835JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) {
1836  return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
1837}
1838
1839static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1840                                         uint64_t value, int width, bool is_static)
1841    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1842  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id);
1843  if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
1844    return JDWP::ERR_INVALID_OBJECT;
1845  }
1846  mirror::ArtField* f = FromFieldId(field_id);
1847
1848  // The RI only enforces the static/non-static mismatch in one direction.
1849  // TODO: should we change the tests and check both?
1850  if (is_static) {
1851    if (!f->IsStatic()) {
1852      return JDWP::ERR_INVALID_FIELDID;
1853    }
1854  } else {
1855    if (f->IsStatic()) {
1856      LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.SetValues on static field " << PrettyField(f);
1857    }
1858  }
1859  if (f->IsStatic()) {
1860    o = f->GetDeclaringClass();
1861  }
1862
1863  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1864
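  // Primitive values arrive as raw bits ('width' selects a 32-bit or 64-bit store); references
  // must be resolved through the object registry and type-checked against the field's type.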
1865  if (IsPrimitiveTag(tag)) {
1866    if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1867      CHECK_EQ(width, 8);
1868      // Debugging can't use transactional mode (runtime only).
1869      f->Set64<false>(o, value);
1870    } else {
1871      CHECK_LE(width, 4);
1872      // Debugging can't use transactional mode (runtime only).
1873      f->Set32<false>(o, value);
1874    }
1875  } else {
1876    mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value);
1877    if (v == ObjectRegistry::kInvalidObject) {
1878      return JDWP::ERR_INVALID_OBJECT;
1879    }
1880    if (v != NULL) {
1881      mirror::Class* field_type;
1882      {
1883        StackHandleScope<3> hs(Thread::Current());
1884        HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
1885        HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f));
1886        HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
1887        field_type = FieldHelper(h_f).GetType();
1888      }
1889      if (!field_type->IsAssignableFrom(v->GetClass())) {
1890        return JDWP::ERR_INVALID_OBJECT;
1891      }
1892    }
1893    // Debugging can't use transactional mode (runtime only).
1894    f->SetObject<false>(o, v);
1895  }
1896
1897  return JDWP::ERR_NONE;
1898}
1899
1900JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
1901                                   int width) {
1902  return SetFieldValueImpl(object_id, field_id, value, width, false);
1903}
1904
1905JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
1906  return SetFieldValueImpl(0, field_id, value, width, true);
1907}
1908
1909std::string Dbg::StringToUtf8(JDWP::ObjectId string_id) {
1910  mirror::String* s = gRegistry->Get<mirror::String*>(string_id);
1911  return s->ToModifiedUtf8();
1912}
1913
1914void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
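  // Primitive values are written with a size determined by their tag; reference values are
  // re-tagged from the actual object and written as an ObjectId.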
1915  if (IsPrimitiveTag(tag)) {
1916    expandBufAdd1(pReply, tag);
1917    if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
1918      expandBufAdd1(pReply, return_value->GetI());
1919    } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
1920      expandBufAdd2BE(pReply, return_value->GetI());
1921    } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
1922      expandBufAdd4BE(pReply, return_value->GetI());
1923    } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1924      expandBufAdd8BE(pReply, return_value->GetJ());
1925    } else {
1926      CHECK_EQ(tag, JDWP::JT_VOID);
1927    }
1928  } else {
1929    ScopedObjectAccessUnchecked soa(Thread::Current());
1930    mirror::Object* value = return_value->GetL();
1931    expandBufAdd1(pReply, TagFromObject(soa, value));
1932    expandBufAddObjectId(pReply, gRegistry->Add(value));
1933  }
1934}
1935
1936JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string& name) {
1937  ScopedObjectAccessUnchecked soa(Thread::Current());
1938  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1939  Thread* thread;
1940  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1941  if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
1942    return error;
1943  }
1944
1945  // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
1946  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
1947  mirror::ArtField* java_lang_Thread_name_field =
1948      soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
1949  mirror::String* s =
1950      reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
1951  if (s != NULL) {
1952    name = s->ToModifiedUtf8();
1953  }
1954  return JDWP::ERR_NONE;
1955}
1956
1957JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
1958  ScopedObjectAccess soa(Thread::Current());
1959  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
1960  if (thread_object == ObjectRegistry::kInvalidObject) {
1961    return JDWP::ERR_INVALID_OBJECT;
1962  }
1963  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroup");
1964  // Okay, so it's an object, but is it actually a thread?
1965  JDWP::JdwpError error;
1966  {
1967    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1968    Thread* thread;
1969    error = DecodeThread(soa, thread_id, thread);
1970  }
1971  if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
1972    // Zombie threads are in the null group.
1973    expandBufAddObjectId(pReply, JDWP::ObjectId(0));
1974    error = JDWP::ERR_NONE;
1975  } else if (error == JDWP::ERR_NONE) {
1976    mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
1977    CHECK(c != nullptr);
1978    mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
1979    CHECK(f != nullptr);
1980    mirror::Object* group = f->GetObject(thread_object);
1981    CHECK(group != nullptr);
1982    JDWP::ObjectId thread_group_id = gRegistry->Add(group);
1983    expandBufAddObjectId(pReply, thread_group_id);
1984  }
1985  soa.Self()->EndAssertNoThreadSuspension(old_cause);
1986  return error;
1987}
1988
1989std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) {
1990  ScopedObjectAccess soa(Thread::Current());
1991  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
1992  CHECK(thread_group != nullptr);
1993  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
1994  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
1995  CHECK(c != nullptr);
1996  mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
1997  CHECK(f != NULL);
1998  mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
1999  soa.Self()->EndAssertNoThreadSuspension(old_cause);
2000  return s->ToModifiedUtf8();
2001}
2002
2003JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) {
2004  ScopedObjectAccessUnchecked soa(Thread::Current());
2005  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
2006  CHECK(thread_group != nullptr);
2007  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent");
2008  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
2009  CHECK(c != nullptr);
2010  mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
2011  CHECK(f != NULL);
2012  mirror::Object* parent = f->GetObject(thread_group);
2013  soa.Self()->EndAssertNoThreadSuspension(old_cause);
2014  return gRegistry->Add(parent);
2015}
2016
2017JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
2018  ScopedObjectAccessUnchecked soa(Thread::Current());
2019  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
2020  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
2021  return gRegistry->Add(group);
2022}
2023
2024JDWP::ObjectId Dbg::GetMainThreadGroupId() {
2025  ScopedObjectAccess soa(Thread::Current());
2026  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup);
2027  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
2028  return gRegistry->Add(group);
2029}
2030
2031JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
2032  switch (state) {
2033    case kBlocked:
2034      return JDWP::TS_MONITOR;
2035    case kNative:
2036    case kRunnable:
2037    case kSuspended:
2038      return JDWP::TS_RUNNING;
2039    case kSleeping:
2040      return JDWP::TS_SLEEPING;
2041    case kStarting:
2042    case kTerminated:
2043      return JDWP::TS_ZOMBIE;
2044    case kTimedWaiting:
2045    case kWaitingForCheckPointsToRun:
2046    case kWaitingForDebuggerSend:
2047    case kWaitingForDebuggerSuspension:
2048    case kWaitingForDebuggerToAttach:
2049    case kWaitingForDeoptimization:
2050    case kWaitingForGcToComplete:
2051    case kWaitingForJniOnLoad:
2052    case kWaitingForMethodTracingStart:
2053    case kWaitingForSignalCatcherOutput:
2054    case kWaitingInMainDebuggerLoop:
2055    case kWaitingInMainSignalCatcherLoop:
2056    case kWaitingPerformingGc:
2057    case kWaiting:
2058      return JDWP::TS_WAIT;
2059      // Don't add a 'default' here so the compiler can spot incompatible enum changes.
2060  }
2061  LOG(FATAL) << "Unknown thread state: " << state;
2062  return JDWP::TS_ZOMBIE;
2063}
2064
2065JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
2066                                     JDWP::JdwpSuspendStatus* pSuspendStatus) {
2067  ScopedObjectAccess soa(Thread::Current());
2068
2069  *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
2070
2071  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2072  Thread* thread;
2073  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2074  if (error != JDWP::ERR_NONE) {
2075    if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
2076      *pThreadStatus = JDWP::TS_ZOMBIE;
2077      return JDWP::ERR_NONE;
2078    }
2079    return error;
2080  }
2081
2082  if (IsSuspendedForDebugger(soa, thread)) {
2083    *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
2084  }
2085
2086  *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
2087  return JDWP::ERR_NONE;
2088}
2089
2090JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
2091  ScopedObjectAccess soa(Thread::Current());
2092  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2093  Thread* thread;
2094  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2095  if (error != JDWP::ERR_NONE) {
2096    return error;
2097  }
2098  MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2099  expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
2100  return JDWP::ERR_NONE;
2101}
2102
2103JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
2104  ScopedObjectAccess soa(Thread::Current());
2105  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2106  Thread* thread;
2107  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2108  if (error != JDWP::ERR_NONE) {
2109    return error;
2110  }
2111  thread->Interrupt(soa.Self());
2112  return JDWP::ERR_NONE;
2113}
2114
2115static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
2116                                   mirror::Object* desired_thread_group, mirror::Object* peer)
2117    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2118  // Do we want threads from all thread groups?
2119  if (desired_thread_group == nullptr) {
2120    return true;
2121  }
2122  mirror::ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
2123  DCHECK(thread_group_field != nullptr);
2124  mirror::Object* group = thread_group_field->GetObject(peer);
2125  return (group == desired_thread_group);
2126}
2127
2128void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& thread_ids) {
2129  ScopedObjectAccessUnchecked soa(Thread::Current());
2130  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
2131  std::list<Thread*> all_threads_list;
2132  {
2133    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
2134    all_threads_list = Runtime::Current()->GetThreadList()->GetList();
2135  }
2136  for (Thread* t : all_threads_list) {
2137    if (t == Dbg::GetDebugThread()) {
2138      // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
2139      // query all threads, so it's easier if we just don't tell them about this thread.
2140      continue;
2141    }
2142    if (t->IsStillStarting()) {
2143      // This thread is being started (and has been registered in the thread list). However, it is
2144      // not completely started yet so we must ignore it.
2145      continue;
2146    }
2147    mirror::Object* peer = t->GetPeer();
2148    if (peer == nullptr) {
2149      // peer might be NULL if the thread is still starting up. We can't tell the debugger about
2150      // this thread yet.
2151      // TODO: if we identified threads to the debugger by their Thread*
2152      // rather than their peer's mirror::Object*, we could fix this.
2153      // Doing so might help us report ZOMBIE threads too.
2154      continue;
2155    }
2156    if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
2157      thread_ids.push_back(gRegistry->Add(peer));
2158    }
2159  }
2160}
2161
2162void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& child_thread_group_ids) {
2163  ScopedObjectAccess soa(Thread::Current());
2164  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
2165
2166  // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
2167  mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
2168  mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
2169
2170  // Get the array and size out of the ArrayList<ThreadGroup>...
2171  mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
2172  mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
2173  mirror::ObjectArray<mirror::Object>* groups_array =
2174      array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
2175  const int32_t size = size_field->GetInt(groups_array_list);
2176
2177  // Copy the first 'size' elements out of the array into the result.
2178  for (int32_t i = 0; i < size; ++i) {
2179    child_thread_group_ids.push_back(gRegistry->Add(groups_array->Get(i)));
2180  }
2181}
2182
2183static int GetStackDepth(Thread* thread)
2184    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2185  struct CountStackDepthVisitor : public StackVisitor {
2186    explicit CountStackDepthVisitor(Thread* thread)
2187        : StackVisitor(thread, NULL), depth(0) {}
2188
2189    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2190    // annotalysis.
2191    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2192      if (!GetMethod()->IsRuntimeMethod()) {
2193        ++depth;
2194      }
2195      return true;
2196    }
2197    size_t depth;
2198  };
2199
2200  CountStackDepthVisitor visitor(thread);
2201  visitor.WalkStack();
2202  return visitor.depth;
2203}
2204
2205JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t& result) {
2206  ScopedObjectAccess soa(Thread::Current());
2207  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2208  Thread* thread;
2209  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2210  if (error != JDWP::ERR_NONE) {
2211    return error;
2212  }
2213  if (!IsSuspendedForDebugger(soa, thread)) {
2214    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2215  }
2216  result = GetStackDepth(thread);
2217  return JDWP::ERR_NONE;
2218}
2219
2220JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
2221                                     size_t frame_count, JDWP::ExpandBuf* buf) {
2222  class GetFrameVisitor : public StackVisitor {
2223   public:
2224    GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf)
2225        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2226        : StackVisitor(thread, NULL), depth_(0),
2227          start_frame_(start_frame), frame_count_(frame_count), buf_(buf) {
2228      expandBufAdd4BE(buf_, frame_count_);
2229    }
2230
2231    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2232    // annotalysis.
2233    virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2234      if (GetMethod()->IsRuntimeMethod()) {
2235        return true;  // The debugger can't do anything useful with a frame that has no Method*.
2236      }
2237      if (depth_ >= start_frame_ + frame_count_) {
2238        return false;
2239      }
2240      if (depth_ >= start_frame_) {
2241        JDWP::FrameId frame_id(GetFrameId());
2242        JDWP::JdwpLocation location;
2243        SetJdwpLocation(&location, GetMethod(), GetDexPc());
2244        VLOG(jdwp) << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
2245        expandBufAdd8BE(buf_, frame_id);
2246        expandBufAddLocation(buf_, location);
2247      }
2248      ++depth_;
2249      return true;
2250    }
2251
2252   private:
2253    size_t depth_;
2254    const size_t start_frame_;
2255    const size_t frame_count_;
2256    JDWP::ExpandBuf* buf_;
2257  };
2258
2259  ScopedObjectAccessUnchecked soa(Thread::Current());
2260  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2261  Thread* thread;
2262  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2263  if (error != JDWP::ERR_NONE) {
2264    return error;
2265  }
2266  if (!IsSuspendedForDebugger(soa, thread)) {
2267    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2268  }
2269  GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
2270  visitor.WalkStack();
2271  return JDWP::ERR_NONE;
2272}
2273
2274JDWP::ObjectId Dbg::GetThreadSelfId() {
2275  return GetThreadId(Thread::Current());
2276}
2277
2278JDWP::ObjectId Dbg::GetThreadId(Thread* thread) {
2279  ScopedObjectAccessUnchecked soa(Thread::Current());
2280  return gRegistry->Add(thread->GetPeer());
2281}
2282
2283void Dbg::SuspendVM() {
2284  Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
2285}
2286
2287void Dbg::ResumeVM() {
2288  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
2289}
2290
2291JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
2292  Thread* self = Thread::Current();
2293  ScopedLocalRef<jobject> peer(self->GetJniEnv(), NULL);
2294  {
2295    ScopedObjectAccess soa(self);
2296    peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id)));
2297  }
2298  if (peer.get() == NULL) {
2299    return JDWP::ERR_THREAD_NOT_ALIVE;
2300  }
2301  // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads
2302  // trying to suspend this one.
2303  MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
2304  bool timed_out;
2305  ThreadList* thread_list = Runtime::Current()->GetThreadList();
2306  Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
2307                                                    &timed_out);
2308  if (thread != NULL) {
2309    return JDWP::ERR_NONE;
2310  } else if (timed_out) {
2311    return JDWP::ERR_INTERNAL;
2312  } else {
2313    return JDWP::ERR_THREAD_NOT_ALIVE;
2314  }
2315}
2316
2317void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
2318  ScopedObjectAccessUnchecked soa(Thread::Current());
2319  mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id);
2320  Thread* thread;
2321  {
2322    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2323    thread = Thread::FromManagedThread(soa, peer);
2324  }
2325  if (thread == NULL) {
2326    LOG(WARNING) << "No such thread for resume: " << peer;
2327    return;
2328  }
2329  bool needs_resume;
2330  {
2331    MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2332    needs_resume = thread->GetSuspendCount() > 0;
2333  }
2334  if (needs_resume) {
2335    Runtime::Current()->GetThreadList()->Resume(thread, true);
2336  }
2337}
2338
2339void Dbg::SuspendSelf() {
2340  Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
2341}
2342
2343struct GetThisVisitor : public StackVisitor {
2344  GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
2345      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2346      : StackVisitor(thread, context), this_object(NULL), frame_id(frame_id) {}
2347
2348  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2349  // annotalysis.
2350  virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2351    if (frame_id != GetFrameId()) {
2352      return true;  // continue
2353    } else {
2354      this_object = GetThisObject();
2355      return false;
2356    }
2357  }
2358
2359  mirror::Object* this_object;
2360  JDWP::FrameId frame_id;
2361};
2362
2363JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
2364                                   JDWP::ObjectId* result) {
2365  ScopedObjectAccessUnchecked soa(Thread::Current());
2366  Thread* thread;
2367  {
2368    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2369    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2370    if (error != JDWP::ERR_NONE) {
2371      return error;
2372    }
2373    if (!IsSuspendedForDebugger(soa, thread)) {
2374      return JDWP::ERR_THREAD_NOT_SUSPENDED;
2375    }
2376  }
2377  std::unique_ptr<Context> context(Context::Create());
2378  GetThisVisitor visitor(thread, context.get(), frame_id);
2379  visitor.WalkStack();
2380  *result = gRegistry->Add(visitor.this_object);
2381  return JDWP::ERR_NONE;
2382}
2383
2384JDWP::JdwpError Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
2385                                   JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
2386  struct GetLocalVisitor : public StackVisitor {
2387    GetLocalVisitor(const ScopedObjectAccessUnchecked& soa, Thread* thread, Context* context,
2388                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width)
2389        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2390        : StackVisitor(thread, context), soa_(soa), frame_id_(frame_id), slot_(slot), tag_(tag),
2391          buf_(buf), width_(width), error_(JDWP::ERR_NONE) {}
2392
2393    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2394    // annotalysis.
2395    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2396      if (GetFrameId() != frame_id_) {
2397        return true;  // Not our frame, carry on.
2398      }
2399      // TODO: check that the tag is compatible with the actual type of the slot!
2400      // TODO: check slot is valid for this method or return INVALID_SLOT error.
2401      mirror::ArtMethod* m = GetMethod();
2402      if (m->IsNative()) {
2403        // We can't read local value from native method.
2404        error_ = JDWP::ERR_OPAQUE_FRAME;
2405        return false;
2406      }
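      // Translate the debugger's slot number back into a dex register before reading the vreg.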
2407      uint16_t reg = DemangleSlot(slot_, m);
2408      constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
2409      switch (tag_) {
2410        case JDWP::JT_BOOLEAN: {
2411          CHECK_EQ(width_, 1U);
2412          uint32_t intVal;
2413          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2414            VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
2415            JDWP::Set1(buf_+1, intVal != 0);
2416          } else {
2417            VLOG(jdwp) << "failed to get boolean local " << reg;
2418            error_ = kFailureErrorCode;
2419          }
2420          break;
2421        }
2422        case JDWP::JT_BYTE: {
2423          CHECK_EQ(width_, 1U);
2424          uint32_t intVal;
2425          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2426            VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
2427            JDWP::Set1(buf_+1, intVal);
2428          } else {
2429            VLOG(jdwp) << "failed to get byte local " << reg;
2430            error_ = kFailureErrorCode;
2431          }
2432          break;
2433        }
2434        case JDWP::JT_SHORT:
2435        case JDWP::JT_CHAR: {
2436          CHECK_EQ(width_, 2U);
2437          uint32_t intVal;
2438          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2439            VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
2440            JDWP::Set2BE(buf_+1, intVal);
2441          } else {
2442            VLOG(jdwp) << "failed to get short/char local " << reg;
2443            error_ = kFailureErrorCode;
2444          }
2445          break;
2446        }
2447        case JDWP::JT_INT: {
2448          CHECK_EQ(width_, 4U);
2449          uint32_t intVal;
2450          if (GetVReg(m, reg, kIntVReg, &intVal)) {
2451            VLOG(jdwp) << "get int local " << reg << " = " << intVal;
2452            JDWP::Set4BE(buf_+1, intVal);
2453          } else {
2454            VLOG(jdwp) << "failed to get int local " << reg;
2455            error_ = kFailureErrorCode;
2456          }
2457          break;
2458        }
2459        case JDWP::JT_FLOAT: {
2460          CHECK_EQ(width_, 4U);
2461          uint32_t intVal;
2462          if (GetVReg(m, reg, kFloatVReg, &intVal)) {
2463            VLOG(jdwp) << "get float local " << reg << " = " << intVal;
2464            JDWP::Set4BE(buf_+1, intVal);
2465          } else {
2466            VLOG(jdwp) << "failed to get float local " << reg;
2467            error_ = kFailureErrorCode;
2468          }
2469          break;
2470        }
2471        case JDWP::JT_ARRAY:
2472        case JDWP::JT_CLASS_LOADER:
2473        case JDWP::JT_CLASS_OBJECT:
2474        case JDWP::JT_OBJECT:
2475        case JDWP::JT_STRING:
2476        case JDWP::JT_THREAD:
2477        case JDWP::JT_THREAD_GROUP: {
2478          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
2479          uint32_t intVal;
2480          if (GetVReg(m, reg, kReferenceVReg, &intVal)) {
2481            mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
2482            VLOG(jdwp) << "get " << tag_ << " object local " << reg << " = " << o;
2483            if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
2484              LOG(FATAL) << "Register " << reg << " expected to hold " << tag_ << " object: " << o;
2485            }
2486            tag_ = TagFromObject(soa_, o);
2487            JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
2488          } else {
2489            VLOG(jdwp) << "failed to get " << tag_ << " object local " << reg;
2490            error_ = kFailureErrorCode;
2491          }
2492          break;
2493        }
2494        case JDWP::JT_DOUBLE: {
2495          CHECK_EQ(width_, 8U);
2496          uint64_t longVal;
2497          if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
2498            VLOG(jdwp) << "get double local " << reg << " = " << longVal;
2499            JDWP::Set8BE(buf_+1, longVal);
2500          } else {
2501            VLOG(jdwp) << "failed to get double local " << reg;
2502            error_ = kFailureErrorCode;
2503          }
2504          break;
2505        }
2506        case JDWP::JT_LONG: {
2507          CHECK_EQ(width_, 8U);
2508          uint64_t longVal;
2509          if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) {
2510            VLOG(jdwp) << "get long local " << reg << " = " << longVal;
2511            JDWP::Set8BE(buf_+1, longVal);
2512          } else {
2513            VLOG(jdwp) << "failed to get long local " << reg;
2514            error_ = kFailureErrorCode;
2515          }
2516          break;
2517        }
2518        default:
2519          LOG(FATAL) << "Unknown tag " << tag_;
2520          break;
2521      }
2522
2523      // Prepend tag, which may have been updated.
2524      JDWP::Set1(buf_, tag_);
2525      return false;
2526    }
2527    const ScopedObjectAccessUnchecked& soa_;
2528    const JDWP::FrameId frame_id_;
2529    const int slot_;
2530    JDWP::JdwpTag tag_;
2531    uint8_t* const buf_;
2532    const size_t width_;
2533    JDWP::JdwpError error_;
2534  };
2535
2536  ScopedObjectAccessUnchecked soa(Thread::Current());
2537  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2538  Thread* thread;
2539  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2540  if (error != JDWP::ERR_NONE) {
2541    return error;
2542  }
2543  // TODO: check that the thread is suspended by the debugger?
2544  std::unique_ptr<Context> context(Context::Create());
2545  GetLocalVisitor visitor(soa, thread, context.get(), frame_id, slot, tag, buf, width);
2546  visitor.WalkStack();
2547  return visitor.error_;
2548}
2549
2550JDWP::JdwpError Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
2551                                   JDWP::JdwpTag tag, uint64_t value, size_t width) {
2552  struct SetLocalVisitor : public StackVisitor {
2553    SetLocalVisitor(Thread* thread, Context* context,
2554                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value,
2555                    size_t width)
2556        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2557        : StackVisitor(thread, context),
2558          frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width),
2559          error_(JDWP::ERR_NONE) {}
2560
2561    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2562    // annotalysis.
2563    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2564      if (GetFrameId() != frame_id_) {
2565        return true;  // Not our frame, carry on.
2566      }
2567      // TODO: check that the tag is compatible with the actual type of the slot!
2568      // TODO: check slot is valid for this method or return INVALID_SLOT error.
2569      mirror::ArtMethod* m = GetMethod();
2570      if (m->IsNative()) {
2571        // We can't write a local value into a native method.
2572        error_ = JDWP::ERR_OPAQUE_FRAME;
2573        return false;
2574      }
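      // Translate the debugger's slot number back into a dex register before writing the vreg.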
2575      uint16_t reg = DemangleSlot(slot_, m);
2576      constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
2577      switch (tag_) {
2578        case JDWP::JT_BOOLEAN:
2579        case JDWP::JT_BYTE:
2580          CHECK_EQ(width_, 1U);
2581          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2582            VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = "
2583                       << static_cast<uint32_t>(value_);
2584            error_ = kFailureErrorCode;
2585          }
2586          break;
2587        case JDWP::JT_SHORT:
2588        case JDWP::JT_CHAR:
2589          CHECK_EQ(width_, 2U);
2590          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2591            VLOG(jdwp) << "failed to set short/char local " << reg << " = "
2592                       << static_cast<uint32_t>(value_);
2593            error_ = kFailureErrorCode;
2594          }
2595          break;
2596        case JDWP::JT_INT:
2597          CHECK_EQ(width_, 4U);
2598          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
2599            VLOG(jdwp) << "failed to set int local " << reg << " = "
2600                       << static_cast<uint32_t>(value_);
2601            error_ = kFailureErrorCode;
2602          }
2603          break;
2604        case JDWP::JT_FLOAT:
2605          CHECK_EQ(width_, 4U);
2606          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kFloatVReg)) {
2607            VLOG(jdwp) << "failed to set float local " << reg << " = "
2608                       << static_cast<uint32_t>(value_);
2609            error_ = kFailureErrorCode;
2610          }
2611          break;
2612        case JDWP::JT_ARRAY:
2613        case JDWP::JT_CLASS_LOADER:
2614        case JDWP::JT_CLASS_OBJECT:
2615        case JDWP::JT_OBJECT:
2616        case JDWP::JT_STRING:
2617        case JDWP::JT_THREAD:
2618        case JDWP::JT_THREAD_GROUP: {
2619          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
2620          mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value_));
2621          if (o == ObjectRegistry::kInvalidObject) {
2622            VLOG(jdwp) << tag_ << " object " << o << " is an invalid object";
2623            error_ = JDWP::ERR_INVALID_OBJECT;
2624          } else if (!SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
2625                              kReferenceVReg)) {
2626            VLOG(jdwp) << "failed to set " << tag_ << " object local " << reg << " = " << o;
2627            error_ = kFailureErrorCode;
2628          }
2629          break;
2630        }
2631        case JDWP::JT_DOUBLE: {
2632          CHECK_EQ(width_, 8U);
2633          bool success = SetVRegPair(m, reg, value_, kDoubleLoVReg, kDoubleHiVReg);
2634          if (!success) {
2635            VLOG(jdwp) << "failed to set double local " << reg << " = " << value_;
2636            error_ = kFailureErrorCode;
2637          }
2638          break;
2639        }
2640        case JDWP::JT_LONG: {
2641          CHECK_EQ(width_, 8U);
2642          bool success = SetVRegPair(m, reg, value_, kLongLoVReg, kLongHiVReg);
2643          if (!success) {
2644            VLOG(jdwp) << "failed to set double local " << reg << " = " << value_;
2645            error_ = kFailureErrorCode;
2646          }
2647          break;
2648        }
2649        default:
2650          LOG(FATAL) << "Unknown tag " << tag_;
2651          break;
2652      }
2653      return false;
2654    }
2655
2656    const JDWP::FrameId frame_id_;
2657    const int slot_;
2658    const JDWP::JdwpTag tag_;
2659    const uint64_t value_;
2660    const size_t width_;
2661    JDWP::JdwpError error_;
2662  };
2663
2664  ScopedObjectAccessUnchecked soa(Thread::Current());
2665  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2666  Thread* thread;
2667  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2668  if (error != JDWP::ERR_NONE) {
2669    return error;
2670  }
2671  // TODO: check that the thread is suspended by the debugger?
2672  std::unique_ptr<Context> context(Context::Create());
2673  SetLocalVisitor visitor(thread, context.get(), frame_id, slot, tag, value, width);
2674  visitor.WalkStack();
2675  return visitor.error_;
2676}
2677
2678static void SetEventLocation(JDWP::EventLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
2679    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2680  DCHECK(location != nullptr);
2681  if (m == nullptr) {
2682    memset(location, 0, sizeof(*location));
2683  } else {
2684    location->method = m;
2685    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint32_t>(-1) : dex_pc;
2686  }
2687}
2688
2689void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
2690                            int event_flags, const JValue* return_value) {
2691  if (!IsDebuggerActive()) {
2692    return;
2693  }
2694  DCHECK(m != nullptr);
2695  DCHECK_EQ(m->IsStatic(), this_object == nullptr);
2696  JDWP::EventLocation location;
2697  SetEventLocation(&location, m, dex_pc);
2698
2699  gJdwpState->PostLocationEvent(&location, this_object, event_flags, return_value);
2700}
2701
2702void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc,
2703                               mirror::Object* this_object, mirror::ArtField* f) {
2704  if (!IsDebuggerActive()) {
2705    return;
2706  }
2707  DCHECK(m != nullptr);
2708  DCHECK(f != nullptr);
2709  JDWP::EventLocation location;
2710  SetEventLocation(&location, m, dex_pc);
2711
2712  gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false);
2713}
2714
2715void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
2716                                     mirror::Object* this_object, mirror::ArtField* f,
2717                                     const JValue* field_value) {
2718  if (!IsDebuggerActive()) {
2719    return;
2720  }
2721  DCHECK(m != nullptr);
2722  DCHECK(f != nullptr);
2723  DCHECK(field_value != nullptr);
2724  JDWP::EventLocation location;
2725  SetEventLocation(&location, m, dex_pc);
2726
2727  gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
2728}
2729
2730void Dbg::PostException(const ThrowLocation& throw_location,
2731                        mirror::ArtMethod* catch_method,
2732                        uint32_t catch_dex_pc, mirror::Throwable* exception_object) {
2733  if (!IsDebuggerActive()) {
2734    return;
2735  }
2736  JDWP::EventLocation exception_throw_location;
2737  SetEventLocation(&exception_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
2738  JDWP::EventLocation exception_catch_location;
2739  SetEventLocation(&exception_catch_location, catch_method, catch_dex_pc);
2740
2741  gJdwpState->PostException(&exception_throw_location, exception_object, &exception_catch_location,
2742                            throw_location.GetThis());
2743}
2744
2745void Dbg::PostClassPrepare(mirror::Class* c) {
2746  if (!IsDebuggerActive()) {
2747    return;
2748  }
2749  gJdwpState->PostClassPrepare(c);
2750}
2751
2752void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
2753                         mirror::ArtMethod* m, uint32_t dex_pc,
2754                         int event_flags, const JValue* return_value) {
2755  if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
2756    return;
2757  }
2758
2759  if (IsBreakpoint(m, dex_pc)) {
2760    event_flags |= kBreakpoint;
2761  }
2762
2763  // If the debugger is single-stepping one of our threads, check to
2764  // see if we're that thread and we've reached a step point.
2765  const SingleStepControl* single_step_control = thread->GetSingleStepControl();
2766  DCHECK(single_step_control != nullptr);
2767  if (single_step_control->is_active) {
2768    CHECK(!m->IsNative());
2769    if (single_step_control->step_depth == JDWP::SD_INTO) {
2770      // Step into method calls.  We break when the line number
2771      // or method pointer changes.  If we're in SS_MIN mode, we
2772      // always stop.
2773      if (single_step_control->method != m) {
2774        event_flags |= kSingleStep;
2775        VLOG(jdwp) << "SS new method";
2776      } else if (single_step_control->step_size == JDWP::SS_MIN) {
2777        event_flags |= kSingleStep;
2778        VLOG(jdwp) << "SS new instruction";
2779      } else if (single_step_control->ContainsDexPc(dex_pc)) {
2780        event_flags |= kSingleStep;
2781        VLOG(jdwp) << "SS new line";
2782      }
2783    } else if (single_step_control->step_depth == JDWP::SD_OVER) {
2784      // Step over method calls.  We break when the line number is
2785      // different and the frame depth is <= the original frame
2786      // depth.  (We can't just compare on the method, because we
2787      // might get unrolled past it by an exception, and it's tricky
2788      // to identify recursion.)
2789
2790      int stack_depth = GetStackDepth(thread);
2791
2792      if (stack_depth < single_step_control->stack_depth) {
2793        // Popped up one or more frames, always trigger.
2794        event_flags |= kSingleStep;
2795        VLOG(jdwp) << "SS method pop";
2796      } else if (stack_depth == single_step_control->stack_depth) {
2797        // Same depth, see if we moved.
2798        if (single_step_control->step_size == JDWP::SS_MIN) {
2799          event_flags |= kSingleStep;
2800          VLOG(jdwp) << "SS new instruction";
2801        } else if (single_step_control->ContainsDexPc(dex_pc)) {
2802          event_flags |= kSingleStep;
2803          VLOG(jdwp) << "SS new line";
2804        }
2805      }
2806    } else {
2807      CHECK_EQ(single_step_control->step_depth, JDWP::SD_OUT);
2808      // Return from the current method.  We break when the frame
2809      // depth pops up.
2810
2811      // This differs from the "method exit" break in that it stops
2812      // with the PC at the next instruction in the returned-to
2813      // function, rather than the end of the returning function.
2814
2815      int stack_depth = GetStackDepth(thread);
2816      if (stack_depth < single_step_control->stack_depth) {
2817        event_flags |= kSingleStep;
2818        VLOG(jdwp) << "SS method pop";
2819      }
2820    }
2821  }
2822
2823  // If there's something interesting going on, see if it matches one
2824  // of the debugger filters.
2825  if (event_flags != 0) {
2826    Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
2827  }
2828}
2829
2830size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
2831  switch (instrumentation_event) {
2832    case instrumentation::Instrumentation::kMethodEntered:
2833      return &method_enter_event_ref_count_;
2834    case instrumentation::Instrumentation::kMethodExited:
2835      return &method_exit_event_ref_count_;
2836    case instrumentation::Instrumentation::kDexPcMoved:
2837      return &dex_pc_change_event_ref_count_;
2838    case instrumentation::Instrumentation::kFieldRead:
2839      return &field_read_event_ref_count_;
2840    case instrumentation::Instrumentation::kFieldWritten:
2841      return &field_write_event_ref_count_;
2842    case instrumentation::Instrumentation::kExceptionCaught:
2843      return &exception_catch_event_ref_count_;
2844    default:
2845      return nullptr;
2846  }
2847}
2848
2849// Process request while all mutator threads are suspended.
2850void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
2851  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
2852  switch (request.GetKind()) {
2853    case DeoptimizationRequest::kNothing:
2854      LOG(WARNING) << "Ignoring empty deoptimization request.";
2855      break;
2856    case DeoptimizationRequest::kRegisterForEvent:
2857      VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
2858                                 request.InstrumentationEvent());
2859      instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
2860      instrumentation_events_ |= request.InstrumentationEvent();
2861      break;
2862    case DeoptimizationRequest::kUnregisterForEvent:
2863      VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
2864                                 request.InstrumentationEvent());
2865      instrumentation->RemoveListener(&gDebugInstrumentationListener,
2866                                      request.InstrumentationEvent());
2867      instrumentation_events_ &= ~request.InstrumentationEvent();
2868      break;
2869    case DeoptimizationRequest::kFullDeoptimization:
2870      VLOG(jdwp) << "Deoptimize the world ...";
2871      instrumentation->DeoptimizeEverything();
2872      VLOG(jdwp) << "Deoptimize the world DONE";
2873      break;
2874    case DeoptimizationRequest::kFullUndeoptimization:
2875      VLOG(jdwp) << "Undeoptimize the world ...";
2876      instrumentation->UndeoptimizeEverything();
2877      VLOG(jdwp) << "Undeoptimize the world DONE";
2878      break;
2879    case DeoptimizationRequest::kSelectiveDeoptimization:
2880      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
2881      instrumentation->Deoptimize(request.Method());
2882      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
2883      break;
2884    case DeoptimizationRequest::kSelectiveUndeoptimization:
2885      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
2886      instrumentation->Undeoptimize(request.Method());
2887      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
2888      break;
2889    default:
2890      LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
2891      break;
2892  }
2893}
2894
2895void Dbg::DelayFullUndeoptimization() {
2896  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2897  ++delayed_full_undeoptimization_count_;
2898  DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
2899}
2900
2901void Dbg::ProcessDelayedFullUndeoptimizations() {
2902  // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization).
2903  {
2904    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2905    while (delayed_full_undeoptimization_count_ > 0) {
2906      DeoptimizationRequest req;
2907      req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
2908      req.SetMethod(nullptr);
2909      RequestDeoptimizationLocked(req);
2910      --delayed_full_undeoptimization_count_;
2911    }
2912  }
2913  ManageDeoptimization();
2914}
2915
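// Queues a deoptimization request under the deoptimization lock; the request is applied later by
// ManageDeoptimization().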
2916void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
2917  if (req.GetKind() == DeoptimizationRequest::kNothing) {
2918    // Nothing to do.
2919    return;
2920  }
2921  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2922  RequestDeoptimizationLocked(req);
2923}
2924
2925void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
2926  switch (req.GetKind()) {
2927    case DeoptimizationRequest::kRegisterForEvent: {
2928      DCHECK_NE(req.InstrumentationEvent(), 0u);
2929      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
2930      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
2931                                                req.InstrumentationEvent());
2932      if (*counter == 0) {
2933        VLOG(jdwp) << StringPrintf("Queue request #%zu to start listening to instrumentation event 0x%x",
2934                                   deoptimization_requests_.size(), req.InstrumentationEvent());
2935        deoptimization_requests_.push_back(req);
2936      }
2937      *counter = *counter + 1;
2938      break;
2939    }
2940    case DeoptimizationRequest::kUnregisterForEvent: {
2941      DCHECK_NE(req.InstrumentationEvent(), 0u);
2942      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
2943      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
2944                                                req.InstrumentationEvent());
2945      *counter = *counter - 1;
2946      if (*counter == 0) {
2947        VLOG(jdwp) << StringPrintf("Queue request #%zu to stop listening to instrumentation event 0x%x",
2948                                   deoptimization_requests_.size(), req.InstrumentationEvent());
2949        deoptimization_requests_.push_back(req);
2950      }
2951      break;
2952    }
2953    case DeoptimizationRequest::kFullDeoptimization: {
2954      DCHECK(req.Method() == nullptr);
2955      if (full_deoptimization_event_count_ == 0) {
2956        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2957                   << " for full deoptimization";
2958        deoptimization_requests_.push_back(req);
2959      }
2960      ++full_deoptimization_event_count_;
2961      break;
2962    }
2963    case DeoptimizationRequest::kFullUndeoptimization: {
2964      DCHECK(req.Method() == nullptr);
2965      DCHECK_GT(full_deoptimization_event_count_, 0U);
2966      --full_deoptimization_event_count_;
2967      if (full_deoptimization_event_count_ == 0) {
2968        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2969                   << " for full undeoptimization";
2970        deoptimization_requests_.push_back(req);
2971      }
2972      break;
2973    }
2974    case DeoptimizationRequest::kSelectiveDeoptimization: {
2975      DCHECK(req.Method() != nullptr);
2976      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2977                 << " for deoptimization of " << PrettyMethod(req.Method());
2978      deoptimization_requests_.push_back(req);
2979      break;
2980    }
2981    case DeoptimizationRequest::kSelectiveUndeoptimization: {
2982      DCHECK(req.Method() != nullptr);
2983      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2984                 << " for undeoptimization of " << PrettyMethod(req.Method());
2985      deoptimization_requests_.push_back(req);
2986      break;
2987    }
2988    default: {
2989      LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
2990      break;
2991    }
2992  }
2993}
2994
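// Applies all pending deoptimization requests in a single suspend-all pass, then clears the queue.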
2995void Dbg::ManageDeoptimization() {
2996  Thread* const self = Thread::Current();
2997  {
2998    // Avoid suspend/resume if there is no pending request.
2999    MutexLock mu(self, *Locks::deoptimization_lock_);
3000    if (deoptimization_requests_.empty()) {
3001      return;
3002    }
3003  }
3004  CHECK_EQ(self->GetState(), kRunnable);
3005  self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
3006  // We need to suspend mutator threads first.
3007  Runtime* const runtime = Runtime::Current();
3008  runtime->GetThreadList()->SuspendAll();
3009  const ThreadState old_state = self->SetStateUnsafe(kRunnable);
3010  {
3011    MutexLock mu(self, *Locks::deoptimization_lock_);
3012    size_t req_index = 0;
3013    for (DeoptimizationRequest& request : deoptimization_requests_) {
3014      VLOG(jdwp) << "Process deoptimization request #" << req_index++;
3015      ProcessDeoptimizationRequest(request);
3016    }
3017    deoptimization_requests_.clear();
3018  }
3019  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
3020  runtime->GetThreadList()->ResumeAll();
3021  self->TransitionFromSuspendedToRunnable();
3022}
3023
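// Conservatively reports whether the compiler may have inlined this method, by running the inline
// method analyser over its code item. Used to choose between full and selective deoptimization.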
3024static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
3025    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3026  const DexFile::CodeItem* code_item = m->GetCodeItem();
3027  if (code_item == nullptr) {
3028    // TODO: We should not be asked to watch a location in a native or abstract method, so the
3029    // code item should never be null. We could just check that we never encounter this case.
3030    return false;
3031  }
3032  StackHandleScope<2> hs(self);
3033  mirror::Class* declaring_class = m->GetDeclaringClass();
3034  Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
3035  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
3036  verifier::MethodVerifier verifier(dex_cache->GetDexFile(), &dex_cache, &class_loader,
3037                                    &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
3038                                    m->GetAccessFlags(), false, true, false);
3039  // Note: we don't need to verify the method.
3040  return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
3041}
3042
3043static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
3044    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3045  for (Breakpoint& breakpoint : gBreakpoints) {
3046    if (breakpoint.Method() == m) {
3047      return &breakpoint;
3048    }
3049  }
3050  return nullptr;
3051}
3052
3053// Sanity checks all existing breakpoints on the same method.
3054static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
3055    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3056  if (kIsDebugBuild) {
3057    for (const Breakpoint& breakpoint : gBreakpoints) {
3058      CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
3059    }
3060    if (need_full_deoptimization) {
3061      // We should have deoptimized everything but not "selectively" deoptimized this method.
3062      CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized());
3063      CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3064    } else {
3065      // We should have "selectively" deoptimized this method.
3066      // Note: while we have not deoptimized everything for this method, we may have done it for
3067      // another event.
3068      CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3069    }
3070  }
3071}
3072
3073// Installs a breakpoint at the specified location. Also indicates through the deoptimization
3074// request if we need to deoptimize.
3075void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3076  Thread* const self = Thread::Current();
3077  mirror::ArtMethod* m = FromMethodId(location->method_id);
3078  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3079
3080  WriterMutexLock mu(self, *Locks::breakpoint_lock_);
3081  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3082  bool need_full_deoptimization;
3083  if (existing_breakpoint == nullptr) {
3084    // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
3085    // inlined, we deoptimize everything; otherwise we deoptimize only this method.
3086    need_full_deoptimization = IsMethodPossiblyInlined(self, m);
3087    if (need_full_deoptimization) {
3088      req->SetKind(DeoptimizationRequest::kFullDeoptimization);
3089      req->SetMethod(nullptr);
3090    } else {
3091      req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization);
3092      req->SetMethod(m);
3093    }
3094  } else {
3095    // There is at least one breakpoint for this method: we don't need to deoptimize.
3096    req->SetKind(DeoptimizationRequest::kNothing);
3097    req->SetMethod(nullptr);
3098
3099    need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization();
3100    SanityCheckExistingBreakpoints(m, need_full_deoptimization);
3101  }
3102
3103  gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization));
3104  VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
3105             << gBreakpoints[gBreakpoints.size() - 1];
3106}
3107
3108// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
3109// request if we need to undeoptimize.
3110void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3111  WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
3112  mirror::ArtMethod* m = FromMethodId(location->method_id);
3113  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3114  bool need_full_deoptimization = false;
3115  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
3116    if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
3117      VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
3118      need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization();
3119      DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3120      gBreakpoints.erase(gBreakpoints.begin() + i);
3121      break;
3122    }
3123  }
3124  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3125  if (existing_breakpoint == nullptr) {
3126    // There are no more breakpoints on this method: we need to undeoptimize.
3127    if (need_full_deoptimization) {
3128      // This method required full deoptimization: we need to undeoptimize everything.
3129      req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
3130      req->SetMethod(nullptr);
3131    } else {
3132      // This method required selective deoptimization: we need to undeoptimize only that method.
3133      req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
3134      req->SetMethod(m);
3135    }
3136  } else {
3137    // There is at least one breakpoint for this method: we don't need to undeoptimize.
3138    req->SetKind(DeoptimizationRequest::kNothing);
3139    req->SetMethod(nullptr);
3140    SanityCheckExistingBreakpoints(m, need_full_deoptimization);
3141  }
3142}
3143
3144// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
3145// cause suspension if the thread is the current thread.
3146class ScopedThreadSuspension {
3147 public:
3148  ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
3149      LOCKS_EXCLUDED(Locks::thread_list_lock_)
3150      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
3151      thread_(nullptr),
3152      error_(JDWP::ERR_NONE),
3153      self_suspend_(false),
3154      other_suspend_(false) {
3155    ScopedObjectAccessUnchecked soa(self);
3156    {
3157      MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3158      error_ = DecodeThread(soa, thread_id, thread_);
3159    }
3160    if (error_ == JDWP::ERR_NONE) {
3161      if (thread_ == soa.Self()) {
3162        self_suspend_ = true;
3163      } else {
3164        soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3165        jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
3166        bool timed_out;
3167        Thread* suspended_thread;
3168        {
3169          // Take suspend thread lock to avoid races with threads trying to suspend this one.
3170          MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
3171          ThreadList* thread_list = Runtime::Current()->GetThreadList();
3172          suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
3173        }
3174        CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
3175        if (suspended_thread == nullptr) {
3176          // Thread terminated from under us while suspending.
3177          error_ = JDWP::ERR_INVALID_THREAD;
3178        } else {
3179          CHECK_EQ(suspended_thread, thread_);
3180          other_suspend_ = true;
3181        }
3182      }
3183    }
3184  }
3185
3186  Thread* GetThread() const {
3187    return thread_;
3188  }
3189
3190  JDWP::JdwpError GetError() const {
3191    return error_;
3192  }
3193
3194  ~ScopedThreadSuspension() {
3195    if (other_suspend_) {
3196      Runtime::Current()->GetThreadList()->Resume(thread_, true);
3197    }
3198  }
3199
3200 private:
3201  Thread* thread_;
3202  JDWP::JdwpError error_;
3203  bool self_suspend_;
3204  bool other_suspend_;
3205};
3206
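// Prepares single-stepping on the given thread: records the current method, line number and stack
// depth, and collects the set of dex pcs that belong to the current source line.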
3207JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
3208                                   JDWP::JdwpStepDepth step_depth) {
3209  Thread* self = Thread::Current();
3210  ScopedThreadSuspension sts(self, thread_id);
3211  if (sts.GetError() != JDWP::ERR_NONE) {
3212    return sts.GetError();
3213  }
3214
3215  //
3216  // Work out what Method* we're in, the current line number, and how deep the stack currently
3217  // is for step-out.
3218  //
3219
3220  struct SingleStepStackVisitor : public StackVisitor {
3221    explicit SingleStepStackVisitor(Thread* thread, SingleStepControl* single_step_control,
3222                                    int32_t* line_number)
3223        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
3224        : StackVisitor(thread, NULL), single_step_control_(single_step_control),
3225          line_number_(line_number) {
3226      DCHECK_EQ(single_step_control_, thread->GetSingleStepControl());
3227      single_step_control_->method = NULL;
3228      single_step_control_->stack_depth = 0;
3229    }
3230
3231    // TODO: Enable annotalysis. We know the lock is held in the constructor, but the abstraction
3232    // confuses annotalysis.
3233    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
3234      mirror::ArtMethod* m = GetMethod();
3235      if (!m->IsRuntimeMethod()) {
3236        ++single_step_control_->stack_depth;
3237        if (single_step_control_->method == NULL) {
3238          mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
3239          single_step_control_->method = m;
3240          *line_number_ = -1;
3241          if (dex_cache != NULL) {
3242            const DexFile& dex_file = *dex_cache->GetDexFile();
3243            *line_number_ = dex_file.GetLineNumFromPC(m, GetDexPc());
3244          }
3245        }
3246      }
3247      return true;
3248    }
3249
3250    SingleStepControl* const single_step_control_;
3251    int32_t* const line_number_;
3252  };
3253
3254  Thread* const thread = sts.GetThread();
3255  SingleStepControl* const single_step_control = thread->GetSingleStepControl();
3256  DCHECK(single_step_control != nullptr);
3257  int32_t line_number = -1;
3258  SingleStepStackVisitor visitor(thread, single_step_control, &line_number);
3259  visitor.WalkStack();
3260
3261  //
3262  // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
3263  //
3264
3265  struct DebugCallbackContext {
3266    explicit DebugCallbackContext(SingleStepControl* single_step_control, int32_t line_number,
3267                                  const DexFile::CodeItem* code_item)
3268      : single_step_control_(single_step_control), line_number_(line_number), code_item_(code_item),
3269        last_pc_valid(false), last_pc(0) {
3270    }
3271
3272    static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) {
3273      DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
3274      if (static_cast<int32_t>(line_number) == context->line_number_) {
3275        if (!context->last_pc_valid) {
3276          // Everything from this address until the next line change is ours.
3277          context->last_pc = address;
3278          context->last_pc_valid = true;
3279        }
3280        // Otherwise, if we're already in a valid range for this line,
3281        // just keep going (shouldn't really happen)...
3282      } else if (context->last_pc_valid) {  // and the line number is new
3283        // Add everything from the last entry up until here to the set
3284        for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) {
3285          context->single_step_control_->dex_pcs.insert(dex_pc);
3286        }
3287        context->last_pc_valid = false;
3288      }
3289      return false;  // There may be multiple entries for any given line.
3290    }
3291
3292    ~DebugCallbackContext() {
3293      // If the line number was the last in the position table...
3294      if (last_pc_valid) {
3295        size_t end = code_item_->insns_size_in_code_units_;
3296        for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
3297          single_step_control_->dex_pcs.insert(dex_pc);
3298        }
3299      }
3300    }
3301
3302    SingleStepControl* const single_step_control_;
3303    const int32_t line_number_;
3304    const DexFile::CodeItem* const code_item_;
3305    bool last_pc_valid;
3306    uint32_t last_pc;
3307  };
3308  single_step_control->dex_pcs.clear();
3309  mirror::ArtMethod* m = single_step_control->method;
3310  if (!m->IsNative()) {
3311    const DexFile::CodeItem* const code_item = m->GetCodeItem();
3312    DebugCallbackContext context(single_step_control, line_number, code_item);
3313    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
3314                                     DebugCallbackContext::Callback, NULL, &context);
3315  }
3316
3317  //
3318  // Everything else...
3319  //
3320
3321  single_step_control->step_size = step_size;
3322  single_step_control->step_depth = step_depth;
3323  single_step_control->is_active = true;
3324
3325  if (VLOG_IS_ON(jdwp)) {
3326    VLOG(jdwp) << "Single-step thread: " << *thread;
3327    VLOG(jdwp) << "Single-step step size: " << single_step_control->step_size;
3328    VLOG(jdwp) << "Single-step step depth: " << single_step_control->step_depth;
3329    VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->method);
3330    VLOG(jdwp) << "Single-step current line: " << line_number;
3331    VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->stack_depth;
3332    VLOG(jdwp) << "Single-step dex_pc values:";
3333    for (uint32_t dex_pc : single_step_control->dex_pcs) {
3334      VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
3335    }
3336  }
3337
3338  return JDWP::ERR_NONE;
3339}
3340
3341void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
3342  ScopedObjectAccessUnchecked soa(Thread::Current());
3343  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3344  Thread* thread;
3345  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
3346  if (error == JDWP::ERR_NONE) {
3347    SingleStepControl* single_step_control = thread->GetSingleStepControl();
3348    DCHECK(single_step_control != nullptr);
3349    single_step_control->Clear();
3350  }
3351}
3352
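// Maps a JDWP type tag to the corresponding shorty character; all reference tags map to 'L'.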
3353static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
3354  switch (tag) {
3355    default:
3356      LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
3357
3358    // Primitives.
3359    case JDWP::JT_BYTE:    return 'B';
3360    case JDWP::JT_CHAR:    return 'C';
3361    case JDWP::JT_FLOAT:   return 'F';
3362    case JDWP::JT_DOUBLE:  return 'D';
3363    case JDWP::JT_INT:     return 'I';
3364    case JDWP::JT_LONG:    return 'J';
3365    case JDWP::JT_SHORT:   return 'S';
3366    case JDWP::JT_VOID:    return 'V';
3367    case JDWP::JT_BOOLEAN: return 'Z';
3368
3369    // Reference types.
3370    case JDWP::JT_ARRAY:
3371    case JDWP::JT_OBJECT:
3372    case JDWP::JT_STRING:
3373    case JDWP::JT_THREAD:
3374    case JDWP::JT_THREAD_GROUP:
3375    case JDWP::JT_CLASS_LOADER:
3376    case JDWP::JT_CLASS_OBJECT:
3377      return 'L';
3378  }
3379}
3380
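// Validates a debugger method invocation request, hands it to the suspended target thread and waits
// for the invoke to complete (see ExecuteMethod), then copies back the result and any exception.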
3381JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
3382                                  JDWP::RefTypeId class_id, JDWP::MethodId method_id,
3383                                  uint32_t arg_count, uint64_t* arg_values,
3384                                  JDWP::JdwpTag* arg_types, uint32_t options,
3385                                  JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
3386                                  JDWP::ObjectId* pExceptionId) {
3387  ThreadList* thread_list = Runtime::Current()->GetThreadList();
3388
3389  Thread* targetThread = NULL;
3390  DebugInvokeReq* req = NULL;
3391  Thread* self = Thread::Current();
3392  {
3393    ScopedObjectAccessUnchecked soa(self);
3394    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3395    JDWP::JdwpError error = DecodeThread(soa, thread_id, targetThread);
3396    if (error != JDWP::ERR_NONE) {
3397      LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
3398      return error;
3399    }
3400    req = targetThread->GetInvokeReq();
3401    if (!req->ready) {
3402      LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
3403      return JDWP::ERR_INVALID_THREAD;
3404    }
3405
3406    /*
3407     * We currently have a bug where we don't successfully resume the
3408     * target thread if the suspend count is too deep.  We're expected to
3409     * require one "resume" for each "suspend", but when asked to execute
3410     * a method we have to resume fully and then re-suspend it back to the
3411     * same level.  (The easiest way to cause this is to type "suspend"
3412     * multiple times in jdb.)
3413     *
3414     * It's unclear what this means when the event specifies "resume all"
3415     * and some threads are suspended more deeply than others.  This is
3416     * a rare problem, so for now we just prevent it from hanging forever
3417     * by rejecting the method invocation request.  Without this, we will
3418     * be stuck waiting on a suspended thread.
3419     */
3420    int suspend_count;
3421    {
3422      MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
3423      suspend_count = targetThread->GetSuspendCount();
3424    }
3425    if (suspend_count > 1) {
3426      LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count;
3427      return JDWP::ERR_THREAD_SUSPENDED;  // Probably not expected here.
3428    }
3429
3430    JDWP::JdwpError status;
3431    mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id);
3432    if (receiver == ObjectRegistry::kInvalidObject) {
3433      return JDWP::ERR_INVALID_OBJECT;
3434    }
3435
3436    mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id);
3437    if (thread == ObjectRegistry::kInvalidObject) {
3438      return JDWP::ERR_INVALID_OBJECT;
3439    }
3440    // TODO: check that 'thread' is actually a java.lang.Thread!
3441
3442    mirror::Class* c = DecodeClass(class_id, status);
3443    if (c == NULL) {
3444      return status;
3445    }
3446
3447    mirror::ArtMethod* m = FromMethodId(method_id);
3448    if (m->IsStatic() != (receiver == NULL)) {
3449      return JDWP::ERR_INVALID_METHODID;
3450    }
3451    if (m->IsStatic()) {
3452      if (m->GetDeclaringClass() != c) {
3453        return JDWP::ERR_INVALID_METHODID;
3454      }
3455    } else {
3456      if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
3457        return JDWP::ERR_INVALID_METHODID;
3458      }
3459    }
3460
3461    // Check the argument list matches the method.
3462    uint32_t shorty_len = 0;
3463    const char* shorty = m->GetShorty(&shorty_len);
3464    if (shorty_len - 1 != arg_count) {
3465      return JDWP::ERR_ILLEGAL_ARGUMENT;
3466    }
3467
3468    {
3469      StackHandleScope<3> hs(soa.Self());
3470      MethodHelper mh(hs.NewHandle(m));
3471      HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
3472      HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
3473      const DexFile::TypeList* types = m->GetParameterTypeList();
3474      for (size_t i = 0; i < arg_count; ++i) {
3475        if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
3476          return JDWP::ERR_ILLEGAL_ARGUMENT;
3477        }
3478
3479        if (shorty[i + 1] == 'L') {
3480          // Did we really get an argument of an appropriate reference type?
3481          mirror::Class* parameter_type = mh.GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_);
3482          mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i]);
3483          if (argument == ObjectRegistry::kInvalidObject) {
3484            return JDWP::ERR_INVALID_OBJECT;
3485          }
3486          if (argument != NULL && !argument->InstanceOf(parameter_type)) {
3487            return JDWP::ERR_ILLEGAL_ARGUMENT;
3488          }
3489
3490          // Turn the on-the-wire ObjectId into a jobject.
3491          jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
3492          v.l = gRegistry->GetJObject(arg_values[i]);
3493        }
3494      }
3495      // Update in case it moved.
3496      m = mh.GetMethod();
3497    }
3498
3499    req->receiver = receiver;
3500    req->thread = thread;
3501    req->klass = c;
3502    req->method = m;
3503    req->arg_count = arg_count;
3504    req->arg_values = arg_values;
3505    req->options = options;
3506    req->invoke_needed = true;
3507  }
3508
3509  // The fact that we've released the thread list lock is a bit risky -- if the thread goes
3510  // away we're sitting high and dry -- but we must release it before the resume calls below,
3511  // and it's unwise to hold it while waiting for the invoke to complete.
3512
3513  {
3514    /*
3515     * We change our (JDWP thread) status, which should be THREAD_RUNNING,
3516     * so we can suspend for a GC if the invoke request causes us to
3517     * run out of memory.  It's also a good idea to change it before locking
3518     * the invokeReq mutex, although that should never be held for long.
3519     */
3520    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
3521
3522    VLOG(jdwp) << "    Transferring control to event thread";
3523    {
3524      MutexLock mu(self, req->lock);
3525
3526      if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3527        VLOG(jdwp) << "      Resuming all threads";
3528        thread_list->UndoDebuggerSuspensions();
3529      } else {
3530        VLOG(jdwp) << "      Resuming event thread only";
3531        thread_list->Resume(targetThread, true);
3532      }
3533
3534      // Wait for the request to finish executing.
3535      while (req->invoke_needed) {
3536        req->cond.Wait(self);
3537      }
3538    }
3539    VLOG(jdwp) << "    Control has returned from event thread";
3540
3541    /* wait for thread to re-suspend itself */
3542    SuspendThread(thread_id, false /* request_suspension */);
3543    self->TransitionFromSuspendedToRunnable();
3544  }
3545
3546  /*
3547   * Suspend the threads.  We waited for the target thread to suspend
3548   * itself, so all we need to do is suspend the others.
3549   *
3550   * The SuspendAllForDebugger() call will double-suspend the event thread,
3551   * so we want to resume the target thread once to keep the books straight.
3552   */
3553  if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3554    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3555    VLOG(jdwp) << "      Suspending all threads";
3556    thread_list->SuspendAllForDebugger();
3557    self->TransitionFromSuspendedToRunnable();
3558    VLOG(jdwp) << "      Resuming event thread to balance the count";
3559    thread_list->Resume(targetThread, true);
3560  }
3561
3562  // Copy the result.
3563  *pResultTag = req->result_tag;
3564  if (IsPrimitiveTag(req->result_tag)) {
3565    *pResultValue = req->result_value.GetJ();
3566  } else {
3567    *pResultValue = gRegistry->Add(req->result_value.GetL());
3568  }
3569  *pExceptionId = req->exception;
3570  return req->error;
3571}
3572
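// Runs on the target (event) thread: performs the invocation described by the DebugInvokeReq while
// preserving any already-pending exception across the call.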
3573void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
3574  ScopedObjectAccess soa(Thread::Current());
3575
3576  // We can be called while an exception is pending. We need
3577  // to preserve that across the method invocation.
3578  StackHandleScope<4> hs(soa.Self());
3579  auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr);
3580  auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
3581  auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr);
3582  uint32_t old_throw_dex_pc;
3583  bool old_exception_report_flag;
3584  {
3585    ThrowLocation old_throw_location;
3586    mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
3587    old_throw_this_object.Assign(old_throw_location.GetThis());
3588    old_throw_method.Assign(old_throw_location.GetMethod());
3589    old_exception.Assign(old_exception_obj);
3590    old_throw_dex_pc = old_throw_location.GetDexPc();
3591    old_exception_report_flag = soa.Self()->IsExceptionReportedToInstrumentation();
3592    soa.Self()->ClearException();
3593  }
3594
3595  // Translate the method through the vtable, unless the debugger wants to suppress it.
3596  Handle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
3597  if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != NULL) {
3598    mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get());
3599    if (actual_method != m.Get()) {
3600      VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method);
3601      m.Assign(actual_method);
3602    }
3603  }
3604  VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get())
3605             << " receiver=" << pReq->receiver
3606             << " arg_count=" << pReq->arg_count;
3607  CHECK(m.Get() != nullptr);
3608
3609  CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
3610
3611  pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()),
3612                                         reinterpret_cast<jvalue*>(pReq->arg_values));
3613
3614  mirror::Throwable* exception = soa.Self()->GetException(NULL);
3615  soa.Self()->ClearException();
3616  pReq->exception = gRegistry->Add(exception);
3617  pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty());
3618  if (pReq->exception != 0) {
3619    VLOG(jdwp) << "  JDWP invocation returning with exception=" << exception
3620        << " " << exception->Dump();
3621    pReq->result_value.SetJ(0);
3622  } else if (pReq->result_tag == JDWP::JT_OBJECT) {
3623    /* if no exception thrown, examine object result more closely */
3624    JDWP::JdwpTag new_tag = TagFromObject(soa, pReq->result_value.GetL());
3625    if (new_tag != pReq->result_tag) {
3626      VLOG(jdwp) << "  JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
3627      pReq->result_tag = new_tag;
3628    }
3629
3630    /*
3631     * Register the object.  We don't actually need an ObjectId yet,
3632     * but we do need to be sure that the GC won't move or discard the
3633     * object when we switch out of RUNNING.  The ObjectId conversion
3634     * will add the object to the "do not touch" list.
3635     *
3636     * We can't use the "tracked allocation" mechanism here because
3637     * the object is going to be handed off to a different thread.
3638     */
3639    gRegistry->Add(pReq->result_value.GetL());
3640  }
3641
3642  if (old_exception.Get() != NULL) {
3643    ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
3644                                         old_throw_dex_pc);
3645    soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
3646    soa.Self()->SetExceptionReportedToInstrumentation(old_exception_report_flag);
3647  }
3648}
3649
3650/*
3651 * "request" contains a full JDWP packet, possibly with multiple chunks.  We
3652 * need to process each, accumulate the replies, and ship the whole thing
3653 * back.
3654 *
3655 * Returns "true" if we have a reply.  The reply buffer is newly allocated,
3656 * and includes the chunk type/length, followed by the data.
3657 *
3658 * OLD-TODO: we currently assume that the request and reply include a single
3659 * chunk.  If this becomes inconvenient we will need to adapt.
3660 */
3661bool Dbg::DdmHandlePacket(JDWP::Request& request, uint8_t** pReplyBuf, int* pReplyLen) {
3662  Thread* self = Thread::Current();
3663  JNIEnv* env = self->GetJniEnv();
3664
3665  uint32_t type = request.ReadUnsigned32("type");
3666  uint32_t length = request.ReadUnsigned32("length");
3667
3668  // Create a byte[] corresponding to 'request'.
3669  size_t request_length = request.size();
3670  ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
3671  if (dataArray.get() == NULL) {
3672    LOG(WARNING) << "byte[] allocation failed: " << request_length;
3673    env->ExceptionClear();
3674    return false;
3675  }
3676  env->SetByteArrayRegion(dataArray.get(), 0, request_length, reinterpret_cast<const jbyte*>(request.data()));
3677  request.Skip(request_length);
3678
3679  // Run through and find all chunks.  [Currently just find the first.]
3680  ScopedByteArrayRO contents(env, dataArray.get());
3681  if (length != request_length) {
3682    LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zu)", length, request_length);
3683    return false;
3684  }
3685
3686  // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
3687  ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3688                                                                 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
3689                                                                 type, dataArray.get(), 0, length));
3690  if (env->ExceptionCheck()) {
3691    LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
3692    env->ExceptionDescribe();
3693    env->ExceptionClear();
3694    return false;
3695  }
3696
3697  if (chunk.get() == NULL) {
3698    return false;
3699  }
3700
3701  /*
3702   * Pull the pieces out of the chunk.  We copy the results into a
3703   * newly-allocated buffer that the caller can free.  We don't want to
3704   * continue using the Chunk object because nothing has a reference to it.
3705   *
3706   * We could avoid this by returning type/data/offset/length and having
3707   * the caller be aware of the object lifetime issues, but that
3708   * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
3709   * if we have responses for multiple chunks.
3710   *
3711   * So we're pretty much stuck with copying data around multiple times.
3712   */
3713  ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
3714  jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
3715  length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
3716  type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);
3717
3718  VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
3719  if (length == 0 || replyData.get() == NULL) {
3720    return false;
3721  }
3722
3723  const int kChunkHdrLen = 8;
3724  uint8_t* reply = new uint8_t[length + kChunkHdrLen];
3725  if (reply == NULL) {
3726    LOG(WARNING) << "malloc failed: " << (length + kChunkHdrLen);
3727    return false;
3728  }
3729  JDWP::Set4BE(reply + 0, type);
3730  JDWP::Set4BE(reply + 4, length);
3731  env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
3732
3733  *pReplyBuf = reply;
3734  *pReplyLen = length + kChunkHdrLen;
3735
3736  VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
3737  return true;
3738}
3739
3740void Dbg::DdmBroadcast(bool connect) {
3741  VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
3742
3743  Thread* self = Thread::Current();
3744  if (self->GetState() != kRunnable) {
3745    LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
3746    /* try anyway? */
3747  }
3748
3749  JNIEnv* env = self->GetJniEnv();
3750  jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
3751  env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3752                            WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
3753                            event);
3754  if (env->ExceptionCheck()) {
3755    LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
3756    env->ExceptionDescribe();
3757    env->ExceptionClear();
3758  }
3759}
3760
3761void Dbg::DdmConnected() {
3762  Dbg::DdmBroadcast(true);
3763}
3764
3765void Dbg::DdmDisconnected() {
3766  Dbg::DdmBroadcast(false);
3767  gDdmThreadNotification = false;
3768}
3769
3770/*
3771 * Send a notification when a thread starts, stops, or changes its name.
3772 *
3773 * Because we broadcast the full set of threads when the notifications are
3774 * first enabled, it's possible for "thread" to be actively executing.
3775 */
3776void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
3777  if (!gDdmThreadNotification) {
3778    return;
3779  }
3780
3781  if (type == CHUNK_TYPE("THDE")) {
3782    uint8_t buf[4];
3783    JDWP::Set4BE(&buf[0], t->GetThreadId());
3784    Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
3785  } else {
3786    CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
3787    ScopedObjectAccessUnchecked soa(Thread::Current());
3788    StackHandleScope<1> hs(soa.Self());
3789    Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
3790    size_t char_count = (name.Get() != NULL) ? name->GetLength() : 0;
3791    const jchar* chars = (name.Get() != NULL) ? name->GetCharArray()->GetData() : NULL;
3792
3793    std::vector<uint8_t> bytes;
3794    JDWP::Append4BE(bytes, t->GetThreadId());
3795    JDWP::AppendUtf16BE(bytes, chars, char_count);
3796    CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
3797    Dbg::DdmSendChunk(type, bytes);
3798  }
3799}
3800
3801void Dbg::DdmSetThreadNotification(bool enable) {
3802  // Enable/disable thread notifications.
3803  gDdmThreadNotification = enable;
3804  if (enable) {
3805    // Suspend the VM then post thread start notifications for all threads. Threads attaching will
3806    // see a suspension in progress and block until that ends. They then post their own start
3807    // notification.
3808    SuspendVM();
3809    std::list<Thread*> threads;
3810    Thread* self = Thread::Current();
3811    {
3812      MutexLock mu(self, *Locks::thread_list_lock_);
3813      threads = Runtime::Current()->GetThreadList()->GetList();
3814    }
3815    {
3816      ScopedObjectAccess soa(self);
3817      for (Thread* thread : threads) {
3818        Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
3819      }
3820    }
3821    ResumeVM();
3822  }
3823}
3824
3825void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
3826  if (IsDebuggerActive()) {
3827    gJdwpState->PostThreadChange(t, type == CHUNK_TYPE("THCR"));
3828  }
3829  Dbg::DdmSendThreadNotification(t, type);
3830}
3831
3832void Dbg::PostThreadStart(Thread* t) {
3833  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
3834}
3835
3836void Dbg::PostThreadDeath(Thread* t) {
3837  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
3838}
3839
3840void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
3841  CHECK(buf != NULL);
3842  iovec vec[1];
3843  vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
3844  vec[0].iov_len = byte_count;
3845  Dbg::DdmSendChunkV(type, vec, 1);
3846}
3847
3848void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
3849  DdmSendChunk(type, bytes.size(), &bytes[0]);
3850}
3851
3852void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
3853  if (gJdwpState == NULL) {
3854    VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
3855  } else {
3856    gJdwpState->DdmSendChunkV(type, iov, iov_count);
3857  }
3858}
3859
3860int Dbg::DdmHandleHpifChunk(HpifWhen when) {
3861  if (when == HPIF_WHEN_NOW) {
3862    DdmSendHeapInfo(when);
3863    return true;
3864  }
3865
3866  if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
3867    LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
3868    return false;
3869  }
3870
3871  gDdmHpifWhen = when;
3872  return true;
3873}
3874
3875bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
3876  if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
3877    LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
3878    return false;
3879  }
3880
3881  if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
3882    LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
3883    return false;
3884  }
3885
3886  if (native) {
3887    gDdmNhsgWhen = when;
3888    gDdmNhsgWhat = what;
3889  } else {
3890    gDdmHpsgWhen = when;
3891    gDdmHpsgWhat = what;
3892  }
3893  return true;
3894}
3895
3896void Dbg::DdmSendHeapInfo(HpifWhen reason) {
3897  // If there's a one-shot 'when', reset it.
3898  if (reason == gDdmHpifWhen) {
3899    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
3900      gDdmHpifWhen = HPIF_WHEN_NEVER;
3901    }
3902  }
3903
3904  /*
3905   * Chunk HPIF (client --> server)
3906   *
3907   * Heap Info. General information about the heap,
3908   * suitable for a summary display.
3909   *
3910   *   [u4]: number of heaps
3911   *
3912   *   For each heap:
3913   *     [u4]: heap ID
3914   *     [u8]: timestamp in ms since Unix epoch
3915   *     [u1]: capture reason (same as 'when' value from server)
3916   *     [u4]: max heap size in bytes (-Xmx)
3917   *     [u4]: current heap size in bytes
3918   *     [u4]: current number of bytes allocated
3919   *     [u4]: current number of objects allocated
3920   */
3921  uint8_t heap_count = 1;
3922  gc::Heap* heap = Runtime::Current()->GetHeap();
3923  std::vector<uint8_t> bytes;
3924  JDWP::Append4BE(bytes, heap_count);
3925  JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
3926  JDWP::Append8BE(bytes, MilliTime());
3927  JDWP::Append1BE(bytes, reason);
3928  JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
3929  JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
3930  JDWP::Append4BE(bytes, heap->GetBytesAllocated());
3931  JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
3932  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
3933  Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
3934}
3935
3936enum HpsgSolidity {
3937  SOLIDITY_FREE = 0,
3938  SOLIDITY_HARD = 1,
3939  SOLIDITY_SOFT = 2,
3940  SOLIDITY_WEAK = 3,
3941  SOLIDITY_PHANTOM = 4,
3942  SOLIDITY_FINALIZABLE = 5,
3943  SOLIDITY_SWEEP = 6,
3944};
3945
3946enum HpsgKind {
3947  KIND_OBJECT = 0,
3948  KIND_CLASS_OBJECT = 1,
3949  KIND_ARRAY_1 = 2,
3950  KIND_ARRAY_2 = 3,
3951  KIND_ARRAY_4 = 4,
3952  KIND_ARRAY_8 = 5,
3953  KIND_UNKNOWN = 6,
3954  KIND_NATIVE = 7,
3955};
3956
3957#define HPSG_PARTIAL (1<<7)
3958#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
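// The state byte stores the kind in bits 3-5 and the solidity in bits 0-2; bit 7 (HPSG_PARTIAL)
// marks a record whose chunk continues in the following record. For example,
// HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) == ((4 & 0x7) << 3) | (1 & 0x7) == 0x21.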
3959
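// Accumulates heap segment records (HPSG/HPSO/NHSG) into a fixed-size buffer and ships them as DDM
// chunks, flushing whenever the buffer fills or a new segment begins.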
3960class HeapChunkContext {
3961 public:
3962  // Maximum chunk size.  Obtain this from the formula:
3963  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
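  // For example, a hypothetical 512 MiB heap with 8-byte allocation units needs at most
  // ((512 * 1024 * 1024 / 8 + 255) / 256) * 2 = 524288 bytes, far more than the 16 KiB buffer
  // below, so AppendChunk() flushes as the buffer fills.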
3964  HeapChunkContext(bool merge, bool native)
3965      : buf_(16384 - 16),
3966        type_(0),
3967        merge_(merge),
3968        chunk_overhead_(0) {
3969    Reset();
3970    if (native) {
3971      type_ = CHUNK_TYPE("NHSG");
3972    } else {
3973      type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
3974    }
3975  }
3976
3977  ~HeapChunkContext() {
3978    if (p_ > &buf_[0]) {
3979      Flush();
3980    }
3981  }
3982
3983  void SetChunkOverhead(size_t chunk_overhead) {
3984    chunk_overhead_ = chunk_overhead;
3985  }
3986
3987  void ResetStartOfNextChunk() {
3988    startOfNextMemoryChunk_ = nullptr;
3989  }
3990
3991  void EnsureHeader(const void* chunk_ptr) {
3992    if (!needHeader_) {
3993      return;
3994    }
3995
3996    // Start a new HPSx chunk.
3997    JDWP::Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
3998    JDWP::Write1BE(&p_, 8);  // Size of allocation unit, in bytes.
3999
4000    JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
4001    JDWP::Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
4002    // [u4]: length of piece, in allocation units
4003    // We won't know this until we're done, so save the offset and stuff in a dummy value.
4004    pieceLenField_ = p_;
4005    JDWP::Write4BE(&p_, 0x55555555);
4006    needHeader_ = false;
4007  }
4008
4009  void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4010    if (pieceLenField_ == NULL) {
4011      // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
4012      CHECK(needHeader_);
4013      return;
4014    }
4015    // Patch the "length of piece" field.
4016    CHECK_LE(&buf_[0], pieceLenField_);
4017    CHECK_LE(pieceLenField_, p_);
4018    JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
4019
4020    Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
4021    Reset();
4022  }
4023
4024  static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg)
4025      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4026                            Locks::mutator_lock_) {
4027    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(start, end, used_bytes);
4028  }
4029
4030 private:
4031  enum { ALLOCATION_UNIT_SIZE = 8 };
4032
4033  void Reset() {
4034    p_ = &buf_[0];
4035    ResetStartOfNextChunk();
4036    totalAllocationUnits_ = 0;
4037    needHeader_ = true;
4038    pieceLenField_ = NULL;
4039  }
4040
4041  void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes)
4042      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4043                            Locks::mutator_lock_) {
4044    // Note: heap callbacks cannot manipulate the heap upon which they are crawling. Care is taken
4045    // in the following code not to allocate memory, by ensuring buf_ is of the correct size.
4046    if (used_bytes == 0) {
4047      if (start == NULL) {
4048        // Reset for start of new heap.
4049        startOfNextMemoryChunk_ = NULL;
4050        Flush();
4051      }
4052      // Only process in-use memory so that free region information
4053      // also includes dlmalloc bookkeeping.
4054      return;
4055    }
4056
4057    /* If we're looking at the native heap, we'll just return
4058     * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
4059     */
4060    bool native = type_ == CHUNK_TYPE("NHSG");
4061
4062    // TODO: I'm not sure using start of next chunk works well with multiple spaces. We shouldn't
4063    // count gaps in between spaces as free memory.
4064    if (startOfNextMemoryChunk_ != NULL) {
4065      // Transmit any pending free memory. Native free regions larger than kMaxFreeLen are likely
4066      // the result of mmap use, so don't report them. If no free memory was reported, start a
4067      // new segment.
4068      bool flush = true;
4069      if (start > startOfNextMemoryChunk_) {
4070        const size_t kMaxFreeLen = 2 * kPageSize;
4071        void* freeStart = startOfNextMemoryChunk_;
4072        void* freeEnd = start;
4073        size_t freeLen = reinterpret_cast<char*>(freeEnd) - reinterpret_cast<char*>(freeStart);
4074        if (!native || freeLen < kMaxFreeLen) {
4075          AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen);
4076          flush = false;
4077        }
4078      }
4079      if (flush) {
4080        startOfNextMemoryChunk_ = NULL;
4081        Flush();
4082      }
4083    }
4084    mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
4085
4086    // Determine the type of this chunk.
4087    // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
4088    // If it's the same, we should combine them.
4089    uint8_t state = ExamineObject(obj, native);
4090    AppendChunk(state, start, used_bytes + chunk_overhead_);
4091    startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4092  }
4093
4094  void AppendChunk(uint8_t state, void* ptr, size_t length)
4095      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4096    // Make sure there's enough room left in the buffer.
4097    // We need two bytes for every (possibly partial) group of 256 allocation units used by the
4098    // chunk, plus 17 bytes for any header.
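    // For example, a hypothetical 4096-byte chunk needs ((4096 / 8 + 255) / 256) * 2 + 17 = 21 bytes.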
4099    size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
4100    size_t bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4101    if (bytesLeft < needed) {
4102      Flush();
4103    }
4104
4105    bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4106    if (bytesLeft < needed) {
4107      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
4108          << needed << " bytes)";
4109      return;
4110    }
4111    EnsureHeader(ptr);
4112    // Write out the chunk description.
4113    length /= ALLOCATION_UNIT_SIZE;   // Convert to allocation units.
4114    totalAllocationUnits_ += length;
4115    while (length > 256) {
4116      *p_++ = state | HPSG_PARTIAL;
4117      *p_++ = 255;     // length - 1
4118      length -= 256;
4119    }
4120    *p_++ = state;
4121    *p_++ = length - 1;
4122  }
4123
4124  uint8_t ExamineObject(mirror::Object* o, bool is_native_heap)
4125      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
4126    if (o == NULL) {
4127      return HPSG_STATE(SOLIDITY_FREE, 0);
4128    }
4129
4130    // It's an allocated chunk. Figure out what it is.
4131
4132    // If we're looking at the native heap, we'll just return
4133    // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
4134    if (is_native_heap) {
4135      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4136    }
4137
4138    if (!Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) {
4139      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4140    }
4141
4142    mirror::Class* c = o->GetClass();
4143    if (c == NULL) {
4144      // The object was probably just created but hasn't been initialized yet.
4145      return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4146    }
4147
4148    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
4149      LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
4150      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4151    }
4152
4153    if (c->IsClassClass()) {
4154      return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
4155    }
4156
4157    if (c->IsArrayClass()) {
4158      if (o->IsObjectArray()) {
4159        return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4160      }
4161      switch (c->GetComponentSize()) {
4162      case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
4163      case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
4164      case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4165      case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
4166      }
4167    }
4168
4169    return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4170  }
4171
4172  std::vector<uint8_t> buf_;
4173  uint8_t* p_;
4174  uint8_t* pieceLenField_;
4175  void* startOfNextMemoryChunk_;
4176  size_t totalAllocationUnits_;
4177  uint32_t type_;
4178  bool merge_;
4179  bool needHeader_;
4180  size_t chunk_overhead_;
4181
4182  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
4183};
4184
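// Adapts per-object visitation of a bump pointer space to HeapChunkCallback by reporting each
// object as a used chunk of its aligned size.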
4185static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
4186    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
4187  const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
4188  HeapChunkContext::HeapChunkCallback(
4189      obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
4190}
4191
4192void Dbg::DdmSendHeapSegments(bool native) {
4193  Dbg::HpsgWhen when;
4194  Dbg::HpsgWhat what;
4195  if (!native) {
4196    when = gDdmHpsgWhen;
4197    what = gDdmHpsgWhat;
4198  } else {
4199    when = gDdmNhsgWhen;
4200    what = gDdmNhsgWhat;
4201  }
4202  if (when == HPSG_WHEN_NEVER) {
4203    return;
4204  }
4205
4206  // Figure out what kind of chunks we'll be sending.
4207  CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS) << static_cast<int>(what);
4208
4209  // First, send a heap start chunk.
4210  uint8_t heap_id[4];
4211  JDWP::Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
4212  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
4213
4214  Thread* self = Thread::Current();
4215
4216  // To allow the Walk/InspectAll() below to exclusively-lock the
4217  // mutator lock, temporarily release the shared access to the
4218  // mutator lock here by transitioning to the suspended state.
4219  Locks::mutator_lock_->AssertSharedHeld(self);
4220  self->TransitionFromRunnableToSuspended(kSuspended);
4221
4222  // Send a series of heap segment chunks.
4223  HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
4224  if (native) {
4225#ifdef USE_DLMALLOC
4226    dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
4227#else
4228    UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
4229#endif
4230  } else {
4231    gc::Heap* heap = Runtime::Current()->GetHeap();
4232    for (const auto& space : heap->GetContinuousSpaces()) {
4233      if (space->IsDlMallocSpace()) {
4234        // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
4235        // allocation then the first sizeof(size_t) may belong to it.
4236        context.SetChunkOverhead(sizeof(size_t));
4237        space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4238      } else if (space->IsRosAllocSpace()) {
4239        context.SetChunkOverhead(0);
4240        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4241      } else if (space->IsBumpPointerSpace()) {
4242        context.SetChunkOverhead(0);
4243        ReaderMutexLock mu(self, *Locks::mutator_lock_);
4244        WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
4245        space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
4246      } else {
4247        UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
4248      }
4249      context.ResetStartOfNextChunk();
4250    }
4251    // Walk the large objects, these are not in the AllocSpace.
4252    context.SetChunkOverhead(0);
4253    heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4254  }
4255
4256  // Shared-lock the mutator lock back.
4257  self->TransitionFromSuspendedToRunnable();
4258  Locks::mutator_lock_->AssertSharedHeld(self);
4259
4260  // Finally, send a heap end chunk.
4261  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
4262}
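
// Illustratively, one pass of DdmSendHeapSegments() for the managed heap frames its
// DDM traffic as:
//   HPST <4-byte heap id>                       -- heap start chunk (sent above)
//   ...  heap segment chunks built up and flushed by HeapChunkContext ...
//   HPEN <4-byte heap id>                       -- heap end chunk (sent above)
// with the NHST/NHEN pair used instead when dumping the native heap.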

static size_t GetAllocTrackerMax() {
#ifdef HAVE_ANDROID_OS
  // Check whether there's a system property overriding the number of records.
  const char* propertyName = "dalvik.vm.allocTrackerMax";
  char allocRecordMaxString[PROPERTY_VALUE_MAX];
  if (property_get(propertyName, allocRecordMaxString, "") > 0) {
    char* end;
    size_t value = strtoul(allocRecordMaxString, &end, 10);
    if (*end != '\0') {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
                 << "' --- invalid";
      return kDefaultNumAllocRecords;
    }
    if (!IsPowerOfTwo(value)) {
      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
                 << "' --- not a power of two";
      return kDefaultNumAllocRecords;
    }
    return value;
  }
#endif
  return kDefaultNumAllocRecords;
}
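
// For reference, the property consulted above can be overridden on a device build,
// e.g. (illustrative value; it must be a power of two or it is ignored):
//
//   adb shell setprop dalvik.vm.allocTrackerMax 16384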

void Dbg::SetAllocTrackingEnabled(bool enable) {
  Thread* self = Thread::Current();
  if (enable) {
    {
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (recent_allocation_records_ != nullptr) {
        return;  // Already enabled, bail.
      }
      alloc_record_max_ = GetAllocTrackerMax();
      LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
                << kMaxAllocRecordStackDepth << " frames, taking "
                << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
      DCHECK_EQ(alloc_record_head_, 0U);
      DCHECK_EQ(alloc_record_count_, 0U);
      recent_allocation_records_ = new AllocRecord[alloc_record_max_];
      CHECK(recent_allocation_records_ != nullptr);
    }
    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
  } else {
    {
      ScopedObjectAccess soa(self);  // For type_cache_.Clear();
      MutexLock mu(self, *Locks::alloc_tracker_lock_);
      if (recent_allocation_records_ == nullptr) {
        return;  // Already disabled, bail.
      }
      LOG(INFO) << "Disabling alloc tracker";
      delete[] recent_allocation_records_;
      recent_allocation_records_ = nullptr;
      alloc_record_head_ = 0;
      alloc_record_count_ = 0;
      type_cache_.Clear();
    }
    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
  }
}

struct AllocRecordStackVisitor : public StackVisitor {
  AllocRecordStackVisitor(Thread* thread, AllocRecord* record)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr), record(record), depth(0) {}

  // TODO: Enable annotalysis. We know the lock is held in the constructor, but the
  // abstraction confuses annotalysis.
  bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
    if (depth >= kMaxAllocRecordStackDepth) {
      return false;
    }
    mirror::ArtMethod* m = GetMethod();
    if (!m->IsRuntimeMethod()) {
      record->StackElement(depth)->SetMethod(m);
      record->StackElement(depth)->SetDexPc(GetDexPc());
      ++depth;
    }
    return true;
  }

  ~AllocRecordStackVisitor() {
    // Clear out any unused stack trace elements.
    for (; depth < kMaxAllocRecordStackDepth; ++depth) {
      record->StackElement(depth)->SetMethod(nullptr);
      record->StackElement(depth)->SetDexPc(0);
    }
  }

  AllocRecord* record;
  size_t depth;
};

void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) {
  Thread* self = Thread::Current();
  CHECK(self != nullptr);

  MutexLock mu(self, *Locks::alloc_tracker_lock_);
  if (recent_allocation_records_ == nullptr) {
    // Allocation tracking is disabled (or in the process of shutting down); bail.
    return;
  }

  // Advance and clip.
  if (++alloc_record_head_ == alloc_record_max_) {
    alloc_record_head_ = 0;
  }

  // Fill in the basics.
  AllocRecord* record = &recent_allocation_records_[alloc_record_head_];
  record->SetType(type);
  record->SetByteCount(byte_count);
  record->SetThinLockId(self->GetThreadId());

  // Fill in the stack trace.
  AllocRecordStackVisitor visitor(self, record);
  visitor.WalkStack();

  if (alloc_record_count_ < alloc_record_max_) {
    ++alloc_record_count_;
  }
}

// Returns the index of the head element (the oldest record, where iteration starts).
//
// We point at the most-recently-written record, so if alloc_record_count_ is 1
// we want to use the current element.  Take "head+1" and subtract count
// from it.
//
// We need to handle underflow in our circular buffer, so we add
// alloc_record_max_ and then mask it back down.
size_t Dbg::HeadIndex() {
  return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) &
      (Dbg::alloc_record_max_ - 1);
}
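
// For example, with alloc_record_max_ = 8, alloc_record_head_ = 2 and
// alloc_record_count_ = 3, the live records sit at indexes 0, 1 and 2 (index 2 being
// the most recent). HeadIndex() returns (2 + 1 + 8 - 3) & 7 = 0, the oldest record,
// and callers then walk forward with i = (i + 1) & (alloc_record_max_ - 1).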

void Dbg::DumpRecentAllocations() {
  ScopedObjectAccess soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
  if (recent_allocation_records_ == nullptr) {
    LOG(INFO) << "Not recording tracked allocations";
    return;
  }

  // "i" starts at the oldest record and moves forward to the most recent one.
  size_t i = HeadIndex();
  const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
  uint16_t count = capped_count;

  LOG(INFO) << "Tracked allocations (head=" << alloc_record_head_ << " count=" << count << ")";
  while (count--) {
    AllocRecord* record = &recent_allocation_records_[i];

    LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->ThinLockId(), record->ByteCount())
              << PrettyClass(record->Type());

    for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
      AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame);
      mirror::ArtMethod* m = stack_element->Method();
      if (m == nullptr) {
        break;
      }
      LOG(INFO) << "    " << PrettyMethod(m) << " line " << stack_element->LineNumber();
    }

    // Pause periodically to help logcat catch up.
    if ((count % 5) == 0) {
      usleep(40000);
    }

    i = (i + 1) & (alloc_record_max_ - 1);
  }
}

class StringTable {
 public:
  StringTable() {
  }

  void Add(const std::string& str) {
    table_.insert(str);
  }

  void Add(const char* str) {
    table_.insert(str);
  }

  size_t IndexOf(const char* s) const {
    auto it = table_.find(s);
    if (it == table_.end()) {
      LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
    }
    return std::distance(table_.begin(), it);
  }

  size_t Size() const {
    return table_.size();
  }

  void WriteTo(std::vector<uint8_t>& bytes) const {
    for (const std::string& str : table_) {
      const char* s = str.c_str();
      size_t s_len = CountModifiedUtf8Chars(s);
      std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
      ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
      JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
    }
  }

 private:
  std::set<std::string> table_;
  DISALLOW_COPY_AND_ASSIGN(StringTable);
};
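
// StringTable keeps its entries in a sorted std::set, so the index returned by
// IndexOf() is only stable once every Add() has been made: inserting a string that
// sorts before an existing entry would shift the indexes of everything after it.
// A minimal usage sketch (hypothetical strings):
//
//   StringTable t;
//   t.Add("LFoo;");
//   t.Add("LBar;");                      // sorts before "LFoo;"
//   size_t idx = t.IndexOf("LFoo;");     // 1, not 0, because the table is sorted
//
// GetRecentAllocations() below follows this pattern: all strings are added in a
// first pass, and indexes are only looked up in the second pass.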

static const char* GetMethodSourceFile(mirror::ArtMethod* method)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(method != nullptr);
  const char* source_file = method->GetDeclaringClassSourceFile();
  return (source_file != nullptr) ? source_file : "";
}

/*
 * The data we send to DDMS contains everything we have recorded.
 *
 * Message header (all values big-endian):
 * (1b) message header len (to allow future expansion); includes itself
 * (1b) entry header len
 * (1b) stack frame len
 * (2b) number of entries
 * (4b) offset to string table from start of message
 * (2b) number of class name strings
 * (2b) number of method name strings
 * (2b) number of source file name strings
 * For each entry:
 *   (4b) total allocation size
 *   (2b) thread id
 *   (2b) allocated object's class name index
 *   (1b) stack depth
 *   For each stack frame:
 *     (2b) method's class name
 *     (2b) method name
 *     (2b) method source file
 *     (2b) line number, clipped to 32767; -2 if native; -1 if no source
 * (xb) class name strings
 * (xb) method name strings
 * (xb) source file strings
 *
 * As with other DDM traffic, strings are sent as a 4-byte length
 * followed by UTF-16 data.
 *
 * We send up 16-bit unsigned indexes into string tables.  In theory there
 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
 * each table, but in practice there should be far fewer.
 *
 * The chief reason for using a string table here is to keep the size of
 * the DDMS message to a minimum.  This is partly to make the protocol
 * efficient, but also because we have to form the whole thing up all at
 * once in a memory buffer.
 *
 * We use separate string tables for class names, method names, and source
 * files to keep the indexes small.  There will generally be no overlap
 * between the contents of these tables.
 */
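/*
 * For reference, the fixed-size pieces above account for the constants used below:
 *   message header: 1 + 1 + 1 + 2 + 4 + 2 + 2 + 2 = 15 bytes (kMessageHeaderLen)
 *   entry header:   4 + 2 + 2 + 1 = 9 bytes (kEntryHeaderLen)
 *   stack frame:    2 + 2 + 2 + 2 = 8 bytes (kStackFrameLen)
 */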
jbyteArray Dbg::GetRecentAllocations() {
  if (false) {  // Debugging aid: flip to true to also dump the records to the log.
    DumpRecentAllocations();
  }

  Thread* self = Thread::Current();
  std::vector<uint8_t> bytes;
  {
    MutexLock mu(self, *Locks::alloc_tracker_lock_);
    //
    // Part 1: generate string tables.
    //
    StringTable class_names;
    StringTable method_names;
    StringTable filenames;

    const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
    uint16_t count = capped_count;
    size_t idx = HeadIndex();
    while (count--) {
      AllocRecord* record = &recent_allocation_records_[idx];
      std::string temp;
      class_names.Add(record->Type()->GetDescriptor(&temp));
      for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
        mirror::ArtMethod* m = record->StackElement(i)->Method();
        if (m != nullptr) {
          class_names.Add(m->GetDeclaringClassDescriptor());
          method_names.Add(m->GetName());
          filenames.Add(GetMethodSourceFile(m));
        }
      }

      idx = (idx + 1) & (alloc_record_max_ - 1);
    }

    LOG(INFO) << "allocation records: " << capped_count;

    //
    // Part 2: Generate the output and store it in the buffer.
    //

    // (1b) message header len (to allow future expansion); includes itself
    // (1b) entry header len
    // (1b) stack frame len
    const int kMessageHeaderLen = 15;
    const int kEntryHeaderLen = 9;
    const int kStackFrameLen = 8;
    JDWP::Append1BE(bytes, kMessageHeaderLen);
    JDWP::Append1BE(bytes, kEntryHeaderLen);
    JDWP::Append1BE(bytes, kStackFrameLen);

    // (2b) number of entries
    // (4b) offset to string table from start of message
    // (2b) number of class name strings
    // (2b) number of method name strings
    // (2b) number of source file name strings
    JDWP::Append2BE(bytes, capped_count);
    size_t string_table_offset = bytes.size();
    JDWP::Append4BE(bytes, 0);  // We'll patch this later...
    JDWP::Append2BE(bytes, class_names.Size());
    JDWP::Append2BE(bytes, method_names.Size());
    JDWP::Append2BE(bytes, filenames.Size());

    idx = HeadIndex();
    std::string temp;
    for (count = capped_count; count != 0; --count) {
      // For each entry:
      // (4b) total allocation size
      // (2b) thread id
      // (2b) allocated object's class name index
      // (1b) stack depth
      AllocRecord* record = &recent_allocation_records_[idx];
      size_t stack_depth = record->GetDepth();
      size_t allocated_object_class_name_index =
          class_names.IndexOf(record->Type()->GetDescriptor(&temp));
      JDWP::Append4BE(bytes, record->ByteCount());
      JDWP::Append2BE(bytes, record->ThinLockId());
      JDWP::Append2BE(bytes, allocated_object_class_name_index);
      JDWP::Append1BE(bytes, stack_depth);

      for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
        // For each stack frame:
        // (2b) method's class name
        // (2b) method name
        // (2b) method source file
        // (2b) line number, clipped to 32767; -2 if native; -1 if no source
        mirror::ArtMethod* m = record->StackElement(stack_frame)->Method();
        size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
        size_t method_name_index = method_names.IndexOf(m->GetName());
        size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
        JDWP::Append2BE(bytes, class_name_index);
        JDWP::Append2BE(bytes, method_name_index);
        JDWP::Append2BE(bytes, file_name_index);
        JDWP::Append2BE(bytes, record->StackElement(stack_frame)->LineNumber());
      }
      idx = (idx + 1) & (alloc_record_max_ - 1);
    }

    // (xb) class name strings
    // (xb) method name strings
    // (xb) source file strings
    JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
    class_names.WriteTo(bytes);
    method_names.WriteTo(bytes);
    filenames.WriteTo(bytes);
  }
  JNIEnv* env = self->GetJniEnv();
  jbyteArray result = env->NewByteArray(bytes.size());
  if (result != nullptr) {
    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
  }
  return result;
}

mirror::ArtMethod* DeoptimizationRequest::Method() const {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  return soa.DecodeMethod(method_);
}

void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  method_ = soa.EncodeMethod(m);
}

}  // namespace art