debugger.cc revision 29259fa6b0514866d2d4bf57d58c1557b26abbb7
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger.h"

#include <sys/uio.h>

#include <set>

#include "arch/context.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "field_helper.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope.h"
#include "jdwp/object_registry.h"
#include "method_helper.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "quick/inline_method_analyser.h"
#include "reflection.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "throw_location.h"
#include "utf.h"
#include "verifier/method_verifier-inl.h"
#include "well_known_classes.h"

#ifdef HAVE_ANDROID_OS
#include "cutils/properties.h"
#endif

namespace art {

static const size_t kMaxAllocRecordStackDepth = 16;  // Max 255.
static const size_t kDefaultNumAllocRecords = 64*1024;  // Must be a power of 2. 2BE can hold 64k-1.

// Limit alloc_record_count to the 2BE value that is the limit of the current protocol.
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
  if (alloc_record_count > 0xffff) {
    return 0xffff;
  }
  return alloc_record_count;
}
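
// For example, with 70000 recent allocation records the count reported over the wire is
// capped to 0xffff (65535), since the protocol's two-byte big-endian (2BE) count field
// cannot hold more.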

class AllocRecordStackTraceElement {
 public:
  AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {
  }

  int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* method = Method();
    DCHECK(method != nullptr);
    return method->GetLineNumFromDexPC(DexPc());
  }

  mirror::ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    return soa.DecodeMethod(method_);
  }

  void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(m);
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  void SetDexPc(uint32_t pc) {
    dex_pc_ = pc;
  }

 private:
  jmethodID method_;
  uint32_t dex_pc_;
};

jobject Dbg::TypeCache::Add(mirror::Class* t) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  int32_t hash_code = t->IdentityHashCode();
  auto range = objects_.equal_range(hash_code);
  for (auto it = range.first; it != range.second; ++it) {
    if (soa.Decode<mirror::Class*>(it->second) == t) {
      // Found a matching weak global, return it.
      return it->second;
    }
  }
  JNIEnv* env = soa.Env();
  const jobject local_ref = soa.AddLocalReference<jobject>(t);
  const jobject weak_global = env->NewWeakGlobalRef(local_ref);
  env->DeleteLocalRef(local_ref);
  objects_.insert(std::make_pair(hash_code, weak_global));
  return weak_global;
}
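
// Distinct classes can share an identity hash code; such entries coexist under the same
// key and are told apart by the reference-equality check above.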

void Dbg::TypeCache::Clear() {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  Thread* self = Thread::Current();
  for (const auto& p : objects_) {
    vm->DeleteWeakGlobalRef(self, p.second);
  }
  objects_.clear();
}

class AllocRecord {
 public:
  AllocRecord() : type_(nullptr), byte_count_(0), thin_lock_id_(0) {}

  mirror::Class* Type() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_));
  }

  void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
                                                       Locks::alloc_tracker_lock_) {
    type_ = Dbg::type_cache_.Add(t);
  }

  size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    size_t depth = 0;
    while (depth < kMaxAllocRecordStackDepth && stack_[depth].Method() != NULL) {
      ++depth;
    }
    return depth;
  }

  size_t ByteCount() const {
    return byte_count_;
  }

  void SetByteCount(size_t count) {
    byte_count_ = count;
  }

  uint16_t ThinLockId() const {
    return thin_lock_id_;
  }

  void SetThinLockId(uint16_t id) {
    thin_lock_id_ = id;
  }

  AllocRecordStackTraceElement* StackElement(size_t index) {
    DCHECK_LT(index, kMaxAllocRecordStackDepth);
    return &stack_[index];
  }

 private:
  jobject type_;  // This is a weak global.
  size_t byte_count_;
  uint16_t thin_lock_id_;
  AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];  // Unused entries have NULL method.
};

class Breakpoint {
 public:
  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(method);
  }

  Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    : method_(nullptr), dex_pc_(other.dex_pc_),
      need_full_deoptimization_(other.need_full_deoptimization_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(other.Method());
  }

  mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    return soa.DecodeMethod(method_);
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  bool NeedFullDeoptimization() const {
    return need_full_deoptimization_;
  }

 private:
  // The location of this breakpoint.
  jmethodID method_;
  uint32_t dex_pc_;

  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
  bool need_full_deoptimization_;
};

static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
  return os;
}
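
// Illustrative output: a breakpoint at dex pc 4 in Object.notifyAll() would print as
// "Breakpoint[void java.lang.Object.notifyAll() @0x4]".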

class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
 public:
  DebugInstrumentationListener() {}
  virtual ~DebugInstrumentationListener() {}

  void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                     uint32_t dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point, but native method entry stubs aren't.
      return;
    }
    Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
  }

  void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc, const JValue& return_value)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point, but native method exit stubs aren't.
      return;
    }
    Dbg::UpdateDebugger(thread, this_object, method, dex_pc, Dbg::kMethodExit, &return_value);
  }

  void MethodUnwind(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // We're not registered to listen for this kind of event, so complain.
    LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
               << " " << dex_pc;
  }

  void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                  uint32_t new_dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, 0, nullptr);
  }

  void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                 uint32_t dex_pc, mirror::ArtField* field)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
  }

  void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
  }

  void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
                       mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
                       mirror::Throwable* exception_object)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;

// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;

// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
static bool gJdwpConfigured = false;

// Broken-down JDWP options. (Only valid if IsJdwpConfigured() is true.)
static JDWP::JdwpOptions gJdwpOptions;

// Runtime JDWP state.
static JDWP::JdwpState* gJdwpState = NULL;
static bool gDebuggerConnected;  // debugger or DDMS is connected.
static bool gDebuggerActive;     // debugger is making requests.
static bool gDisposed;           // debugger called VirtualMachine.Dispose, so we should drop the connection.

static bool gDdmThreadNotification = false;

// DDMS GC-related settings.
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;

static ObjectRegistry* gRegistry = nullptr;

// Recent allocation tracking.
AllocRecord* Dbg::recent_allocation_records_ = nullptr;  // TODO: CircularBuffer<AllocRecord>
size_t Dbg::alloc_record_max_ = 0;
size_t Dbg::alloc_record_head_ = 0;
size_t Dbg::alloc_record_count_ = 0;
Dbg::TypeCache Dbg::type_cache_;

// Deoptimization support.
std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
size_t Dbg::full_deoptimization_event_count_ = 0;
size_t Dbg::delayed_full_undeoptimization_count_ = 0;

// Instrumentation event reference counters.
size_t Dbg::dex_pc_change_event_ref_count_ = 0;
size_t Dbg::method_enter_event_ref_count_ = 0;
size_t Dbg::method_exit_event_ref_count_ = 0;
size_t Dbg::field_read_event_ref_count_ = 0;
size_t Dbg::field_write_event_ref_count_ = 0;
size_t Dbg::exception_catch_event_ref_count_ = 0;
uint32_t Dbg::instrumentation_events_ = 0;

// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);

void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
                                RootType root_type) {
  if (receiver != nullptr) {
    callback(&receiver, arg, tid, root_type);
  }
  if (thread != nullptr) {
    callback(&thread, arg, tid, root_type);
  }
  if (klass != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&klass), arg, tid, root_type);
  }
  if (method != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
  }
}

void DebugInvokeReq::Clear() {
  invoke_needed = false;
  receiver = nullptr;
  thread = nullptr;
  klass = nullptr;
  method = nullptr;
}

void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
                                   RootType root_type) {
  if (method != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
  }
}
bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
  return dex_pcs.find(dex_pc) != dex_pcs.end();
}

void SingleStepControl::Clear() {
  is_active = false;
  method = nullptr;
  dex_pcs.clear();
}

static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
    LOCKS_EXCLUDED(Locks::breakpoint_lock_)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
      return true;
    }
  }
  return false;
}

static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
  // A thread may be suspended for GC; in this code, we really want to know whether
  // there's a debugger suspension active.
  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
}

static mirror::Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    status = JDWP::ERR_INVALID_OBJECT;
    return NULL;
  }
  if (!o->IsArrayInstance()) {
    status = JDWP::ERR_INVALID_ARRAY;
    return NULL;
  }
  status = JDWP::ERR_NONE;
  return o->AsArray();
}

static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    status = JDWP::ERR_INVALID_OBJECT;
    return NULL;
  }
  if (!o->IsClass()) {
    status = JDWP::ERR_INVALID_CLASS;
    return NULL;
  }
  status = JDWP::ERR_NONE;
  return o->AsClass();
}

static JDWP::JdwpError DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id, Thread*& thread)
    EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* thread_peer = gRegistry->Get<mirror::Object*>(thread_id);
  if (thread_peer == NULL || thread_peer == ObjectRegistry::kInvalidObject) {
    // This isn't even an object.
    return JDWP::ERR_INVALID_OBJECT;
  }

  mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
    // This isn't a thread.
    return JDWP::ERR_INVALID_THREAD;
  }

  thread = Thread::FromManagedThread(soa, thread_peer);
  if (thread == NULL) {
    // This is a java.lang.Thread without a Thread*. Must be a zombie.
    return JDWP::ERR_THREAD_NOT_ALIVE;
  }
  return JDWP::ERR_NONE;
}

static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
  return static_cast<JDWP::JdwpTag>(descriptor[0]);
}
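
// For example, descriptor "I" (int) yields JT_INT ('I'), "[I" (int[]) yields JT_ARRAY ('['),
// and "Ljava/lang/String;" yields JT_OBJECT ('L').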

static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  std::string temp;
  const char* descriptor = klass->GetDescriptor(&temp);
  return BasicTagFromDescriptor(descriptor);
}

static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(c != NULL);
  if (c->IsArrayClass()) {
    return JDWP::JT_ARRAY;
  }
  if (c->IsStringClass()) {
    return JDWP::JT_STRING;
  }
  if (c->IsClassClass()) {
    return JDWP::JT_CLASS_OBJECT;
  }
  {
    mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
    if (thread_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD;
    }
  }
  {
    mirror::Class* thread_group_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
    if (thread_group_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD_GROUP;
    }
  }
  {
    mirror::Class* class_loader_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader);
    if (class_loader_class->IsAssignableFrom(c)) {
      return JDWP::JT_CLASS_LOADER;
    }
  }
  return JDWP::JT_OBJECT;
}

/*
 * Objects declared to hold Object might actually hold a more specific
 * type.  The debugger may take a special interest in these (e.g. it
 * wants to display the contents of Strings), so we want to return an
 * appropriate tag.
 *
 * Null objects are tagged JT_OBJECT.
 */
static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
}

static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_BOOLEAN:
  case JDWP::JT_BYTE:
  case JDWP::JT_CHAR:
  case JDWP::JT_FLOAT:
  case JDWP::JT_DOUBLE:
  case JDWP::JT_INT:
  case JDWP::JT_LONG:
  case JDWP::JT_SHORT:
  case JDWP::JT_VOID:
    return true;
  default:
    return false;
  }
}
/*
 * Handle one of the JDWP name/value pairs.
 *
 * JDWP options are:
 *  help: if specified, show help message and bail
 *  transport: may be dt_socket or dt_android_adb
 *  address: for dt_socket, "host:port", or just "port" when listening
 *  server: if "y", wait for debugger to attach; if "n", attach to debugger
 *  timeout: how long to wait for debugger to connect / listen
 *
 * Useful with server=n (these aren't supported yet):
 *  onthrow=<exception-name>: connect to debugger when exception thrown
 *  onuncaught=y|n: connect to debugger when uncaught exception thrown
 *  launch=<command-line>: launch the debugger itself
 *
 * The "transport" option is required, as is "address" if server=n.
 */
static bool ParseJdwpOption(const std::string& name, const std::string& value) {
  if (name == "transport") {
    if (value == "dt_socket") {
      gJdwpOptions.transport = JDWP::kJdwpTransportSocket;
    } else if (value == "dt_android_adb") {
      gJdwpOptions.transport = JDWP::kJdwpTransportAndroidAdb;
    } else {
      LOG(ERROR) << "JDWP transport not supported: " << value;
      return false;
    }
  } else if (name == "server") {
    if (value == "n") {
      gJdwpOptions.server = false;
    } else if (value == "y") {
      gJdwpOptions.server = true;
    } else {
      LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'";
      return false;
    }
  } else if (name == "suspend") {
    if (value == "n") {
      gJdwpOptions.suspend = false;
    } else if (value == "y") {
      gJdwpOptions.suspend = true;
    } else {
      LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'";
      return false;
    }
  } else if (name == "address") {
    /* this is either <port> or <host>:<port> */
    std::string port_string;
    gJdwpOptions.host.clear();
    std::string::size_type colon = value.find(':');
    if (colon != std::string::npos) {
      gJdwpOptions.host = value.substr(0, colon);
      port_string = value.substr(colon + 1);
    } else {
      port_string = value;
    }
    if (port_string.empty()) {
      LOG(ERROR) << "JDWP address missing port: " << value;
      return false;
    }
    char* end;
    uint64_t port = strtoul(port_string.c_str(), &end, 10);
    if (*end != '\0' || port > 0xffff) {
      LOG(ERROR) << "JDWP address has junk in port field: " << value;
      return false;
    }
    gJdwpOptions.port = port;
  } else if (name == "launch" || name == "onthrow" || name == "onuncaught" || name == "timeout") {
    /* valid but unsupported */
    LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'";
  } else {
    LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'";
  }

  return true;
}
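
// For example, "transport=dt_socket,address=8000,server=y,suspend=n" reaches
// ParseJdwpOptions() below as four name/value pairs: ("transport", "dt_socket"),
// ("address", "8000"), ("server", "y") and ("suspend", "n").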

/*
 * Parse the latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.:
 * "transport=dt_socket,address=8000,server=y,suspend=n"
 */
bool Dbg::ParseJdwpOptions(const std::string& options) {
  VLOG(jdwp) << "ParseJdwpOptions: " << options;

  std::vector<std::string> pairs;
  Split(options, ',', pairs);

  for (size_t i = 0; i < pairs.size(); ++i) {
    std::string::size_type equals = pairs[i].find('=');
    if (equals == std::string::npos) {
      LOG(ERROR) << "Can't parse JDWP option '" << pairs[i] << "' in '" << options << "'";
      return false;
    }
    if (!ParseJdwpOption(pairs[i].substr(0, equals), pairs[i].substr(equals + 1))) {
      return false;
    }
  }

  if (gJdwpOptions.transport == JDWP::kJdwpTransportUnknown) {
    LOG(ERROR) << "Must specify JDWP transport: " << options;
    return false;
  }
  if (!gJdwpOptions.server && (gJdwpOptions.host.empty() || gJdwpOptions.port == 0)) {
    LOG(ERROR) << "Must specify JDWP host and port when server=n: " << options;
    return false;
  }

  gJdwpConfigured = true;
  return true;
}

void Dbg::StartJdwp() {
  if (!gJdwpAllowed || !IsJdwpConfigured()) {
    // No JDWP for you!
    return;
  }

  CHECK(gRegistry == nullptr);
  gRegistry = new ObjectRegistry;

  // Init JDWP if the debugger is enabled. This may connect out to a
  // debugger, passively listen for a debugger, or block waiting for a
  // debugger.
  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
  if (gJdwpState == NULL) {
    // We probably failed because some other process has the port already, which means that
    // if we don't abort the user is likely to think they're talking to us when they're actually
    // talking to that other process.
    LOG(FATAL) << "Debugger thread failed to initialize";
  }

  // If a debugger has already attached, send the "welcome" message.
  // This may cause us to suspend all threads.
  if (gJdwpState->IsActive()) {
    ScopedObjectAccess soa(Thread::Current());
    if (!gJdwpState->PostVMStart()) {
      LOG(WARNING) << "Failed to post 'start' message to debugger";
    }
  }
}

void Dbg::StopJdwp() {
  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
  // destruction of gJdwpState).
  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
    gJdwpState->PostVMDeath();
  }
  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
  Disposed();
  delete gJdwpState;
  gJdwpState = nullptr;
  delete gRegistry;
  gRegistry = nullptr;
}

void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);
  }
}

void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

DebugInvokeReq* Dbg::GetInvokeReq() {
  return Thread::Current()->GetInvokeReq();
}

Thread* Dbg::GetDebugThread() {
  return (gJdwpState != NULL) ? gJdwpState->GetDebugThread() : NULL;
}

void Dbg::ClearWaitForEventThread() {
  gJdwpState->ClearWaitForEventThread();
}

void Dbg::Connected() {
  CHECK(!gDebuggerConnected);
  VLOG(jdwp) << "JDWP has attached";
  gDebuggerConnected = true;
  gDisposed = false;
}

void Dbg::Disposed() {
  gDisposed = true;
}

bool Dbg::IsDisposed() {
  return gDisposed;
}

void Dbg::GoActive() {
  // Enable all debugging features, including scans for breakpoints.
  // This is a no-op if we're already active.
  // Only called from the JDWP handler thread.
  if (gDebuggerActive) {
    return;
  }

  {
    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
    ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
    CHECK_EQ(gBreakpoints.size(), 0U);
  }

  {
    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
    CHECK_EQ(deoptimization_requests_.size(), 0U);
    CHECK_EQ(full_deoptimization_event_count_, 0U);
    CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
    CHECK_EQ(method_enter_event_ref_count_, 0U);
    CHECK_EQ(method_exit_event_ref_count_, 0U);
    CHECK_EQ(field_read_event_ref_count_, 0U);
    CHECK_EQ(field_write_event_ref_count_, 0U);
    CHECK_EQ(exception_catch_event_ref_count_, 0U);
  }

  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);
  CHECK_NE(old_state, kRunnable);
  runtime->GetInstrumentation()->EnableDeoptimization();
  instrumentation_events_ = 0;
  gDebuggerActive = true;
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();

  LOG(INFO) << "Debugger is active";
}

void Dbg::Disconnected() {
  CHECK(gDebuggerConnected);

  LOG(INFO) << "Debugger is no longer active";

  // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
  // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
  // and clear the object registry.
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);

  // Debugger may not be active at this point.
  if (gDebuggerActive) {
    {
      // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
      // This prevents us from having any pending deoptimization request when the debugger attaches
      // to us again while no event has been requested yet.
      MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
      deoptimization_requests_.clear();
      full_deoptimization_event_count_ = 0U;
      delayed_full_undeoptimization_count_ = 0U;
    }
    if (instrumentation_events_ != 0) {
      runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
                                                    instrumentation_events_);
      instrumentation_events_ = 0;
    }
    runtime->GetInstrumentation()->DisableDeoptimization();
    gDebuggerActive = false;
  }
  gRegistry->Clear();
  gDebuggerConnected = false;
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();
}

bool Dbg::IsDebuggerActive() {
  return gDebuggerActive;
}

bool Dbg::IsJdwpConfigured() {
  return gJdwpConfigured;
}

int64_t Dbg::LastDebuggerActivity() {
  return gJdwpState->LastDebuggerActivity();
}

void Dbg::UndoDebuggerSuspensions() {
  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
}

std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id);
  if (o == NULL) {
    return "NULL";
  }
  if (o == ObjectRegistry::kInvalidObject) {
    return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
  }
  if (!o->IsClass()) {
    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
  }
  std::string temp;
  return DescriptorToName(o->AsClass()->GetDescriptor(&temp));
}

JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_object_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, status);
  if (c == NULL) {
    return status;
  }
  class_object_id = gRegistry->Add(c);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclass_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, status);
  if (c == NULL) {
    return status;
  }
  if (c->IsInterface()) {
    // http://code.google.com/p/android/issues/detail?id=20856
    superclass_id = 0;
  } else {
    superclass_id = gRegistry->Add(c->GetSuperClass());
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader()));
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, status);
  if (c == NULL) {
    return status;
  }

  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;

  // Set ACC_SUPER. Dex files don't contain this flag, but only classes (never interfaces) are
  // supposed to have it set. Class.getModifiers doesn't return it, but JDWP does, so we set it
  // here.
  if ((access_flags & kAccInterface) == 0) {
    access_flags |= kAccSuper;
  }

  expandBufAdd4BE(pReply, access_flags);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  // Ensure all threads are suspended while we read objects' lock words.
  Thread* self = Thread::Current();
  CHECK_EQ(self->GetState(), kRunnable);
  self->TransitionFromRunnableToSuspended(kSuspended);
  Runtime::Current()->GetThreadList()->SuspendAll();

  MonitorInfo monitor_info(o);

  Runtime::Current()->GetThreadList()->ResumeAll();
  self->TransitionFromSuspendedToRunnable();

  if (monitor_info.owner_ != NULL) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
  } else {
    expandBufAddObjectId(reply, gRegistry->Add(NULL));
  }
  expandBufAdd4BE(reply, monitor_info.entry_count_);
  expandBufAdd4BE(reply, monitor_info.waiters_.size());
  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
                                      std::vector<JDWP::ObjectId>& monitors,
                                      std::vector<uint32_t>& stack_depths) {
  struct OwnedMonitorVisitor : public StackVisitor {
    OwnedMonitorVisitor(Thread* thread, Context* context,
                        std::vector<JDWP::ObjectId>* monitor_vector,
                        std::vector<uint32_t>* stack_depth_vector)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), current_stack_depth(0),
        monitors(monitor_vector), stack_depths(stack_depth_vector) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
        ++current_stack_depth;
      }
      return true;
    }

    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
      visitor->stack_depths->push_back(visitor->current_stack_depth);
    }

    size_t current_stack_depth;
    std::vector<JDWP::ObjectId>* monitors;
    std::vector<uint32_t>* stack_depths;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  Thread* thread;
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
    if (error != JDWP::ERR_NONE) {
      return error;
    }
    if (!IsSuspendedForDebugger(soa, thread)) {
      return JDWP::ERR_THREAD_NOT_SUSPENDED;
    }
  }
  std::unique_ptr<Context> context(Context::Create());
  OwnedMonitorVisitor visitor(thread, context.get(), &monitors, &stack_depths);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
                                         JDWP::ObjectId& contended_monitor) {
  mirror::Object* contended_monitor_obj;
  ScopedObjectAccessUnchecked soa(Thread::Current());
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    Thread* thread;
    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
    if (error != JDWP::ERR_NONE) {
      return error;
    }
    if (!IsSuspendedForDebugger(soa, thread)) {
      return JDWP::ERR_THREAD_NOT_SUSPENDED;
    }
    contended_monitor_obj = Monitor::GetContendedMonitor(thread);
  }
  // Add() requires that the thread_list_lock_ not be held, to avoid a lock-level violation.
  contended_monitor = gRegistry->Add(contended_monitor_obj);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                       std::vector<uint64_t>& counts)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  std::vector<mirror::Class*> classes;
  counts.clear();
  for (size_t i = 0; i < class_ids.size(); ++i) {
    JDWP::JdwpError status;
    mirror::Class* c = DecodeClass(class_ids[i], status);
    if (c == NULL) {
      return status;
    }
    classes.push_back(c);
    counts.push_back(0);
  }
  heap->CountInstances(classes, false, &counts[0]);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>& instances)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // We only want reachable instances, so do a GC.
  heap->CollectGarbage(false);
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == nullptr) {
    return status;
  }
  std::vector<mirror::Object*> raw_instances;
  Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    instances.push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                         std::vector<JDWP::ObjectId>& referring_objects)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  std::vector<mirror::Object*> raw_instances;
  heap->GetReferringObjects(o, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    referring_objects.push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->DisableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  // Unlike DisableCollection, the JDWP spec does not say that an invalid object causes an error,
  // and the RI ignores these cases and never returns an error. However, it's not obvious why this
  // command should behave differently from the DisableCollection and IsCollected commands, so
  // let's be stricter and return an error if this happens.
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->EnableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool& is_collected)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (object_id == 0) {
    // Null object id is invalid.
    return JDWP::ERR_INVALID_OBJECT;
  }
  // The JDWP spec says an INVALID_OBJECT error is returned if the object ID is not valid. However,
  // the RI seems to ignore this and simply assumes the object has been collected.
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    is_collected = true;
  } else {
    is_collected = gRegistry->IsCollected(object_id);
  }
  return JDWP::ERR_NONE;
}

void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gRegistry->DisposeObject(object_id, reference_count);
}

static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  DCHECK(klass != nullptr);
  if (klass->IsArrayClass()) {
    return JDWP::TT_ARRAY;
  } else if (klass->IsInterface()) {
    return JDWP::TT_INTERFACE;
  } else {
    return JDWP::TT_CLASS;
  }
}

JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, class_id);
  return JDWP::ERR_NONE;
}

void Dbg::GetClassList(std::vector<JDWP::RefTypeId>& classes) {
  // Get the complete list of reference classes (i.e. all classes except the primitive types).
  // Each non-primitive class is added to the given vector as a RefTypeId.
  struct ClassListCreator {
    explicit ClassListCreator(std::vector<JDWP::RefTypeId>& classes) : classes(classes) {
    }

    static bool Visit(mirror::Class* c, void* arg) {
      return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
    }

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS {
      if (!c->IsPrimitive()) {
        classes.push_back(gRegistry->AddRefType(c));
      }
      return true;
    }

    std::vector<JDWP::RefTypeId>& classes;
  };

  ClassListCreator clc(classes);
  Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(ClassListCreator::Visit,
                                                                       &clc);
}

JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
                                  uint32_t* pStatus, std::string* pDescriptor) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }

  if (c->IsArrayClass()) {
    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
    *pTypeTag = JDWP::TT_ARRAY;
  } else {
    if (c->IsErroneous()) {
      *pStatus = JDWP::CS_ERROR;
    } else {
      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
    }
    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
  }

  if (pDescriptor != NULL) {
    std::string temp;
    *pDescriptor = c->GetDescriptor(&temp);
  }
  return JDWP::ERR_NONE;
}

void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>& ids) {
  std::vector<mirror::Class*> classes;
  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
  ids.clear();
  for (size_t i = 0; i < classes.size(); ++i) {
    ids.push_back(gRegistry->Add(classes[i]));
  }
}

JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());

  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, type_id);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }
  std::string temp;
  *signature = c->GetDescriptor(&temp);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == nullptr) {
    return status;
  }
  const char* source_file = c->GetSourceFile();
  if (source_file == nullptr) {
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  result = source_file;
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t& tag) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  tag = TagFromObject(soa, o);
  return JDWP::ERR_NONE;
}

size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_VOID:
    return 0;
  case JDWP::JT_BYTE:
  case JDWP::JT_BOOLEAN:
    return 1;
  case JDWP::JT_CHAR:
  case JDWP::JT_SHORT:
    return 2;
  case JDWP::JT_FLOAT:
  case JDWP::JT_INT:
    return 4;
  case JDWP::JT_ARRAY:
  case JDWP::JT_OBJECT:
  case JDWP::JT_STRING:
  case JDWP::JT_THREAD:
  case JDWP::JT_THREAD_GROUP:
  case JDWP::JT_CLASS_LOADER:
  case JDWP::JT_CLASS_OBJECT:
    return sizeof(JDWP::ObjectId);
  case JDWP::JT_DOUBLE:
  case JDWP::JT_LONG:
    return 8;
  default:
    LOG(FATAL) << "Unknown tag " << tag;
    return -1;
  }
}

JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int& length) {
  JDWP::JdwpError status;
  mirror::Array* a = DecodeArray(array_id, status);
  if (a == NULL) {
    return status;
  }
  length = a->GetLength();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Array* a = DecodeArray(array_id, status);
  if (a == nullptr) {
    return status;
  }

  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
  expandBufAdd1(pReply, element_tag);
  expandBufAdd4BE(pReply, count);

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    uint8_t* dst = expandBufAddSpace(pReply, count * width);
    if (width == 8) {
      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
    } else if (width == 4) {
      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
    } else if (width == 2) {
      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
    } else {
      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
      memcpy(dst, &src[offset * width], count * width);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      mirror::Object* element = oa->Get(offset + i);
      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
                                                        : element_tag;
      expandBufAdd1(pReply, specific_tag);
      expandBufAddObjectId(pReply, gRegistry->Add(element));
    }
  }

  return JDWP::ERR_NONE;
}

template <typename T>
static void CopyArrayData(mirror::Array* a, JDWP::Request& src, int offset, int count)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: fix when annotalysis correctly handles non-member functions.
  DCHECK(a->GetClass()->IsPrimitiveArray());

  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
  for (int i = 0; i < count; ++i) {
    *dst++ = src.ReadValue(sizeof(T));
  }
}

JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
                                      JDWP::Request& request)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  JDWP::JdwpError status;
  mirror::Array* dst = DecodeArray(array_id, status);
  if (dst == NULL) {
    return status;
  }

  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    if (width == 8) {
      CopyArrayData<uint64_t>(dst, request, offset, count);
    } else if (width == 4) {
      CopyArrayData<uint32_t>(dst, request, offset, count);
    } else if (width == 2) {
      CopyArrayData<uint16_t>(dst, request, offset, count);
    } else {
      CopyArrayData<uint8_t>(dst, request, offset, count);
    }
  } else {
    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      JDWP::ObjectId id = request.ReadObjectId();
      mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
      if (o == ObjectRegistry::kInvalidObject) {
        return JDWP::ERR_INVALID_OBJECT;
      }
      oa->Set<false>(offset + i, o);
    }
  }

  return JDWP::ERR_NONE;
}

JDWP::ObjectId Dbg::CreateString(const std::string& str) {
  return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str()));
}

JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId& new_object) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == NULL) {
    return status;
  }
  new_object = gRegistry->Add(c->AllocObject(Thread::Current()));
  return JDWP::ERR_NONE;
}

/*
 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
 */
JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
                                       JDWP::ObjectId& new_array) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(array_class_id, status);
  if (c == NULL) {
    return status;
  }
  new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length,
                                                        c->GetComponentSize(),
                                                        Runtime::Current()->GetHeap()->GetCurrentAllocator()));
  return JDWP::ERR_NONE;
}

bool Dbg::MatchType(JDWP::RefTypeId instance_class_id, JDWP::RefTypeId class_id) {
  JDWP::JdwpError status;
  mirror::Class* c1 = DecodeClass(instance_class_id, status);
  CHECK(c1 != NULL);
  mirror::Class* c2 = DecodeClass(class_id, status);
  CHECK(c2 != NULL);
  return c2->IsAssignableFrom(c1);
}

static JDWP::FieldId ToFieldId(const mirror::ArtField* f)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(!kMovingFields);
  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
}

static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(!kMovingMethods);
  return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
}

static mirror::ArtField* FromFieldId(JDWP::FieldId fid)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(!kMovingFields);
  return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid));
}

static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(!kMovingMethods);
  return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid));
}

static void SetLocation(JDWP::JdwpLocation& location, mirror::ArtMethod* m, uint32_t dex_pc)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (m == NULL) {
    memset(&location, 0, sizeof(location));
  } else {
    mirror::Class* c = m->GetDeclaringClass();
    location.type_tag = GetTypeTag(c);
    location.class_id = gRegistry->AddRefType(c);
    location.method_id = ToMethodId(m);
    location.dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
  }
}

std::string Dbg::GetMethodName(JDWP::MethodId method_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::ArtMethod* m = FromMethodId(method_id);
  return m->GetName();
}

std::string Dbg::GetFieldName(JDWP::FieldId field_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  return FromFieldId(field_id)->GetName();
}

/*
 * Augment the access flags for synthetic methods and fields by setting
 * the (as described by the spec) "0xf0000000 bit".  Also, strip out any
 * flags not specified by the Java programming language.
 */
static uint32_t MangleAccessFlags(uint32_t accessFlags) {
  accessFlags &= kAccJavaFlagsMask;
  if ((accessFlags & kAccSynthetic) != 0) {
    accessFlags |= 0xf0000000;
  }
  return accessFlags;
}
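
// Illustrative example: a member whose dex access flags are just kAccSynthetic (0x1000)
// is reported to the debugger as 0xf0001000.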

/*
 * Circularly shifts registers so that arguments come first. Debuggers
 * expect slots to begin with arguments, but dex code places them at
 * the end.
 */
static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // We should not get here for a method without code (native, proxy or abstract). Log it and
    // return the slot as is since all registers are arguments.
    LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m);
    return slot;
  }
  uint16_t ins_size = code_item->ins_size_;
  uint16_t locals_size = code_item->registers_size_ - ins_size;
  if (slot >= locals_size) {
    return slot - locals_size;
  } else {
    return slot + ins_size;
  }
}
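
// Worked example: for a method with registers_size=6 and ins_size=2, dex locals live in
// v0..v3 and the two arguments in v4..v5. MangleSlot maps v4,v5 to slots 0,1 and v0..v3
// to slots 2..5, so the debugger sees the arguments first.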

/*
 * Circularly shifts registers so that arguments come last. Reverts
 * slots to dex style argument placement.
 */
static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // We should not get here for a method without code (native, proxy or abstract). Log it and
    // return the slot as is since all registers are arguments.
    LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
    return slot;
  }
  uint16_t ins_size = code_item->ins_size_;
  uint16_t locals_size = code_item->registers_size_ - ins_size;
  if (slot < ins_size) {
    return slot + locals_size;
  } else {
    return slot - ins_size;
  }
}
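
// DemangleSlot inverts the mapping above: debugger slots 0,1 map back to v4,v5 and slots
// 2..5 back to v0..v3, so DemangleSlot(MangleSlot(reg, m), m) == reg.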
1529
JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == nullptr) {
    return status;
  }

  size_t instance_field_count = c->NumInstanceFields();
  size_t static_field_count = c->NumStaticFields();

  expandBufAdd4BE(pReply, instance_field_count + static_field_count);

  for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
    mirror::ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
    expandBufAddFieldId(pReply, ToFieldId(f));
    expandBufAddUtf8String(pReply, f->GetName());
    expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
    if (with_generic) {
      static const char genericSignature[1] = "";
      expandBufAddUtf8String(pReply, genericSignature);
    }
    expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
                                           JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == nullptr) {
    return status;
  }

  size_t direct_method_count = c->NumDirectMethods();
  size_t virtual_method_count = c->NumVirtualMethods();

  expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);

  for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
    mirror::ArtMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count);
    expandBufAddMethodId(pReply, ToMethodId(m));
    expandBufAddUtf8String(pReply, m->GetName());
    expandBufAddUtf8String(pReply, m->GetSignature().ToString());
    if (with_generic) {
      static const char genericSignature[1] = "";
      expandBufAddUtf8String(pReply, genericSignature);
    }
    expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError status;
  Thread* self = Thread::Current();
  StackHandleScope<1> hs(self);
  Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, status)));
  if (c.Get() == nullptr) {
    return status;
  }
  size_t interface_count = c->NumDirectInterfaces();
  expandBufAdd4BE(pReply, interface_count);
  for (size_t i = 0; i < interface_count; ++i) {
    expandBufAddRefTypeId(pReply,
                          gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i)));
  }
  return JDWP::ERR_NONE;
}

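// Method.LineTable reply layout written below: 8-byte start and end code
// indices (or -1/-1 when there is no code item), then a 4-byte line count
// that is written as 0 first and patched in place via JDWP::Set4BE once
// DecodeDebugInfo has invoked the callback for every line-table entry.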
void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  struct DebugCallbackContext {
    int numItems;
    JDWP::ExpandBuf* pReply;

    static bool Callback(void* context, uint32_t address, uint32_t line_number) {
      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
      expandBufAdd8BE(pContext->pReply, address);
      expandBufAdd4BE(pContext->pReply, line_number);
      pContext->numItems++;
      return false;
    }
  };
  mirror::ArtMethod* m = FromMethodId(method_id);
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  uint64_t start, end;
  if (code_item == nullptr) {
    DCHECK(m->IsNative() || m->IsProxyMethod());
    start = -1;
    end = -1;
  } else {
    start = 0;
    // Return the index of the last instruction.
    end = code_item->insns_size_in_code_units_ - 1;
  }

  expandBufAdd8BE(pReply, start);
  expandBufAdd8BE(pReply, end);

  // Add numLines later.
  size_t numLinesOffset = expandBufGetLength(pReply);
  expandBufAdd4BE(pReply, 0);

  DebugCallbackContext context;
  context.numItems = 0;
  context.pReply = pReply;

  if (code_item != nullptr) {
    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
                                     DebugCallbackContext::Callback, nullptr, &context);
  }

  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
}

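// Method.VariableTable reply layout written below: a 4-byte argument-register
// count, then a 4-byte variable count (patched in after the fact, like the
// line count above), then one entry per local: an 8-byte start address, name,
// descriptor, optional generic signature, a 4-byte length, and the mangled
// slot number.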
void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
                              JDWP::ExpandBuf* pReply) {
  struct DebugCallbackContext {
    mirror::ArtMethod* method;
    JDWP::ExpandBuf* pReply;
    size_t variable_count;
    bool with_generic;

    static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
                         const char* name, const char* descriptor, const char* signature)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);

      VLOG(jdwp) << StringPrintf("    %2zu: %u(%u) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
                                 pContext->variable_count, startAddress, endAddress - startAddress,
                                 name, descriptor, signature, slot,
                                 MangleSlot(slot, pContext->method));

      slot = MangleSlot(slot, pContext->method);

      expandBufAdd8BE(pContext->pReply, startAddress);
      expandBufAddUtf8String(pContext->pReply, name);
      expandBufAddUtf8String(pContext->pReply, descriptor);
      if (pContext->with_generic) {
        expandBufAddUtf8String(pContext->pReply, signature);
      }
      expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
      expandBufAdd4BE(pContext->pReply, slot);

      ++pContext->variable_count;
    }
  };
  mirror::ArtMethod* m = FromMethodId(method_id);

  // arg_count considers doubles and longs to take 2 units.
  // variable_count considers everything to take 1 unit.
  std::string shorty(m->GetShorty());
  expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty));

  // We don't know the total number of variables yet, so leave a blank and update it later.
  size_t variable_count_offset = expandBufGetLength(pReply);
  expandBufAdd4BE(pReply, 0);

  DebugCallbackContext context;
  context.method = m;
  context.pReply = pReply;
  context.variable_count = 0;
  context.with_generic = with_generic;

  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item != nullptr) {
    m->GetDexFile()->DecodeDebugInfo(
        code_item, m->IsStatic(), m->GetDexMethodIndex(), nullptr, DebugCallbackContext::Callback,
        &context);
  }

  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
}

void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
                                  JDWP::ExpandBuf* pReply) {
  mirror::ArtMethod* m = FromMethodId(method_id);
  JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
  OutputJValue(tag, return_value, pReply);
}

void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
                           JDWP::ExpandBuf* pReply) {
  mirror::ArtField* f = FromFieldId(field_id);
  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
  OutputJValue(tag, field_value, pReply);
}

JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
                                  std::vector<uint8_t>& bytecodes)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::ArtMethod* m = FromMethodId(method_id);
  if (m == nullptr) {
    return JDWP::ERR_INVALID_METHODID;
  }
  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // Native and proxy methods have no code item, so there are no bytecodes to report.
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  // Each code unit is 2 bytes wide.
  size_t byte_count = code_item->insns_size_in_code_units_ * 2;
  const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
  const uint8_t* end = begin + byte_count;
  for (const uint8_t* p = begin; p != end; ++p) {
    bytecodes.push_back(*p);
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
}

JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
}

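// Both the getter and setter below dispatch on the field's basic JDWP tag:
// object-typed fields go through GetObject/SetObject, 8-byte primitives
// (long, double) through Get64/Set64, and everything narrower through
// Get32/Set32. For static fields the declaring class stands in as the
// receiver object.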
static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
                                         JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
                                         bool is_static)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(ref_type_id, status);
  if (ref_type_id != 0 && c == nullptr) {
    return status;
  }

  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if ((!is_static && o == nullptr) || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  mirror::ArtField* f = FromFieldId(field_id);

  mirror::Class* receiver_class = c;
  if (receiver_class == nullptr && o != nullptr) {
    receiver_class = o->GetClass();
  }
  // TODO: should we give up now if receiver_class is nullptr?
  if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
    LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
    return JDWP::ERR_INVALID_FIELDID;
  }

  // The RI only enforces the static/non-static mismatch in one direction.
  // TODO: should we change the tests and check both?
  if (is_static) {
    if (!f->IsStatic()) {
      return JDWP::ERR_INVALID_FIELDID;
    }
  } else {
    if (f->IsStatic()) {
      LOG(WARNING) << "Ignoring non-null receiver for ObjectReference.GetValues on static field " << PrettyField(f);
    }
  }
  if (f->IsStatic()) {
    o = f->GetDeclaringClass();
  }

  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
  JValue field_value;
  if (tag == JDWP::JT_VOID) {
    LOG(FATAL) << "Unexpected void tag for field " << PrettyField(f);
  } else if (!IsPrimitiveTag(tag)) {
    field_value.SetL(f->GetObject(o));
  } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
    field_value.SetJ(f->Get64(o));
  } else {
    field_value.SetI(f->Get32(o));
  }
  Dbg::OutputJValue(tag, &field_value, pReply);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                   JDWP::ExpandBuf* pReply) {
  return GetFieldValueImpl(0, object_id, field_id, pReply, false);
}

JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) {
  return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
}

static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                         uint64_t value, int width, bool is_static)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if ((!is_static && o == nullptr) || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  mirror::ArtField* f = FromFieldId(field_id);

  // The RI only enforces the static/non-static mismatch in one direction.
  // TODO: should we change the tests and check both?
  if (is_static) {
    if (!f->IsStatic()) {
      return JDWP::ERR_INVALID_FIELDID;
    }
  } else {
    if (f->IsStatic()) {
      LOG(WARNING) << "Ignoring non-null receiver for ObjectReference.SetValues on static field " << PrettyField(f);
    }
  }
  if (f->IsStatic()) {
    o = f->GetDeclaringClass();
  }

  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());

  if (IsPrimitiveTag(tag)) {
    if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
      CHECK_EQ(width, 8);
      // Debugging can't use transactional mode (runtime only).
      f->Set64<false>(o, value);
    } else {
      CHECK_LE(width, 4);
      // Debugging can't use transactional mode (runtime only).
      f->Set32<false>(o, value);
    }
  } else {
    mirror::Object* v = gRegistry->Get<mirror::Object*>(value);
    if (v == ObjectRegistry::kInvalidObject) {
      return JDWP::ERR_INVALID_OBJECT;
    }
    if (v != nullptr) {
      mirror::Class* field_type;
      {
        StackHandleScope<3> hs(Thread::Current());
        HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
        HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f));
        HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
        field_type = FieldHelper(h_f).GetType();
      }
      if (!field_type->IsAssignableFrom(v->GetClass())) {
        return JDWP::ERR_INVALID_OBJECT;
      }
    }
    // Debugging can't use transactional mode (runtime only).
    f->SetObject<false>(o, v);
  }

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
                                   int width) {
  return SetFieldValueImpl(object_id, field_id, value, width, false);
}

JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
  return SetFieldValueImpl(0, field_id, value, width, true);
}

JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) {
  mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id);
  if (obj == nullptr || obj == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::Class* java_lang_String = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_String);
    if (!java_lang_String->IsAssignableFrom(obj->GetClass())) {
      // This isn't a string.
      return JDWP::ERR_INVALID_STRING;
    }
  }
  *str = obj->AsString()->ToModifiedUtf8();
  return JDWP::ERR_NONE;
}

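// OutputJValue encodes a tagged value: one tag byte followed by a big-endian
// payload whose width is implied by the tag (1 byte for boolean/byte, 2 for
// char/short, 4 for int/float, 8 for long/double, nothing for void), or a
// fresh object tag plus registry ObjectId for reference values.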
void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
  if (IsPrimitiveTag(tag)) {
    expandBufAdd1(pReply, tag);
    if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
      expandBufAdd1(pReply, return_value->GetI());
    } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
      expandBufAdd2BE(pReply, return_value->GetI());
    } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
      expandBufAdd4BE(pReply, return_value->GetI());
    } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
      expandBufAdd8BE(pReply, return_value->GetJ());
    } else {
      CHECK_EQ(tag, JDWP::JT_VOID);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::Object* value = return_value->GetL();
    expandBufAdd1(pReply, TagFromObject(soa, value));
    expandBufAddObjectId(pReply, gRegistry->Add(value));
  }
}

JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string& name) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
    return error;
  }

  // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
  mirror::ArtField* java_lang_Thread_name_field =
      soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
  mirror::String* s =
      reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
  if (s != nullptr) {
    name = s->ToModifiedUtf8();
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccess soa(Thread::Current());
  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
  if (thread_object == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroup");
  // Okay, so it's an object, but is it actually a thread?
  JDWP::JdwpError error;
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    Thread* thread;
    error = DecodeThread(soa, thread_id, thread);
  }
  if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
    // Zombie threads are in the null group.
    expandBufAddObjectId(pReply, JDWP::ObjectId(0));
    error = JDWP::ERR_NONE;
  } else if (error == JDWP::ERR_NONE) {
    mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
    CHECK(c != nullptr);
    mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
    CHECK(f != nullptr);
    mirror::Object* group = f->GetObject(thread_object);
    CHECK(group != nullptr);
    JDWP::ObjectId thread_group_id = gRegistry->Add(group);
    expandBufAddObjectId(pReply, thread_group_id);
  }
  soa.Self()->EndAssertNoThreadSuspension(old_cause);
  return error;
}

std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) {
  ScopedObjectAccess soa(Thread::Current());
  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
  CHECK(thread_group != nullptr);
  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
  CHECK(c != nullptr);
  mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
  CHECK(f != nullptr);
  mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
  soa.Self()->EndAssertNoThreadSuspension(old_cause);
  return s->ToModifiedUtf8();
}

JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
  CHECK(thread_group != nullptr);
  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent");
  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
  CHECK(c != nullptr);
  mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
  CHECK(f != nullptr);
  mirror::Object* parent = f->GetObject(thread_group);
  soa.Self()->EndAssertNoThreadSuspension(old_cause);
  return gRegistry->Add(parent);
}

JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
  return gRegistry->Add(group);
}

JDWP::ObjectId Dbg::GetMainThreadGroupId() {
  ScopedObjectAccess soa(Thread::Current());
  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup);
  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
  return gRegistry->Add(group);
}

JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
  switch (state) {
    case kBlocked:
      return JDWP::TS_MONITOR;
    case kNative:
    case kRunnable:
    case kSuspended:
      return JDWP::TS_RUNNING;
    case kSleeping:
      return JDWP::TS_SLEEPING;
    case kStarting:
    case kTerminated:
      return JDWP::TS_ZOMBIE;
    case kTimedWaiting:
    case kWaitingForCheckPointsToRun:
    case kWaitingForDebuggerSend:
    case kWaitingForDebuggerSuspension:
    case kWaitingForDebuggerToAttach:
    case kWaitingForDeoptimization:
    case kWaitingForGcToComplete:
    case kWaitingForJniOnLoad:
    case kWaitingForMethodTracingStart:
    case kWaitingForSignalCatcherOutput:
    case kWaitingInMainDebuggerLoop:
    case kWaitingInMainSignalCatcherLoop:
    case kWaitingPerformingGc:
    case kWaiting:
      return JDWP::TS_WAIT;
      // Don't add a 'default' here so the compiler can spot incompatible enum changes.
  }
  LOG(FATAL) << "Unknown thread state: " << state;
  return JDWP::TS_ZOMBIE;
}

JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
                                     JDWP::JdwpSuspendStatus* pSuspendStatus) {
  ScopedObjectAccess soa(Thread::Current());

  *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;

  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error != JDWP::ERR_NONE) {
    if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
      *pThreadStatus = JDWP::TS_ZOMBIE;
      return JDWP::ERR_NONE;
    }
    return error;
  }

  if (IsSuspendedForDebugger(soa, thread)) {
    *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
  }

  *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
  ScopedObjectAccess soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
  expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
  ScopedObjectAccess soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  thread->Interrupt(soa.Self());
  return JDWP::ERR_NONE;
}

static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
                                   mirror::Object* desired_thread_group, mirror::Object* peer)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  // Do we want threads from all thread groups?
  if (desired_thread_group == nullptr) {
    return true;
  }
  mirror::ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
  DCHECK(thread_group_field != nullptr);
  mirror::Object* group = thread_group_field->GetObject(peer);
  return (group == desired_thread_group);
}

void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& thread_ids) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
  std::list<Thread*> all_threads_list;
  {
    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
    all_threads_list = Runtime::Current()->GetThreadList()->GetList();
  }
  for (Thread* t : all_threads_list) {
    if (t == Dbg::GetDebugThread()) {
      // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
      // query all threads, so it's easier if we just don't tell them about this thread.
      continue;
    }
    if (t->IsStillStarting()) {
      // This thread is being started (and has been registered in the thread list). However, it is
      // not completely started yet so we must ignore it.
      continue;
    }
    mirror::Object* peer = t->GetPeer();
    if (peer == nullptr) {
      // peer might be null if the thread is still starting up. We can't tell the debugger about
      // this thread yet.
      // TODO: if we identified threads to the debugger by their Thread*
      // rather than their peer's mirror::Object*, we could fix this.
      // Doing so might help us report ZOMBIE threads too.
      continue;
    }
    if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
      thread_ids.push_back(gRegistry->Add(peer));
    }
  }
}

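// Note that this reads java.util.ArrayList internals ("array" and "size")
// reflectively rather than going through List methods, so it is coupled to
// the libcore field names and will break if they change.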
void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& child_thread_group_ids) {
  ScopedObjectAccess soa(Thread::Current());
  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);

  // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
  mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
  mirror::Object* groups_array_list = groups_field->GetObject(thread_group);

  // Get the array and size out of the ArrayList<ThreadGroup>...
  mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
  mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
  mirror::ObjectArray<mirror::Object>* groups_array =
      array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
  const int32_t size = size_field->GetInt(groups_array_list);

  // Copy the first 'size' elements out of the array into the result.
  for (int32_t i = 0; i < size; ++i) {
    child_thread_group_ids.push_back(gRegistry->Add(groups_array->Get(i)));
  }
}

static int GetStackDepth(Thread* thread)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  struct CountStackDepthVisitor : public StackVisitor {
    explicit CountStackDepthVisitor(Thread* thread)
        : StackVisitor(thread, nullptr), depth(0) {}

    // TODO: Enable annotalysis. We know the lock is held in the constructor, but the abstraction
    // confuses annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        ++depth;
      }
      return true;
    }
    size_t depth;
  };

  CountStackDepthVisitor visitor(thread);
  visitor.WalkStack();
  return visitor.depth;
}

JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t& result) {
  ScopedObjectAccess soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  result = GetStackDepth(thread);
  return JDWP::ERR_NONE;
}

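// ThreadReference.Frames reply written below: a 4-byte frame count, then for
// each visited frame in [start_frame, start_frame + frame_count) an 8-byte
// frame id followed by its JDWP location. Runtime (non-Java) frames are
// skipped and do not consume an index.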
JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
                                     size_t frame_count, JDWP::ExpandBuf* buf) {
  class GetFrameVisitor : public StackVisitor {
   public:
    GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        : StackVisitor(thread, nullptr), depth_(0),
          start_frame_(start_frame), frame_count_(frame_count), buf_(buf) {
      expandBufAdd4BE(buf_, frame_count_);
    }

    // TODO: Enable annotalysis. We know the lock is held in the constructor, but the abstraction
    // confuses annotalysis.
    virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (GetMethod()->IsRuntimeMethod()) {
        return true;  // The debugger can't do anything useful with a frame that has no Method*.
      }
      if (depth_ >= start_frame_ + frame_count_) {
        return false;
      }
      if (depth_ >= start_frame_) {
        JDWP::FrameId frame_id(GetFrameId());
        JDWP::JdwpLocation location;
        SetLocation(location, GetMethod(), GetDexPc());
        VLOG(jdwp) << StringPrintf("    Frame %3zu: id=%3" PRIu64 " ", depth_, frame_id) << location;
        expandBufAdd8BE(buf_, frame_id);
        expandBufAddLocation(buf_, location);
      }
      ++depth_;
      return true;
    }

   private:
    size_t depth_;
    const size_t start_frame_;
    const size_t frame_count_;
    JDWP::ExpandBuf* buf_;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  if (!IsSuspendedForDebugger(soa, thread)) {
    return JDWP::ERR_THREAD_NOT_SUSPENDED;
  }
  GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

JDWP::ObjectId Dbg::GetThreadSelfId() {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  return gRegistry->Add(soa.Self()->GetPeer());
}

void Dbg::SuspendVM() {
  Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
}

void Dbg::ResumeVM() {
  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
}

JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
  Thread* self = Thread::Current();
  ScopedLocalRef<jobject> peer(self->GetJniEnv(), nullptr);
  {
    ScopedObjectAccess soa(self);
    peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id)));
  }
  if (peer.get() == nullptr) {
    return JDWP::ERR_THREAD_NOT_ALIVE;
  }
  // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads
  // trying to suspend this one.
  MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_);
  bool timed_out;
  ThreadList* thread_list = Runtime::Current()->GetThreadList();
  Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
                                                    &timed_out);
  if (thread != nullptr) {
    return JDWP::ERR_NONE;
  } else if (timed_out) {
    return JDWP::ERR_INTERNAL;
  } else {
    return JDWP::ERR_THREAD_NOT_ALIVE;
  }
}

void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id);
  Thread* thread;
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    thread = Thread::FromManagedThread(soa, peer);
  }
  if (thread == nullptr) {
    LOG(WARNING) << "No such thread for resume: " << peer;
    return;
  }
  bool needs_resume;
  {
    MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
    needs_resume = thread->GetSuspendCount() > 0;
  }
  if (needs_resume) {
    Runtime::Current()->GetThreadList()->Resume(thread, true);
  }
}

void Dbg::SuspendSelf() {
  Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
}

struct GetThisVisitor : public StackVisitor {
  GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), this_object(nullptr), frame_id(frame_id) {}

  // TODO: Enable annotalysis. We know the lock is held in the constructor, but the abstraction
  // confuses annotalysis.
  virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
    if (frame_id != GetFrameId()) {
      return true;  // continue
    } else {
      this_object = GetThisObject();
      return false;
    }
  }

  mirror::Object* this_object;
  JDWP::FrameId frame_id;
};

JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
                                   JDWP::ObjectId* result) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  Thread* thread;
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
    if (error != JDWP::ERR_NONE) {
      return error;
    }
    if (!IsSuspendedForDebugger(soa, thread)) {
      return JDWP::ERR_THREAD_NOT_SUSPENDED;
    }
  }
  std::unique_ptr<Context> context(Context::Create());
  GetThisVisitor visitor(thread, context.get(), frame_id);
  visitor.WalkStack();
  *result = gRegistry->Add(visitor.this_object);
  return JDWP::ERR_NONE;
}

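// GetLocalValue fills 'buf' in the tagged-value format: buf[0] holds the
// (possibly refined) tag and the big-endian payload starts at buf + 1. For
// reference slots the generic tag requested by the debugger is replaced with
// the precise tag computed by TagFromObject before being prepended.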
JDWP::JdwpError Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
                                   JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
  struct GetLocalVisitor : public StackVisitor {
    GetLocalVisitor(const ScopedObjectAccessUnchecked& soa, Thread* thread, Context* context,
                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        : StackVisitor(thread, context), soa_(soa), frame_id_(frame_id), slot_(slot), tag_(tag),
          buf_(buf), width_(width), error_(JDWP::ERR_NONE) {}

    // TODO: Enable annotalysis. We know the lock is held in the constructor, but the abstraction
    // confuses annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (GetFrameId() != frame_id_) {
        return true;  // Not our frame, carry on.
      }
      // TODO: check that the tag is compatible with the actual type of the slot!
      // TODO: check slot is valid for this method or return INVALID_SLOT error.
      mirror::ArtMethod* m = GetMethod();
      if (m->IsNative()) {
        // We can't read local values from a native method.
        error_ = JDWP::ERR_OPAQUE_FRAME;
        return false;
      }
      uint16_t reg = DemangleSlot(slot_, m);
      constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
      switch (tag_) {
        case JDWP::JT_BOOLEAN: {
          CHECK_EQ(width_, 1U);
          uint32_t intVal;
          if (GetVReg(m, reg, kIntVReg, &intVal)) {
            VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
            JDWP::Set1(buf_+1, intVal != 0);
          } else {
            VLOG(jdwp) << "failed to get boolean local " << reg;
            error_ = kFailureErrorCode;
          }
          break;
        }
        case JDWP::JT_BYTE: {
          CHECK_EQ(width_, 1U);
          uint32_t intVal;
          if (GetVReg(m, reg, kIntVReg, &intVal)) {
            VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
            JDWP::Set1(buf_+1, intVal);
          } else {
            VLOG(jdwp) << "failed to get byte local " << reg;
            error_ = kFailureErrorCode;
          }
          break;
        }
        case JDWP::JT_SHORT:
        case JDWP::JT_CHAR: {
          CHECK_EQ(width_, 2U);
          uint32_t intVal;
          if (GetVReg(m, reg, kIntVReg, &intVal)) {
            VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
            JDWP::Set2BE(buf_+1, intVal);
          } else {
            VLOG(jdwp) << "failed to get short/char local " << reg;
            error_ = kFailureErrorCode;
          }
          break;
        }
        case JDWP::JT_INT: {
          CHECK_EQ(width_, 4U);
          uint32_t intVal;
          if (GetVReg(m, reg, kIntVReg, &intVal)) {
            VLOG(jdwp) << "get int local " << reg << " = " << intVal;
            JDWP::Set4BE(buf_+1, intVal);
          } else {
            VLOG(jdwp) << "failed to get int local " << reg;
            error_ = kFailureErrorCode;
          }
          break;
        }
        case JDWP::JT_FLOAT: {
          CHECK_EQ(width_, 4U);
          uint32_t intVal;
          if (GetVReg(m, reg, kFloatVReg, &intVal)) {
            VLOG(jdwp) << "get float local " << reg << " = " << intVal;
            JDWP::Set4BE(buf_+1, intVal);
          } else {
            VLOG(jdwp) << "failed to get float local " << reg;
            error_ = kFailureErrorCode;
          }
          break;
        }
        case JDWP::JT_ARRAY:
        case JDWP::JT_CLASS_LOADER:
        case JDWP::JT_CLASS_OBJECT:
        case JDWP::JT_OBJECT:
        case JDWP::JT_STRING:
        case JDWP::JT_THREAD:
        case JDWP::JT_THREAD_GROUP: {
          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
          uint32_t intVal;
          if (GetVReg(m, reg, kReferenceVReg, &intVal)) {
            mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
            VLOG(jdwp) << "get " << tag_ << " object local " << reg << " = " << o;
            if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
              LOG(FATAL) << "Register " << reg << " expected to hold " << tag_ << " object: " << o;
            }
            tag_ = TagFromObject(soa_, o);
            JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
          } else {
            VLOG(jdwp) << "failed to get " << tag_ << " object local " << reg;
            error_ = kFailureErrorCode;
          }
          break;
        }
        case JDWP::JT_DOUBLE: {
          CHECK_EQ(width_, 8U);
          uint64_t longVal;
          if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
            VLOG(jdwp) << "get double local " << reg << " = " << longVal;
            JDWP::Set8BE(buf_+1, longVal);
          } else {
            VLOG(jdwp) << "failed to get double local " << reg;
            error_ = kFailureErrorCode;
          }
          break;
        }
        case JDWP::JT_LONG: {
          CHECK_EQ(width_, 8U);
          uint64_t longVal;
          if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) {
            VLOG(jdwp) << "get long local " << reg << " = " << longVal;
            JDWP::Set8BE(buf_+1, longVal);
          } else {
            VLOG(jdwp) << "failed to get long local " << reg;
            error_ = kFailureErrorCode;
          }
          break;
        }
        default:
          LOG(FATAL) << "Unknown tag " << tag_;
          break;
      }

      // Prepend tag, which may have been updated.
      JDWP::Set1(buf_, tag_);
      return false;
    }
    const ScopedObjectAccessUnchecked& soa_;
    const JDWP::FrameId frame_id_;
    const int slot_;
    JDWP::JdwpTag tag_;
    uint8_t* const buf_;
    const size_t width_;
    JDWP::JdwpError error_;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  // TODO: check that the thread is suspended by the debugger?
  std::unique_ptr<Context> context(Context::Create());
  GetLocalVisitor visitor(soa, thread, context.get(), frame_id, slot, tag, buf, width);
  visitor.WalkStack();
  return visitor.error_;
}

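// SetLocalValue is the write-side counterpart: 'value' arrives as a raw
// 64-bit quantity from the wire; it is narrowed with static_cast<uint32_t>
// for 4-byte-and-under tags and split across a vreg pair for long/double
// before being stored back into the frame.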
JDWP::JdwpError Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
                                   JDWP::JdwpTag tag, uint64_t value, size_t width) {
  struct SetLocalVisitor : public StackVisitor {
    SetLocalVisitor(Thread* thread, Context* context,
                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value,
                    size_t width)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        : StackVisitor(thread, context),
          frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width),
          error_(JDWP::ERR_NONE) {}

    // TODO: Enable annotalysis. We know the lock is held in the constructor, but the abstraction
    // confuses annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (GetFrameId() != frame_id_) {
        return true;  // Not our frame, carry on.
      }
      // TODO: check that the tag is compatible with the actual type of the slot!
      // TODO: check slot is valid for this method or return INVALID_SLOT error.
      mirror::ArtMethod* m = GetMethod();
      if (m->IsNative()) {
        // We can't write local values into a native method.
        error_ = JDWP::ERR_OPAQUE_FRAME;
        return false;
      }
      uint16_t reg = DemangleSlot(slot_, m);
      constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
      switch (tag_) {
        case JDWP::JT_BOOLEAN:
        case JDWP::JT_BYTE:
          CHECK_EQ(width_, 1U);
          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
            VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = "
                       << static_cast<uint32_t>(value_);
            error_ = kFailureErrorCode;
          }
          break;
        case JDWP::JT_SHORT:
        case JDWP::JT_CHAR:
          CHECK_EQ(width_, 2U);
          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
            VLOG(jdwp) << "failed to set short/char local " << reg << " = "
                       << static_cast<uint32_t>(value_);
            error_ = kFailureErrorCode;
          }
          break;
        case JDWP::JT_INT:
          CHECK_EQ(width_, 4U);
          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
            VLOG(jdwp) << "failed to set int local " << reg << " = "
                       << static_cast<uint32_t>(value_);
            error_ = kFailureErrorCode;
          }
          break;
        case JDWP::JT_FLOAT:
          CHECK_EQ(width_, 4U);
          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kFloatVReg)) {
            VLOG(jdwp) << "failed to set float local " << reg << " = "
                       << static_cast<uint32_t>(value_);
            error_ = kFailureErrorCode;
          }
          break;
        case JDWP::JT_ARRAY:
        case JDWP::JT_CLASS_LOADER:
        case JDWP::JT_CLASS_OBJECT:
        case JDWP::JT_OBJECT:
        case JDWP::JT_STRING:
        case JDWP::JT_THREAD:
        case JDWP::JT_THREAD_GROUP: {
          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
          mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value_));
          if (o == ObjectRegistry::kInvalidObject) {
            VLOG(jdwp) << tag_ << " object " << o << " is an invalid object";
            error_ = JDWP::ERR_INVALID_OBJECT;
          } else if (!SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
                              kReferenceVReg)) {
            VLOG(jdwp) << "failed to set " << tag_ << " object local " << reg << " = " << o;
            error_ = kFailureErrorCode;
          }
          break;
        }
        case JDWP::JT_DOUBLE: {
          CHECK_EQ(width_, 8U);
          bool success = SetVRegPair(m, reg, value_, kDoubleLoVReg, kDoubleHiVReg);
          if (!success) {
            VLOG(jdwp) << "failed to set double local " << reg << " = " << value_;
            error_ = kFailureErrorCode;
          }
          break;
        }
        case JDWP::JT_LONG: {
          CHECK_EQ(width_, 8U);
          bool success = SetVRegPair(m, reg, value_, kLongLoVReg, kLongHiVReg);
          if (!success) {
            VLOG(jdwp) << "failed to set long local " << reg << " = " << value_;
            error_ = kFailureErrorCode;
          }
          break;
        }
        default:
          LOG(FATAL) << "Unknown tag " << tag_;
          break;
      }
      return false;
    }

    const JDWP::FrameId frame_id_;
    const int slot_;
    const JDWP::JdwpTag tag_;
    const uint64_t value_;
    const size_t width_;
    JDWP::JdwpError error_;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  // TODO: check that the thread is suspended by the debugger?
  std::unique_ptr<Context> context(Context::Create());
  SetLocalVisitor visitor(thread, context.get(), frame_id, slot, tag, value, width);
  visitor.WalkStack();
  return visitor.error_;
}

JDWP::ObjectId Dbg::GetThisObjectIdForEvent(mirror::Object* this_object) {
  // If 'this_object' isn't already in the registry, we know that we're not looking for it, so
  // there's no point adding it to the registry and burning through ids.
  // When registering an event request with an instance filter, we've been given an existing object
  // id so it must already be present in the registry when the event fires.
  JDWP::ObjectId this_id = 0;
  if (this_object != nullptr && gRegistry->Contains(this_object)) {
    this_id = gRegistry->Add(this_object);
  }
  return this_id;
}

void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
                            int event_flags, const JValue* return_value) {
  if (!IsDebuggerActive()) {
    return;
  }
  DCHECK(m != nullptr);
  DCHECK_EQ(m->IsStatic(), this_object == nullptr);
  JDWP::JdwpLocation location;
  SetLocation(location, m, dex_pc);

  // We need 'this' for InstanceOnly filters only.
  JDWP::ObjectId this_id = GetThisObjectIdForEvent(this_object);
  gJdwpState->PostLocationEvent(&location, this_id, event_flags, return_value);
}

void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc,
                               mirror::Object* this_object, mirror::ArtField* f) {
  if (!IsDebuggerActive()) {
    return;
  }
  DCHECK(m != nullptr);
  DCHECK(f != nullptr);
  JDWP::JdwpLocation location;
  SetLocation(location, m, dex_pc);

  JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass());
  JDWP::FieldId field_id = ToFieldId(f);
  JDWP::ObjectId this_id = gRegistry->Add(this_object);

  gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, nullptr, false);
}

void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
                                     mirror::Object* this_object, mirror::ArtField* f,
                                     const JValue* field_value) {
  if (!IsDebuggerActive()) {
    return;
  }
  DCHECK(m != nullptr);
  DCHECK(f != nullptr);
  DCHECK(field_value != nullptr);
  JDWP::JdwpLocation location;
  SetLocation(location, m, dex_pc);

  JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass());
  JDWP::FieldId field_id = ToFieldId(f);
  JDWP::ObjectId this_id = gRegistry->Add(this_object);

  gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, field_value, true);
}

void Dbg::PostException(const ThrowLocation& throw_location,
                        mirror::ArtMethod* catch_method,
                        uint32_t catch_dex_pc, mirror::Throwable* exception_object) {
  if (!IsDebuggerActive()) {
    return;
  }

  JDWP::JdwpLocation jdwp_throw_location;
  SetLocation(jdwp_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
  JDWP::JdwpLocation catch_location;
  SetLocation(catch_location, catch_method, catch_dex_pc);

  // We need 'this' for InstanceOnly filters only.
  JDWP::ObjectId this_id = GetThisObjectIdForEvent(throw_location.GetThis());
  JDWP::ObjectId exception_id = gRegistry->Add(exception_object);
  JDWP::RefTypeId exception_class_id = gRegistry->AddRefType(exception_object->GetClass());

  gJdwpState->PostException(&jdwp_throw_location, exception_id, exception_class_id, &catch_location,
                            this_id);
}

void Dbg::PostClassPrepare(mirror::Class* c) {
  if (!IsDebuggerActive()) {
    return;
  }

  // OLD-TODO - we currently always send both "verified" and "prepared" since
  // debuggers seem to like that.  There might be some advantage to honesty,
  // since the class may not yet be verified.
  int state = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
  JDWP::JdwpTypeTag tag = GetTypeTag(c);
  std::string temp;
  gJdwpState->PostClassPrepare(tag, gRegistry->Add(c), c->GetDescriptor(&temp), state);
}

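// Single-step trigger summary for the logic below: SD_INTO breaks on a new
// method, a new instruction (SS_MIN), or a new source line; SD_OVER breaks
// when frames pop, or at the same depth on a new instruction or line; SD_OUT
// breaks only when frames pop.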
2755void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
2756                         mirror::ArtMethod* m, uint32_t dex_pc,
2757                         int event_flags, const JValue* return_value) {
2758  if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
2759    return;
2760  }
2761
2762  if (IsBreakpoint(m, dex_pc)) {
2763    event_flags |= kBreakpoint;
2764  }
2765
2766  // If the debugger is single-stepping one of our threads, check to
2767  // see if we're that thread and we've reached a step point.
2768  const SingleStepControl* single_step_control = thread->GetSingleStepControl();
2769  DCHECK(single_step_control != nullptr);
2770  if (single_step_control->is_active) {
2771    CHECK(!m->IsNative());
2772    if (single_step_control->step_depth == JDWP::SD_INTO) {
2773      // Step into method calls.  We break when the line number
2774      // or method pointer changes.  If we're in SS_MIN mode, we
2775      // always stop.
2776      if (single_step_control->method != m) {
2777        event_flags |= kSingleStep;
2778        VLOG(jdwp) << "SS new method";
2779      } else if (single_step_control->step_size == JDWP::SS_MIN) {
2780        event_flags |= kSingleStep;
2781        VLOG(jdwp) << "SS new instruction";
2782      } else if (single_step_control->ContainsDexPc(dex_pc)) {
2783        event_flags |= kSingleStep;
2784        VLOG(jdwp) << "SS new line";
2785      }
2786    } else if (single_step_control->step_depth == JDWP::SD_OVER) {
2787      // Step over method calls.  We break when the line number is
2788      // different and the frame depth is <= the original frame
2789      // depth.  (We can't just compare on the method, because we
2790      // might get unrolled past it by an exception, and it's tricky
2791      // to identify recursion.)
2792
2793      int stack_depth = GetStackDepth(thread);
2794
2795      if (stack_depth < single_step_control->stack_depth) {
2796        // Popped up one or more frames, always trigger.
2797        event_flags |= kSingleStep;
2798        VLOG(jdwp) << "SS method pop";
2799      } else if (stack_depth == single_step_control->stack_depth) {
2800        // Same depth, see if we moved.
2801        if (single_step_control->step_size == JDWP::SS_MIN) {
2802          event_flags |= kSingleStep;
2803          VLOG(jdwp) << "SS new instruction";
2804        } else if (single_step_control->ContainsDexPc(dex_pc)) {
2805          event_flags |= kSingleStep;
2806          VLOG(jdwp) << "SS new line";
2807        }
2808      }
2809    } else {
2810      CHECK_EQ(single_step_control->step_depth, JDWP::SD_OUT);
2811      // Return from the current method.  We break when the frame
2812      // depth pops up.
2813
2814      // This differs from the "method exit" break in that it stops
2815      // with the PC at the next instruction in the returned-to
2816      // function, rather than the end of the returning function.
2817
2818      int stack_depth = GetStackDepth(thread);
2819      if (stack_depth < single_step_control->stack_depth) {
2820        event_flags |= kSingleStep;
2821        VLOG(jdwp) << "SS method pop";
2822      }
2823    }
2824  }
2825
2826  // If there's something interesting going on, see if it matches one
2827  // of the debugger filters.
2828  if (event_flags != 0) {
2829    Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
2830  }
2831}
2832
2833size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
2834  switch (instrumentation_event) {
2835    case instrumentation::Instrumentation::kMethodEntered:
2836      return &method_enter_event_ref_count_;
2837    case instrumentation::Instrumentation::kMethodExited:
2838      return &method_exit_event_ref_count_;
2839    case instrumentation::Instrumentation::kDexPcMoved:
2840      return &dex_pc_change_event_ref_count_;
2841    case instrumentation::Instrumentation::kFieldRead:
2842      return &field_read_event_ref_count_;
2843    case instrumentation::Instrumentation::kFieldWritten:
2844      return &field_write_event_ref_count_;
2845    case instrumentation::Instrumentation::kExceptionCaught:
2846      return &exception_catch_event_ref_count_;
2847    default:
2848      return nullptr;
2849  }
2850}
2851
2852// Process request while all mutator threads are suspended.
2853void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
2854  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
2855  switch (request.GetKind()) {
2856    case DeoptimizationRequest::kNothing:
2857      LOG(WARNING) << "Ignoring empty deoptimization request.";
2858      break;
2859    case DeoptimizationRequest::kRegisterForEvent:
2860      VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
2861                                 request.InstrumentationEvent());
2862      instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
2863      instrumentation_events_ |= request.InstrumentationEvent();
2864      break;
2865    case DeoptimizationRequest::kUnregisterForEvent:
2866      VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
2867                                 request.InstrumentationEvent());
2868      instrumentation->RemoveListener(&gDebugInstrumentationListener,
2869                                      request.InstrumentationEvent());
2870      instrumentation_events_ &= ~request.InstrumentationEvent();
2871      break;
2872    case DeoptimizationRequest::kFullDeoptimization:
2873      VLOG(jdwp) << "Deoptimize the world ...";
2874      instrumentation->DeoptimizeEverything();
2875      VLOG(jdwp) << "Deoptimize the world DONE";
2876      break;
2877    case DeoptimizationRequest::kFullUndeoptimization:
2878      VLOG(jdwp) << "Undeoptimize the world ...";
2879      instrumentation->UndeoptimizeEverything();
2880      VLOG(jdwp) << "Undeoptimize the world DONE";
2881      break;
2882    case DeoptimizationRequest::kSelectiveDeoptimization:
2883      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
2884      instrumentation->Deoptimize(request.Method());
2885      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
2886      break;
2887    case DeoptimizationRequest::kSelectiveUndeoptimization:
2888      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
2889      instrumentation->Undeoptimize(request.Method());
2890      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
2891      break;
2892    default:
2893      LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
2894      break;
2895  }
2896}
2897
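// Note: this only records that a full undeoptimization is owed; the actual request is queued
// later by ProcessDelayedFullUndeoptimizations(), presumably so that several of them can be
// handled in a single suspend-all pass by ManageDeoptimization().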
2898void Dbg::DelayFullUndeoptimization() {
2899  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2900  ++delayed_full_undeoptimization_count_;
2901  DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
2902}
2903
2904void Dbg::ProcessDelayedFullUndeoptimizations() {
2905  // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization).
2906  {
2907    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2908    while (delayed_full_undeoptimization_count_ > 0) {
2909      DeoptimizationRequest req;
2910      req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
2911      req.SetMethod(nullptr);
2912      RequestDeoptimizationLocked(req);
2913      --delayed_full_undeoptimization_count_;
2914    }
2915  }
2916  ManageDeoptimization();
2917}
2918
2919void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
2920  if (req.GetKind() == DeoptimizationRequest::kNothing) {
2921    // Nothing to do.
2922    return;
2923  }
2924  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
2925  RequestDeoptimizationLocked(req);
2926}
2927
2928void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
2929  switch (req.GetKind()) {
2930    case DeoptimizationRequest::kRegisterForEvent: {
2931      DCHECK_NE(req.InstrumentationEvent(), 0u);
2932      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
2933      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
2934                                                req.InstrumentationEvent());
2935      if (*counter == 0) {
2936        VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x",
2937                                   deoptimization_requests_.size(), req.InstrumentationEvent());
2938        deoptimization_requests_.push_back(req);
2939      }
2940      *counter = *counter + 1;
2941      break;
2942    }
2943    case DeoptimizationRequest::kUnregisterForEvent: {
2944      DCHECK_NE(req.InstrumentationEvent(), 0u);
2945      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
2946      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
2947                                                req.InstrumentationEvent());
2948      *counter = *counter - 1;
2949      if (*counter == 0) {
2950        VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x",
2951                                   deoptimization_requests_.size(), req.InstrumentationEvent());
2952        deoptimization_requests_.push_back(req);
2953      }
2954      break;
2955    }
2956    case DeoptimizationRequest::kFullDeoptimization: {
2957      DCHECK(req.Method() == nullptr);
2958      if (full_deoptimization_event_count_ == 0) {
2959        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2960                   << " for full deoptimization";
2961        deoptimization_requests_.push_back(req);
2962      }
2963      ++full_deoptimization_event_count_;
2964      break;
2965    }
2966    case DeoptimizationRequest::kFullUndeoptimization: {
2967      DCHECK(req.Method() == nullptr);
2968      DCHECK_GT(full_deoptimization_event_count_, 0U);
2969      --full_deoptimization_event_count_;
2970      if (full_deoptimization_event_count_ == 0) {
2971        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2972                   << " for full undeoptimization";
2973        deoptimization_requests_.push_back(req);
2974      }
2975      break;
2976    }
2977    case DeoptimizationRequest::kSelectiveDeoptimization: {
2978      DCHECK(req.Method() != nullptr);
2979      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2980                 << " for deoptimization of " << PrettyMethod(req.Method());
2981      deoptimization_requests_.push_back(req);
2982      break;
2983    }
2984    case DeoptimizationRequest::kSelectiveUndeoptimization: {
2985      DCHECK(req.Method() != nullptr);
2986      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2987                 << " for undeoptimization of " << PrettyMethod(req.Method());
2988      deoptimization_requests_.push_back(req);
2989      break;
2990    }
2991    default: {
2992      LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
2993      break;
2994    }
2995  }
2996}
2997
2998void Dbg::ManageDeoptimization() {
2999  Thread* const self = Thread::Current();
3000  {
3001    // Avoid suspend/resume if there is no pending request.
3002    MutexLock mu(self, *Locks::deoptimization_lock_);
3003    if (deoptimization_requests_.empty()) {
3004      return;
3005    }
3006  }
3007  CHECK_EQ(self->GetState(), kRunnable);
3008  self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
3009  // We need to suspend mutator threads first.
3010  Runtime* const runtime = Runtime::Current();
3011  runtime->GetThreadList()->SuspendAll();
3012  const ThreadState old_state = self->SetStateUnsafe(kRunnable);
3013  {
3014    MutexLock mu(self, *Locks::deoptimization_lock_);
3015    size_t req_index = 0;
3016    for (DeoptimizationRequest& request : deoptimization_requests_) {
3017      VLOG(jdwp) << "Process deoptimization request #" << req_index++;
3018      ProcessDeoptimizationRequest(request);
3019    }
3020    deoptimization_requests_.clear();
3021  }
3022  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
3023  runtime->GetThreadList()->ResumeAll();
3024  self->TransitionFromSuspendedToRunnable();
3025}
3026
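// Returns true if the inline-method analyser recognizes this method's code as a candidate for
// inlining. Breakpoints in such a method require full deoptimization (see WatchLocation below),
// since callers may already contain an inlined copy of the code.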
3027static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
3028    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3029  const DexFile::CodeItem* code_item = m->GetCodeItem();
3030  if (code_item == nullptr) {
3031    // TODO: We should not be asked to watch a location in a native or abstract method, so the
3032    // code item should never be null. We could just check that we never encounter this case.
3033    return false;
3034  }
3035  StackHandleScope<2> hs(self);
3036  mirror::Class* declaring_class = m->GetDeclaringClass();
3037  Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
3038  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
3039  verifier::MethodVerifier verifier(dex_cache->GetDexFile(), &dex_cache, &class_loader,
3040                                    &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
3041                                    m->GetAccessFlags(), false, true, false);
3042  // Note: we don't need to verify the method.
3043  return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
3044}
3045
3046static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
3047    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3048  for (Breakpoint& breakpoint : gBreakpoints) {
3049    if (breakpoint.Method() == m) {
3050      return &breakpoint;
3051    }
3052  }
3053  return nullptr;
3054}
3055
3056// Sanity checks all existing breakpoints on the same method.
3057static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
3058    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3059  if (kIsDebugBuild) {
3060    for (const Breakpoint& breakpoint : gBreakpoints) {
3061      CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization());
3062    }
3063    if (need_full_deoptimization) {
3064      // We should have deoptimized everything but not "selectively" deoptimized this method.
3065      CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized());
3066      CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3067    } else {
3068      // We should have "selectively" deoptimized this method.
3069      // Note: while we have not deoptimized everything for this method, we may have done it for
3070      // another event.
3071      CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3072    }
3073  }
3074}
3075
3076// Installs a breakpoint at the specified location. Also indicates through the deoptimization
3077// request if we need to deoptimize.
3078void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3079  Thread* const self = Thread::Current();
3080  mirror::ArtMethod* m = FromMethodId(location->method_id);
3081  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3082
3083  WriterMutexLock mu(self, *Locks::breakpoint_lock_);
3084  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3085  bool need_full_deoptimization;
3086  if (existing_breakpoint == nullptr) {
3087    // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
3088    // inlined, we deoptimize everything; otherwise we deoptimize only this method.
3089    need_full_deoptimization = IsMethodPossiblyInlined(self, m);
3090    if (need_full_deoptimization) {
3091      req->SetKind(DeoptimizationRequest::kFullDeoptimization);
3092      req->SetMethod(nullptr);
3093    } else {
3094      req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization);
3095      req->SetMethod(m);
3096    }
3097  } else {
3098    // There is at least one breakpoint for this method: we don't need to deoptimize.
3099    req->SetKind(DeoptimizationRequest::kNothing);
3100    req->SetMethod(nullptr);
3101
3102    need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization();
3103    SanityCheckExistingBreakpoints(m, need_full_deoptimization);
3104  }
3105
3106  gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization));
3107  VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
3108             << gBreakpoints[gBreakpoints.size() - 1];
3109}
3110
3111// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
3112// request if we need to undeoptimize.
3113void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3114  WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
3115  mirror::ArtMethod* m = FromMethodId(location->method_id);
3116  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3117  bool need_full_deoptimization = false;
3118  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
3119    if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
3120      VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
3121      need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization();
3122      DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3123      gBreakpoints.erase(gBreakpoints.begin() + i);
3124      break;
3125    }
3126  }
3127  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3128  if (existing_breakpoint == nullptr) {
3129    // There is no more breakpoint on this method: we need to undeoptimize.
3130    if (need_full_deoptimization) {
3131      // This method required full deoptimization: we need to undeoptimize everything.
3132      req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
3133      req->SetMethod(nullptr);
3134    } else {
3135      // This method required selective deoptimization: we need to undeoptimize only that method.
3136      req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
3137      req->SetMethod(m);
3138    }
3139  } else {
3140    // There is at least one breakpoint for this method: we don't need to undeoptimize.
3141    req->SetKind(DeoptimizationRequest::kNothing);
3142    req->SetMethod(nullptr);
3143    SanityCheckExistingBreakpoints(m, need_full_deoptimization);
3144  }
3145}
3146
3147// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
3148// cause suspension if the thread is the current thread.
3149class ScopedThreadSuspension {
3150 public:
3151  ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
3152      LOCKS_EXCLUDED(Locks::thread_list_lock_)
3153      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
3154      thread_(nullptr),
3155      error_(JDWP::ERR_NONE),
3156      self_suspend_(false),
3157      other_suspend_(false) {
3158    ScopedObjectAccessUnchecked soa(self);
3159    {
3160      MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3161      error_ = DecodeThread(soa, thread_id, thread_);
3162    }
3163    if (error_ == JDWP::ERR_NONE) {
3164      if (thread_ == soa.Self()) {
3165        self_suspend_ = true;
3166      } else {
3167        soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3168        jobject thread_peer = gRegistry->GetJObject(thread_id);
3169        bool timed_out;
3170        Thread* suspended_thread;
3171        {
3172          // Take suspend thread lock to avoid races with threads trying to suspend this one.
3173          MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_);
3174          ThreadList* thread_list = Runtime::Current()->GetThreadList();
3175          suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true, &timed_out);
3176        }
3177        CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
3178        if (suspended_thread == nullptr) {
3179          // Thread terminated from under us while suspending.
3180          error_ = JDWP::ERR_INVALID_THREAD;
3181        } else {
3182          CHECK_EQ(suspended_thread, thread_);
3183          other_suspend_ = true;
3184        }
3185      }
3186    }
3187  }
3188
3189  Thread* GetThread() const {
3190    return thread_;
3191  }
3192
3193  JDWP::JdwpError GetError() const {
3194    return error_;
3195  }
3196
3197  ~ScopedThreadSuspension() {
3198    if (other_suspend_) {
3199      Runtime::Current()->GetThreadList()->Resume(thread_, true);
3200    }
3201  }
3202
3203 private:
3204  Thread* thread_;
3205  JDWP::JdwpError error_;
3206  bool self_suspend_;
3207  bool other_suspend_;
3208};
3209
3210JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
3211                                   JDWP::JdwpStepDepth step_depth) {
3212  Thread* self = Thread::Current();
3213  ScopedThreadSuspension sts(self, thread_id);
3214  if (sts.GetError() != JDWP::ERR_NONE) {
3215    return sts.GetError();
3216  }
3217
3218  //
3219  // Work out what Method* we're in, the current line number, and how deep the stack currently
3220  // is for step-out.
3221  //
3222
3223  struct SingleStepStackVisitor : public StackVisitor {
3224    explicit SingleStepStackVisitor(Thread* thread, SingleStepControl* single_step_control,
3225                                    int32_t* line_number)
3226        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
3227        : StackVisitor(thread, NULL), single_step_control_(single_step_control),
3228          line_number_(line_number) {
3229      DCHECK_EQ(single_step_control_, thread->GetSingleStepControl());
3230      single_step_control_->method = NULL;
3231      single_step_control_->stack_depth = 0;
3232    }
3233
3234    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
3235    // annotalysis.
3236    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
3237      mirror::ArtMethod* m = GetMethod();
3238      if (!m->IsRuntimeMethod()) {
3239        ++single_step_control_->stack_depth;
3240        if (single_step_control_->method == NULL) {
3241          mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
3242          single_step_control_->method = m;
3243          *line_number_ = -1;
3244          if (dex_cache != NULL) {
3245            const DexFile& dex_file = *dex_cache->GetDexFile();
3246            *line_number_ = dex_file.GetLineNumFromPC(m, GetDexPc());
3247          }
3248        }
3249      }
3250      return true;
3251    }
3252
3253    SingleStepControl* const single_step_control_;
3254    int32_t* const line_number_;
3255  };
3256
3257  Thread* const thread = sts.GetThread();
3258  SingleStepControl* const single_step_control = thread->GetSingleStepControl();
3259  DCHECK(single_step_control != nullptr);
3260  int32_t line_number = -1;
3261  SingleStepStackVisitor visitor(thread, single_step_control, &line_number);
3262  visitor.WalkStack();
3263
3264  //
3265  // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
3266  //
3267
3268  struct DebugCallbackContext {
3269    explicit DebugCallbackContext(SingleStepControl* single_step_control, int32_t line_number,
3270                                  const DexFile::CodeItem* code_item)
3271      : single_step_control_(single_step_control), line_number_(line_number), code_item_(code_item),
3272        last_pc_valid(false), last_pc(0) {
3273    }
3274
3275    static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) {
3276      DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
3277      if (static_cast<int32_t>(line_number) == context->line_number_) {
3278        if (!context->last_pc_valid) {
3279          // Everything from this address until the next line change is ours.
3280          context->last_pc = address;
3281          context->last_pc_valid = true;
3282        }
3283        // Otherwise, if we're already in a valid range for this line,
3284        // just keep going (shouldn't really happen)...
3285      } else if (context->last_pc_valid) {  // and the line number is new
3286        // Add everything from the last entry up until here to the set
3287        for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) {
3288          context->single_step_control_->dex_pcs.insert(dex_pc);
3289        }
3290        context->last_pc_valid = false;
3291      }
3292      return false;  // There may be multiple entries for any given line.
3293    }
3294
3295    ~DebugCallbackContext() {
3296      // If the line number was the last in the position table...
3297      if (last_pc_valid) {
3298        size_t end = code_item_->insns_size_in_code_units_;
3299        for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
3300          single_step_control_->dex_pcs.insert(dex_pc);
3301        }
3302      }
3303    }
3304
3305    SingleStepControl* const single_step_control_;
3306    const int32_t line_number_;
3307    const DexFile::CodeItem* const code_item_;
3308    bool last_pc_valid;
3309    uint32_t last_pc;
3310  };
3311  single_step_control->dex_pcs.clear();
3312  mirror::ArtMethod* m = single_step_control->method;
3313  if (!m->IsNative()) {
3314    const DexFile::CodeItem* const code_item = m->GetCodeItem();
3315    DebugCallbackContext context(single_step_control, line_number, code_item);
3316    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
3317                                     DebugCallbackContext::Callback, NULL, &context);
3318  }
3319
3320  //
3321  // Everything else...
3322  //
3323
3324  single_step_control->step_size = step_size;
3325  single_step_control->step_depth = step_depth;
3326  single_step_control->is_active = true;
3327
3328  if (VLOG_IS_ON(jdwp)) {
3329    VLOG(jdwp) << "Single-step thread: " << *thread;
3330    VLOG(jdwp) << "Single-step step size: " << single_step_control->step_size;
3331    VLOG(jdwp) << "Single-step step depth: " << single_step_control->step_depth;
3332    VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->method);
3333    VLOG(jdwp) << "Single-step current line: " << line_number;
3334    VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->stack_depth;
3335    VLOG(jdwp) << "Single-step dex_pc values:";
3336    for (uint32_t dex_pc : single_step_control->dex_pcs) {
3337      VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
3338    }
3339  }
3340
3341  return JDWP::ERR_NONE;
3342}
3343
3344void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
3345  ScopedObjectAccessUnchecked soa(Thread::Current());
3346  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3347  Thread* thread;
3348  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
3349  if (error == JDWP::ERR_NONE) {
3350    SingleStepControl* single_step_control = thread->GetSingleStepControl();
3351    DCHECK(single_step_control != nullptr);
3352    single_step_control->Clear();
3353  }
3354}
3355
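// Maps a JDWP tag to the corresponding shorty character; all reference tags collapse to 'L'.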
3356static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
3357  switch (tag) {
3358    default:
3359      LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
3360
3361    // Primitives.
3362    case JDWP::JT_BYTE:    return 'B';
3363    case JDWP::JT_CHAR:    return 'C';
3364    case JDWP::JT_FLOAT:   return 'F';
3365    case JDWP::JT_DOUBLE:  return 'D';
3366    case JDWP::JT_INT:     return 'I';
3367    case JDWP::JT_LONG:    return 'J';
3368    case JDWP::JT_SHORT:   return 'S';
3369    case JDWP::JT_VOID:    return 'V';
3370    case JDWP::JT_BOOLEAN: return 'Z';
3371
3372    // Reference types.
3373    case JDWP::JT_ARRAY:
3374    case JDWP::JT_OBJECT:
3375    case JDWP::JT_STRING:
3376    case JDWP::JT_THREAD:
3377    case JDWP::JT_THREAD_GROUP:
3378    case JDWP::JT_CLASS_LOADER:
3379    case JDWP::JT_CLASS_OBJECT:
3380      return 'L';
3381  }
3382}
3383
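// Runs on the JDWP thread: validates the invoke request, stages it in the target (event)
// thread's DebugInvokeReq, resumes that thread so it can perform the call, then waits for it to
// re-suspend before copying out the result.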
3384JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
3385                                  JDWP::RefTypeId class_id, JDWP::MethodId method_id,
3386                                  uint32_t arg_count, uint64_t* arg_values,
3387                                  JDWP::JdwpTag* arg_types, uint32_t options,
3388                                  JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
3389                                  JDWP::ObjectId* pExceptionId) {
3390  ThreadList* thread_list = Runtime::Current()->GetThreadList();
3391
3392  Thread* targetThread = NULL;
3393  DebugInvokeReq* req = NULL;
3394  Thread* self = Thread::Current();
3395  {
3396    ScopedObjectAccessUnchecked soa(self);
3397    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3398    JDWP::JdwpError error = DecodeThread(soa, thread_id, targetThread);
3399    if (error != JDWP::ERR_NONE) {
3400      LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
3401      return error;
3402    }
3403    req = targetThread->GetInvokeReq();
3404    if (!req->ready) {
3405      LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
3406      return JDWP::ERR_INVALID_THREAD;
3407    }
3408
3409    /*
3410     * We currently have a bug where we don't successfully resume the
3411     * target thread if the suspend count is too deep.  We're expected to
3412     * require one "resume" for each "suspend", but when asked to execute
3413     * a method we have to resume fully and then re-suspend it back to the
3414     * same level.  (The easiest way to cause this is to type "suspend"
3415     * multiple times in jdb.)
3416     *
3417     * It's unclear what this means when the event specifies "resume all"
3418     * and some threads are suspended more deeply than others.  This is
3419     * a rare problem, so for now we just prevent it from hanging forever
3420     * by rejecting the method invocation request.  Without this, we will
3421     * be stuck waiting on a suspended thread.
3422     */
3423    int suspend_count;
3424    {
3425      MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
3426      suspend_count = targetThread->GetSuspendCount();
3427    }
3428    if (suspend_count > 1) {
3429      LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count;
3430      return JDWP::ERR_THREAD_SUSPENDED;  // Probably not expected here.
3431    }
3432
3433    JDWP::JdwpError status;
3434    mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id);
3435    if (receiver == ObjectRegistry::kInvalidObject) {
3436      return JDWP::ERR_INVALID_OBJECT;
3437    }
3438
3439    mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id);
3440    if (thread == ObjectRegistry::kInvalidObject) {
3441      return JDWP::ERR_INVALID_OBJECT;
3442    }
3443    // TODO: check that 'thread' is actually a java.lang.Thread!
3444
3445    mirror::Class* c = DecodeClass(class_id, status);
3446    if (c == NULL) {
3447      return status;
3448    }
3449
3450    mirror::ArtMethod* m = FromMethodId(method_id);
3451    if (m->IsStatic() != (receiver == NULL)) {
3452      return JDWP::ERR_INVALID_METHODID;
3453    }
3454    if (m->IsStatic()) {
3455      if (m->GetDeclaringClass() != c) {
3456        return JDWP::ERR_INVALID_METHODID;
3457      }
3458    } else {
3459      if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
3460        return JDWP::ERR_INVALID_METHODID;
3461      }
3462    }
3463
3464    // Check the argument list matches the method.
3465    uint32_t shorty_len = 0;
3466    const char* shorty = m->GetShorty(&shorty_len);
3467    if (shorty_len - 1 != arg_count) {
3468      return JDWP::ERR_ILLEGAL_ARGUMENT;
3469    }
3470
3471    {
3472      StackHandleScope<3> hs(soa.Self());
3473      MethodHelper mh(hs.NewHandle(m));
3474      HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
3475      HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
3476      const DexFile::TypeList* types = m->GetParameterTypeList();
3477      for (size_t i = 0; i < arg_count; ++i) {
3478        if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
3479          return JDWP::ERR_ILLEGAL_ARGUMENT;
3480        }
3481
3482        if (shorty[i + 1] == 'L') {
3483          // Did we really get an argument of an appropriate reference type?
3484          mirror::Class* parameter_type = mh.GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_);
3485          mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i]);
3486          if (argument == ObjectRegistry::kInvalidObject) {
3487            return JDWP::ERR_INVALID_OBJECT;
3488          }
3489          if (argument != NULL && !argument->InstanceOf(parameter_type)) {
3490            return JDWP::ERR_ILLEGAL_ARGUMENT;
3491          }
3492
3493          // Turn the on-the-wire ObjectId into a jobject.
3494          jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
3495          v.l = gRegistry->GetJObject(arg_values[i]);
3496        }
3497      }
3498      // Update in case it moved.
3499      m = mh.GetMethod();
3500    }
3501
3502    req->receiver = receiver;
3503    req->thread = thread;
3504    req->klass = c;
3505    req->method = m;
3506    req->arg_count = arg_count;
3507    req->arg_values = arg_values;
3508    req->options = options;
3509    req->invoke_needed = true;
3510  }
3511
3512  // The fact that we've released the thread list lock is a bit risky --- if the thread goes
3513  // away we're sitting high and dry -- but we must release this before the ResumeAllThreads
3514  // call, and it's unwise to hold it during WaitForSuspend.
3515
3516  {
3517    /*
3518     * We change our (JDWP thread) status, which should be THREAD_RUNNING,
3519     * so we can suspend for a GC if the invoke request causes us to
3520     * run out of memory.  It's also a good idea to change it before locking
3521     * the invokeReq mutex, although that should never be held for long.
3522     */
3523    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
3524
3525    VLOG(jdwp) << "    Transferring control to event thread";
3526    {
3527      MutexLock mu(self, req->lock);
3528
3529      if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3530        VLOG(jdwp) << "      Resuming all threads";
3531        thread_list->UndoDebuggerSuspensions();
3532      } else {
3533        VLOG(jdwp) << "      Resuming event thread only";
3534        thread_list->Resume(targetThread, true);
3535      }
3536
3537      // Wait for the request to finish executing.
3538      while (req->invoke_needed) {
3539        req->cond.Wait(self);
3540      }
3541    }
3542    VLOG(jdwp) << "    Control has returned from event thread";
3543
3544    /* wait for thread to re-suspend itself */
3545    SuspendThread(thread_id, false /* request_suspension */);
3546    self->TransitionFromSuspendedToRunnable();
3547  }
3548
3549  /*
3550   * Suspend the threads.  We waited for the target thread to suspend
3551   * itself, so all we need to do is suspend the others.
3552   *
3553   * The SuspendAllForDebugger() call will double-suspend the event thread,
3554   * so we want to resume the target thread once to keep the books straight.
3555   */
3556  if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3557    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3558    VLOG(jdwp) << "      Suspending all threads";
3559    thread_list->SuspendAllForDebugger();
3560    self->TransitionFromSuspendedToRunnable();
3561    VLOG(jdwp) << "      Resuming event thread to balance the count";
3562    thread_list->Resume(targetThread, true);
3563  }
3564
3565  // Copy the result.
3566  *pResultTag = req->result_tag;
3567  if (IsPrimitiveTag(req->result_tag)) {
3568    *pResultValue = req->result_value.GetJ();
3569  } else {
3570    *pResultValue = gRegistry->Add(req->result_value.GetL());
3571  }
3572  *pExceptionId = req->exception;
3573  return req->error;
3574}
3575
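// Runs on the event thread itself: performs the invocation staged by InvokeMethod above and
// preserves any already-pending exception across the call.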
3576void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
3577  ScopedObjectAccess soa(Thread::Current());
3578
3579  // We can be called while an exception is pending. We need
3580  // to preserve that across the method invocation.
3581  StackHandleScope<4> hs(soa.Self());
3582  auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr);
3583  auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
3584  auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr);
3585  uint32_t old_throw_dex_pc;
3586  bool old_exception_report_flag;
3587  {
3588    ThrowLocation old_throw_location;
3589    mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
3590    old_throw_this_object.Assign(old_throw_location.GetThis());
3591    old_throw_method.Assign(old_throw_location.GetMethod());
3592    old_exception.Assign(old_exception_obj);
3593    old_throw_dex_pc = old_throw_location.GetDexPc();
3594    old_exception_report_flag = soa.Self()->IsExceptionReportedToInstrumentation();
3595    soa.Self()->ClearException();
3596  }
3597
3598  // Translate the method through the vtable, unless the debugger wants to suppress it.
3599  Handle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
3600  if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != NULL) {
3601    mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get());
3602    if (actual_method != m.Get()) {
3603      VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method);
3604      m.Assign(actual_method);
3605    }
3606  }
3607  VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get())
3608             << " receiver=" << pReq->receiver
3609             << " arg_count=" << pReq->arg_count;
3610  CHECK(m.Get() != nullptr);
3611
3612  CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
3613
3614  pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()),
3615                                         reinterpret_cast<jvalue*>(pReq->arg_values));
3616
3617  mirror::Throwable* exception = soa.Self()->GetException(NULL);
3618  soa.Self()->ClearException();
3619  pReq->exception = gRegistry->Add(exception);
3620  pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty());
3621  if (pReq->exception != 0) {
3622    VLOG(jdwp) << "  JDWP invocation returning with exception=" << exception
3623        << " " << exception->Dump();
3624    pReq->result_value.SetJ(0);
3625  } else if (pReq->result_tag == JDWP::JT_OBJECT) {
3626    /* if no exception thrown, examine object result more closely */
3627    JDWP::JdwpTag new_tag = TagFromObject(soa, pReq->result_value.GetL());
3628    if (new_tag != pReq->result_tag) {
3629      VLOG(jdwp) << "  JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
3630      pReq->result_tag = new_tag;
3631    }
3632
3633    /*
3634     * Register the object.  We don't actually need an ObjectId yet,
3635     * but we do need to be sure that the GC won't move or discard the
3636     * object when we switch out of RUNNING.  The ObjectId conversion
3637     * will add the object to the "do not touch" list.
3638     *
3639     * We can't use the "tracked allocation" mechanism here because
3640     * the object is going to be handed off to a different thread.
3641     */
3642    gRegistry->Add(pReq->result_value.GetL());
3643  }
3644
3645  if (old_exception.Get() != NULL) {
3646    ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
3647                                         old_throw_dex_pc);
3648    soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
3649    soa.Self()->SetExceptionReportedToInstrumentation(old_exception_report_flag);
3650  }
3651}
3652
3653/*
3654 * "request" contains a full JDWP packet, possibly with multiple chunks.  We
3655 * need to process each, accumulate the replies, and ship the whole thing
3656 * back.
3657 *
3658 * Returns "true" if we have a reply.  The reply buffer is newly allocated,
3659 * and includes the chunk type/length, followed by the data.
3660 *
3661 * OLD-TODO: we currently assume that the request and reply include a single
3662 * chunk.  If this becomes inconvenient we will need to adapt.
3663 */
3664bool Dbg::DdmHandlePacket(JDWP::Request& request, uint8_t** pReplyBuf, int* pReplyLen) {
3665  Thread* self = Thread::Current();
3666  JNIEnv* env = self->GetJniEnv();
3667
3668  uint32_t type = request.ReadUnsigned32("type");
3669  uint32_t length = request.ReadUnsigned32("length");
3670
3671  // Create a byte[] corresponding to 'request'.
3672  size_t request_length = request.size();
3673  ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
3674  if (dataArray.get() == NULL) {
3675    LOG(WARNING) << "byte[] allocation failed: " << request_length;
3676    env->ExceptionClear();
3677    return false;
3678  }
3679  env->SetByteArrayRegion(dataArray.get(), 0, request_length, reinterpret_cast<const jbyte*>(request.data()));
3680  request.Skip(request_length);
3681
3682  // Run through and find all chunks.  [Currently just find the first.]
3683  ScopedByteArrayRO contents(env, dataArray.get());
3684  if (length != request_length) {
3685    LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zd)", length, request_length);
3686    return false;
3687  }
3688
3689  // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
3690  ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3691                                                                 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
3692                                                                 type, dataArray.get(), 0, length));
3693  if (env->ExceptionCheck()) {
3694    LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
3695    env->ExceptionDescribe();
3696    env->ExceptionClear();
3697    return false;
3698  }
3699
3700  if (chunk.get() == NULL) {
3701    return false;
3702  }
3703
3704  /*
3705   * Pull the pieces out of the chunk.  We copy the results into a
3706   * newly-allocated buffer that the caller can free.  We don't want to
3707   * continue using the Chunk object because nothing has a reference to it.
3708   *
3709   * We could avoid this by returning type/data/offset/length and having
3710   * the caller be aware of the object lifetime issues, but that
3711   * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
3712   * if we have responses for multiple chunks.
3713   *
3714   * So we're pretty much stuck with copying data around multiple times.
3715   */
3716  ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
3717  jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
3718  length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
3719  type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);
3720
3721  VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
3722  if (length == 0 || replyData.get() == NULL) {
3723    return false;
3724  }
3725
3726  const int kChunkHdrLen = 8;
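  // The chunk header is the 4-byte type followed by the 4-byte length, both big-endian; the
  // payload follows immediately after.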
3727  uint8_t* reply = new uint8_t[length + kChunkHdrLen];
3728  if (reply == NULL) {
3729    LOG(WARNING) << "malloc failed: " << (length + kChunkHdrLen);
3730    return false;
3731  }
3732  JDWP::Set4BE(reply + 0, type);
3733  JDWP::Set4BE(reply + 4, length);
3734  env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
3735
3736  *pReplyBuf = reply;
3737  *pReplyLen = length + kChunkHdrLen;
3738
3739  VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
3740  return true;
3741}
3742
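// Tells the managed org.apache.harmony.dalvik.ddmc.DdmServer that a DDM client has connected or
// disconnected.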
3743void Dbg::DdmBroadcast(bool connect) {
3744  VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
3745
3746  Thread* self = Thread::Current();
3747  if (self->GetState() != kRunnable) {
3748    LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
3749    /* try anyway? */
3750  }
3751
3752  JNIEnv* env = self->GetJniEnv();
3753  jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
3754  env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3755                            WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
3756                            event);
3757  if (env->ExceptionCheck()) {
3758    LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
3759    env->ExceptionDescribe();
3760    env->ExceptionClear();
3761  }
3762}
3763
3764void Dbg::DdmConnected() {
3765  Dbg::DdmBroadcast(true);
3766}
3767
3768void Dbg::DdmDisconnected() {
3769  Dbg::DdmBroadcast(false);
3770  gDdmThreadNotification = false;
3771}
3772
3773/*
3774 * Send a notification when a thread starts, stops, or changes its name.
3775 *
3776 * Because we broadcast the full set of threads when the notifications are
3777 * first enabled, it's possible for "thread" to be actively executing.
3778 */
3779void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
3780  if (!gDdmThreadNotification) {
3781    return;
3782  }
3783
3784  if (type == CHUNK_TYPE("THDE")) {
3785    uint8_t buf[4];
3786    JDWP::Set4BE(&buf[0], t->GetThreadId());
3787    Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
3788  } else {
3789    CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
3790    ScopedObjectAccessUnchecked soa(Thread::Current());
3791    StackHandleScope<1> hs(soa.Self());
3792    Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
3793    size_t char_count = (name.Get() != NULL) ? name->GetLength() : 0;
3794    const jchar* chars = (name.Get() != NULL) ? name->GetCharArray()->GetData() : NULL;
3795
3796    std::vector<uint8_t> bytes;
3797    JDWP::Append4BE(bytes, t->GetThreadId());
3798    JDWP::AppendUtf16BE(bytes, chars, char_count);
3799    CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
3800    Dbg::DdmSendChunk(type, bytes);
3801  }
3802}
3803
3804void Dbg::DdmSetThreadNotification(bool enable) {
3805  // Enable/disable thread notifications.
3806  gDdmThreadNotification = enable;
3807  if (enable) {
3808    // Suspend the VM then post thread start notifications for all threads. Threads attaching will
3809    // see a suspension in progress and block until that ends. They then post their own start
3810    // notification.
3811    SuspendVM();
3812    std::list<Thread*> threads;
3813    Thread* self = Thread::Current();
3814    {
3815      MutexLock mu(self, *Locks::thread_list_lock_);
3816      threads = Runtime::Current()->GetThreadList()->GetList();
3817    }
3818    {
3819      ScopedObjectAccess soa(self);
3820      for (Thread* thread : threads) {
3821        Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
3822      }
3823    }
3824    ResumeVM();
3825  }
3826}
3827
3828void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
3829  if (IsDebuggerActive()) {
3830    ScopedObjectAccessUnchecked soa(Thread::Current());
3831    JDWP::ObjectId id = gRegistry->Add(t->GetPeer());
3832    gJdwpState->PostThreadChange(id, type == CHUNK_TYPE("THCR"));
3833  }
3834  Dbg::DdmSendThreadNotification(t, type);
3835}
3836
3837void Dbg::PostThreadStart(Thread* t) {
3838  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
3839}
3840
3841void Dbg::PostThreadDeath(Thread* t) {
3842  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
3843}
3844
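// Convenience wrapper: packages 'buf' as a single iovec and forwards to DdmSendChunkV().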
3845void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
3846  CHECK(buf != NULL);
3847  iovec vec[1];
3848  vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
3849  vec[0].iov_len = byte_count;
3850  Dbg::DdmSendChunkV(type, vec, 1);
3851}
3852
3853void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
3854  DdmSendChunk(type, bytes.size(), &bytes[0]);
3855}
3856
3857void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
3858  if (gJdwpState == NULL) {
3859    VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
3860  } else {
3861    gJdwpState->DdmSendChunkV(type, iov, iov_count);
3862  }
3863}
3864
3865int Dbg::DdmHandleHpifChunk(HpifWhen when) {
3866  if (when == HPIF_WHEN_NOW) {
3867    DdmSendHeapInfo(when);
3868    return true;
3869  }
3870
3871  if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
3872    LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
3873    return false;
3874  }
3875
3876  gDdmHpifWhen = when;
3877  return true;
3878}
3879
3880bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
3881  if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
3882    LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
3883    return false;
3884  }
3885
3886  if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
3887    LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
3888    return false;
3889  }
3890
3891  if (native) {
3892    gDdmNhsgWhen = when;
3893    gDdmNhsgWhat = what;
3894  } else {
3895    gDdmHpsgWhen = when;
3896    gDdmHpsgWhat = what;
3897  }
3898  return true;
3899}
3900
3901void Dbg::DdmSendHeapInfo(HpifWhen reason) {
3902  // If there's a one-shot 'when', reset it.
3903  if (reason == gDdmHpifWhen) {
3904    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
3905      gDdmHpifWhen = HPIF_WHEN_NEVER;
3906    }
3907  }
3908
3909  /*
3910   * Chunk HPIF (client --> server)
3911   *
3912   * Heap Info. General information about the heap,
3913   * suitable for a summary display.
3914   *
3915   *   [u4]: number of heaps
3916   *
3917   *   For each heap:
3918   *     [u4]: heap ID
3919   *     [u8]: timestamp in ms since Unix epoch
3920   *     [u1]: capture reason (same as 'when' value from server)
3921   *     [u4]: max heap size in bytes (-Xmx)
3922   *     [u4]: current heap size in bytes
3923   *     [u4]: current number of bytes allocated
3924   *     [u4]: current number of objects allocated
3925   */
3926  uint8_t heap_count = 1;
3927  gc::Heap* heap = Runtime::Current()->GetHeap();
3928  std::vector<uint8_t> bytes;
3929  JDWP::Append4BE(bytes, heap_count);
3930  JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
3931  JDWP::Append8BE(bytes, MilliTime());
3932  JDWP::Append1BE(bytes, reason);
3933  JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
3934  JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
3935  JDWP::Append4BE(bytes, heap->GetBytesAllocated());
3936  JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
3937  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
3938  Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
3939}
3940
3941enum HpsgSolidity {
3942  SOLIDITY_FREE = 0,
3943  SOLIDITY_HARD = 1,
3944  SOLIDITY_SOFT = 2,
3945  SOLIDITY_WEAK = 3,
3946  SOLIDITY_PHANTOM = 4,
3947  SOLIDITY_FINALIZABLE = 5,
3948  SOLIDITY_SWEEP = 6,
3949};
3950
3951enum HpsgKind {
3952  KIND_OBJECT = 0,
3953  KIND_CLASS_OBJECT = 1,
3954  KIND_ARRAY_1 = 2,
3955  KIND_ARRAY_2 = 3,
3956  KIND_ARRAY_4 = 4,
3957  KIND_ARRAY_8 = 5,
3958  KIND_UNKNOWN = 6,
3959  KIND_NATIVE = 7,
3960};
3961
3962#define HPSG_PARTIAL (1<<7)
3963#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
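// For example, HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT) packs kind 1 into bits 3-5 and
// solidity 1 into bits 0-2, giving 0x09.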
3964
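// Accumulates heap segment data into a buffer and flushes it to the debugger as DDM chunks:
// HPSG (merged) or HPSO (distinct objects) for the managed heap, NHSG for the native heap.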
3965class HeapChunkContext {
3966 public:
3967  // Maximum chunk size.  Obtain this from the formula:
3968  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
3969  HeapChunkContext(bool merge, bool native)
3970      : buf_(16384 - 16),
3971        type_(0),
3972        merge_(merge),
3973        chunk_overhead_(0) {
3974    Reset();
3975    if (native) {
3976      type_ = CHUNK_TYPE("NHSG");
3977    } else {
3978      type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
3979    }
3980  }
3981
3982  ~HeapChunkContext() {
3983    if (p_ > &buf_[0]) {
3984      Flush();
3985    }
3986  }
3987
3988  void SetChunkOverhead(size_t chunk_overhead) {
3989    chunk_overhead_ = chunk_overhead;
3990  }
3991
3992  void ResetStartOfNextChunk() {
3993    startOfNextMemoryChunk_ = nullptr;
3994  }
3995
3996  void EnsureHeader(const void* chunk_ptr) {
3997    if (!needHeader_) {
3998      return;
3999    }
4000
4001    // Start a new HPSx chunk.
4002    JDWP::Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
4003    JDWP::Write1BE(&p_, 8);  // Size of allocation unit, in bytes.
4004
4005    JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
4006    JDWP::Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
4007    // [u4]: length of piece, in allocation units
4008    // We won't know this until we're done, so save the offset and stuff in a dummy value.
4009    pieceLenField_ = p_;
4010    JDWP::Write4BE(&p_, 0x55555555);
4011    needHeader_ = false;
4012  }
4013
4014  void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4015    if (pieceLenField_ == NULL) {
4016      // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
4017      CHECK(needHeader_);
4018      return;
4019    }
4020    // Patch the "length of piece" field.
4021    CHECK_LE(&buf_[0], pieceLenField_);
4022    CHECK_LE(pieceLenField_, p_);
4023    JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
4024
4025    Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
4026    Reset();
4027  }
4028
4029  static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg)
4030      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4031                            Locks::mutator_lock_) {
4032    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(start, end, used_bytes);
4033  }
4034
4035 private:
4036  enum { ALLOCATION_UNIT_SIZE = 8 };
4037
4038  void Reset() {
4039    p_ = &buf_[0];
4040    ResetStartOfNextChunk();
4041    totalAllocationUnits_ = 0;
4042    needHeader_ = true;
4043    pieceLenField_ = NULL;
4044  }
4045
4046  void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes)
4047      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4048                            Locks::mutator_lock_) {
4049    // Note: heap callbacks cannot manipulate the heap they are crawling, so the following code
4050    // takes care not to allocate memory, by ensuring buf_ is already of the correct size.
4051    if (used_bytes == 0) {
4052      if (start == NULL) {
4053        // Reset for the start of a new heap.
4054        startOfNextMemoryChunk_ = NULL;
4055        Flush();
4056      }
4057      // Only process in-use memory so that free-region information
4058      // also includes dlmalloc bookkeeping.
4059      return;
4060    }
4061
4062    /* If we're looking at the native heap, we'll just return
4063     * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
4064     */
4065    bool native = type_ == CHUNK_TYPE("NHSG");
4066
4067    // TODO: I'm not sure using the start of the next chunk works well with multiple spaces. We
4068    // shouldn't count gaps between spaces as free memory.
4069    if (startOfNextMemoryChunk_ != NULL) {
4070      // Transmit any pending free memory. Native free memory over
4071      // kMaxFreeLen is likely the result of mmaps, so don't report it.
4072      // If this is not free memory, start a new segment.
4073      bool flush = true;
4074      if (start > startOfNextMemoryChunk_) {
4075        const size_t kMaxFreeLen = 2 * kPageSize;
4076        void* freeStart = startOfNextMemoryChunk_;
4077        void* freeEnd = start;
4078        size_t freeLen = reinterpret_cast<char*>(freeEnd) - reinterpret_cast<char*>(freeStart);
4079        if (!native || freeLen < kMaxFreeLen) {
4080          AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen);
4081          flush = false;
4082        }
4083      }
4084      if (flush) {
4085        startOfNextMemoryChunk_ = NULL;
4086        Flush();
4087      }
4088    }
4089    mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
4090
4091    // Determine the type of this chunk.
4092    // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
4093    // If it's the same, we should combine them.
4094    uint8_t state = ExamineObject(obj, native);
4095    AppendChunk(state, start, used_bytes + chunk_overhead_);
4096    startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4097  }
4098
4099  void AppendChunk(uint8_t state, void* ptr, size_t length)
4100      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4101    // Make sure there's enough room left in the buffer.
4102    // We need two bytes for each full or partial run of 256 allocation units used by the chunk,
4103    // plus 17 bytes for any header.
4104    size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
4105    size_t bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4106    if (bytesLeft < needed) {
4107      Flush();
4108    }
4109
4110    bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
4111    if (bytesLeft < needed) {
4112      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", "
4113          << needed << " bytes)";
4114      return;
4115    }
4116    EnsureHeader(ptr);
4117    // Write out the chunk description.
4118    length /= ALLOCATION_UNIT_SIZE;   // Convert to allocation units.
4119    totalAllocationUnits_ += length;
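    // Example: 520 allocation units are emitted as two (state | HPSG_PARTIAL, 255) pairs of
    // 256 units each, followed by (state, 7) for the remaining 8 units.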
4120    while (length > 256) {
4121      *p_++ = state | HPSG_PARTIAL;
4122      *p_++ = 255;     // length - 1
4123      length -= 256;
4124    }
4125    *p_++ = state;
4126    *p_++ = length - 1;
4127  }
4128
4129  uint8_t ExamineObject(mirror::Object* o, bool is_native_heap)
4130      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
4131    if (o == NULL) {
4132      return HPSG_STATE(SOLIDITY_FREE, 0);
4133    }
4134
4135    // It's an allocated chunk. Figure out what it is.
4136
4137    // If we're looking at the native heap, we'll just return
4138    // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
4139    if (is_native_heap) {
4140      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4141    }
4142
4143    if (!Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) {
4144      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4145    }
4146
4147    mirror::Class* c = o->GetClass();
4148    if (c == NULL) {
4149      // The object was probably just created but hasn't been initialized yet.
4150      return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4151    }
4152
4153    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
4154      LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
4155      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4156    }
4157
4158    if (c->IsClassClass()) {
4159      return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
4160    }
4161
4162    if (c->IsArrayClass()) {
4163      if (o->IsObjectArray()) {
4164        return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4165      }
4166      switch (c->GetComponentSize()) {
4167      case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
4168      case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
4169      case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4170      case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
4171      }
4172    }
4173
4174    return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4175  }
4176
4177  std::vector<uint8_t> buf_;
4178  uint8_t* p_;
4179  uint8_t* pieceLenField_;
4180  void* startOfNextMemoryChunk_;
4181  size_t totalAllocationUnits_;
4182  uint32_t type_;
4183  bool merge_;
4184  bool needHeader_;
4185  size_t chunk_overhead_;
4186
4187  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
4188};
4189
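// Adapts a per-object visitor to HeapChunkContext::HeapChunkCallback: each object is reported as
// one chunk covering its aligned size, since bump pointer spaces have no per-chunk overhead.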
4190static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
4191    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
4192  const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
4193  HeapChunkContext::HeapChunkCallback(
4194      obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
4195}
4196
4197void Dbg::DdmSendHeapSegments(bool native) {
4198  Dbg::HpsgWhen when;
4199  Dbg::HpsgWhat what;
4200  if (!native) {
4201    when = gDdmHpsgWhen;
4202    what = gDdmHpsgWhat;
4203  } else {
4204    when = gDdmNhsgWhen;
4205    what = gDdmNhsgWhat;
4206  }
4207  if (when == HPSG_WHEN_NEVER) {
4208    return;
4209  }
4210
4211  // Figure out what kind of chunks we'll be sending.
4212  CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS) << static_cast<int>(what);
4213
4214  // First, send a heap start chunk.
4215  uint8_t heap_id[4];
4216  JDWP::Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
4217  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
4218
4219  Thread* self = Thread::Current();
4220
4221  // To allow the Walk/InspectAll() below to exclusively-lock the
4222  // mutator lock, temporarily release the shared access to the
4223  // mutator lock here by transitioning to the suspended state.
4224  Locks::mutator_lock_->AssertSharedHeld(self);
4225  self->TransitionFromRunnableToSuspended(kSuspended);
4226
4227  // Send a series of heap segment chunks.
4228  HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
4229  if (native) {
4230#ifdef USE_DLMALLOC
4231    dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
4232#else
4233    UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
4234#endif
4235  } else {
4236    gc::Heap* heap = Runtime::Current()->GetHeap();
4237    for (const auto& space : heap->GetContinuousSpaces()) {
4238      if (space->IsDlMallocSpace()) {
4239        // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
4240        // allocation then the first sizeof(size_t) may belong to it.
4241        context.SetChunkOverhead(sizeof(size_t));
4242        space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4243      } else if (space->IsRosAllocSpace()) {
4244        context.SetChunkOverhead(0);
4245        space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4246      } else if (space->IsBumpPointerSpace()) {
4247        context.SetChunkOverhead(0);
4248        ReaderMutexLock mu(self, *Locks::mutator_lock_);
4249        WriterMutexLock mu2(self, *Locks::heap_bitmap_lock_);
4250        space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
4251      } else {
4252        UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
4253      }
4254      context.ResetStartOfNextChunk();
4255    }
4256    // Walk the large objects; these are not in the AllocSpace.
4257    context.SetChunkOverhead(0);
4258    heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
4259  }
4260
4261  // Reacquire shared access to the mutator lock.
4262  self->TransitionFromSuspendedToRunnable();
4263  Locks::mutator_lock_->AssertSharedHeld(self);
4264
4265  // Finally, send a heap end chunk.
4266  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
4267}
4268
4269static size_t GetAllocTrackerMax() {
4270#ifdef HAVE_ANDROID_OS
4271  // Check whether there's a system property overriding the number of records.
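  // For example (illustrative): "adb shell setprop dalvik.vm.allocTrackerMax 16384"
  // requests 16384 records; the value must parse completely and be a power of
  // two, otherwise kDefaultNumAllocRecords is used.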
4272  const char* propertyName = "dalvik.vm.allocTrackerMax";
4273  char allocRecordMaxString[PROPERTY_VALUE_MAX];
4274  if (property_get(propertyName, allocRecordMaxString, "") > 0) {
4275    char* end;
4276    size_t value = strtoul(allocRecordMaxString, &end, 10);
4277    if (*end != '\0') {
4278      LOG(ERROR) << "Ignoring  " << propertyName << " '" << allocRecordMaxString
4279                 << "' --- invalid";
4280      return kDefaultNumAllocRecords;
4281    }
4282    if (!IsPowerOfTwo(value)) {
4283      LOG(ERROR) << "Ignoring  " << propertyName << " '" << allocRecordMaxString
4284                 << "' --- not power of two";
4285      return kDefaultNumAllocRecords;
4286    }
4287    return value;
4288  }
4289#endif
4290  return kDefaultNumAllocRecords;
4291}
4292
4293void Dbg::SetAllocTrackingEnabled(bool enable) {
4294  Thread* self = Thread::Current();
4295  if (enable) {
4296    {
4297      MutexLock mu(self, *Locks::alloc_tracker_lock_);
4298      if (recent_allocation_records_ != nullptr) {
4299        return;  // Already enabled, bail.
4300      }
4301      alloc_record_max_ = GetAllocTrackerMax();
4302      LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
4303                << kMaxAllocRecordStackDepth << " frames, taking "
4304                << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
4305      DCHECK_EQ(alloc_record_head_, 0U);
4306      DCHECK_EQ(alloc_record_count_, 0U);
4307      recent_allocation_records_ = new AllocRecord[alloc_record_max_];
4308      CHECK(recent_allocation_records_ != nullptr);
4309    }
4310    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4311  } else {
4312    {
4313      ScopedObjectAccess soa(self);  // For type_cache_.Clear();
4314      MutexLock mu(self, *Locks::alloc_tracker_lock_);
4315      if (recent_allocation_records_ == nullptr) {
4316        return;  // Already disabled, bail.
4317      }
4318      LOG(INFO) << "Disabling alloc tracker";
4319      delete[] recent_allocation_records_;
4320      recent_allocation_records_ = nullptr;
4321      alloc_record_head_ = 0;
4322      alloc_record_count_ = 0;
4323      type_cache_.Clear();
4324    }
4325    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
4326    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
4327  }
4328}
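
// Illustrative lifecycle, as driven by a DDMS client over DDM (the REAE chunk
// toggles tracking; REAL fetches the records):
//   Dbg::SetAllocTrackingEnabled(true);   // Allocation entry points are now
//                                         // instrumented into RecordAllocation().
//   ... application code allocates ...
//   jbyteArray data = Dbg::GetRecentAllocations();  // Snapshot for the client.
//   Dbg::SetAllocTrackingEnabled(false);  // Frees recent_allocation_records_.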
4329
4330struct AllocRecordStackVisitor : public StackVisitor {
4331  AllocRecordStackVisitor(Thread* thread, AllocRecord* record)
4332      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
4333      : StackVisitor(thread, nullptr), record(record), depth(0) {}
4334
4335  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
4336  // annotalysis.
4337  bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
4338    if (depth >= kMaxAllocRecordStackDepth) {
4339      return false;
4340    }
4341    mirror::ArtMethod* m = GetMethod();
4342    if (!m->IsRuntimeMethod()) {
4343      record->StackElement(depth)->SetMethod(m);
4344      record->StackElement(depth)->SetDexPc(GetDexPc());
4345      ++depth;
4346    }
4347    return true;
4348  }
4349
4350  ~AllocRecordStackVisitor() {
4351    // Clear out any unused stack trace elements.
4352    for (; depth < kMaxAllocRecordStackDepth; ++depth) {
4353      record->StackElement(depth)->SetMethod(nullptr);
4354      record->StackElement(depth)->SetDexPc(0);
4355    }
4356  }
4357
4358  AllocRecord* record;
4359  size_t depth;
4360};
4361
4362void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) {
4363  Thread* self = Thread::Current();
4364  CHECK(self != nullptr);
4365
4366  MutexLock mu(self, *Locks::alloc_tracker_lock_);
4367  if (recent_allocation_records_ == nullptr) {
4368    // In the process of shutting down recording, bail.
4369    return;
4370  }
4371
4372  // Advance the head, wrapping around at the end of the circular buffer.
4373  if (++alloc_record_head_ == alloc_record_max_) {
4374    alloc_record_head_ = 0;
4375  }
4376
4377  // Fill in the basics.
4378  AllocRecord* record = &recent_allocation_records_[alloc_record_head_];
4379  record->SetType(type);
4380  record->SetByteCount(byte_count);
4381  record->SetThinLockId(self->GetThreadId());
4382
4383  // Fill in the stack trace.
4384  AllocRecordStackVisitor visitor(self, record);
4385  visitor.WalkStack();
4386
4387  if (alloc_record_count_ < alloc_record_max_) {
4388    ++alloc_record_count_;
4389  }
4390}
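
// Illustrative trace (assuming alloc_record_max_ == 4): successive calls write
// records at indices 1, 2, 3, 0, 1, 2, ...; after six allocations
// alloc_record_head_ == 2 and alloc_record_count_ == 4, so the surviving
// records (allocations 3 through 6) sit at indices 3, 0, 1 and 2.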
4391
4392// Returns the index of the oldest element in the circular buffer.
4393//
4394// We point at the most-recently-written record, so if alloc_record_count_ is 1
4395// we want to use the current element.  Take "head+1" and subtract count
4396// from it.
4397//
4398// We need to handle underflow in our circular buffer, so we add
4399// alloc_record_max_ and then mask it back down.
4400size_t Dbg::HeadIndex() {
4401  return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) &
4402      (Dbg::alloc_record_max_ - 1);
4403}
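
// Worked example (continuing the illustrative trace above): with
// alloc_record_max_ == 4, head == 2 and count == 4, HeadIndex() returns
// (2 + 1 + 4 - 4) & 3 == 3, the index of the oldest surviving record.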
4404
4405void Dbg::DumpRecentAllocations() {
4406  ScopedObjectAccess soa(Thread::Current());
4407  MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
4408  if (recent_allocation_records_ == nullptr) {
4409    LOG(INFO) << "Not recording tracked allocations";
4410    return;
4411  }
4412
4413  // "i" is the head of the list.  We want to start at the end of the
4414  // list and move forward to the tail.
4415  size_t i = HeadIndex();
4416  const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
4417  uint16_t count = capped_count;
4418
4419  LOG(INFO) << "Tracked allocations, (head=" << alloc_record_head_ << " count=" << count << ")";
4420  while (count--) {
4421    AllocRecord* record = &recent_allocation_records_[i];
4422
4423    LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->ThinLockId(), record->ByteCount())
4424              << PrettyClass(record->Type());
4425
4426    for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
4427      AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame);
4428      mirror::ArtMethod* m = stack_element->Method();
4429      if (m == nullptr) {
4430        break;
4431      }
4432      LOG(INFO) << "    " << PrettyMethod(m) << " line " << stack_element->LineNumber();
4433    }
4434
4435    // Pause periodically to help logcat catch up.
4436    if ((count % 5) == 0) {
4437      usleep(40000);
4438    }
4439
4440    i = (i + 1) & (alloc_record_max_ - 1);
4441  }
4442}
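
// The loop above produces log lines of this shape (hypothetical values):
//   Tracked allocations (head=2 count=2)
//    Thread 12     24 bytes java.lang.String
//       java.lang.String java.lang.AbstractStringBuilder.toString() line 407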
4443
4444class StringTable {
4445 public:
4446  StringTable() {
4447  }
4448
4449  void Add(const std::string& str) {
4450    table_.insert(str);
4451  }
4452
4453  void Add(const char* str) {
4454    table_.insert(str);
4455  }
4456
4457  size_t IndexOf(const char* s) const {
4458    auto it = table_.find(s);
4459    if (it == table_.end()) {
4460      LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
4461    }
4462    return std::distance(table_.begin(), it);
4463  }
4464
4465  size_t Size() const {
4466    return table_.size();
4467  }
4468
4469  void WriteTo(std::vector<uint8_t>& bytes) const {
4470    for (const std::string& str : table_) {
4471      const char* s = str.c_str();
4472      size_t s_len = CountModifiedUtf8Chars(s);
4473      std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);
4474      ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
4475      JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
4476    }
4477  }
4478
4479 private:
4480  std::set<std::string> table_;
4481  DISALLOW_COPY_AND_ASSIGN(StringTable);
4482};
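
// Note (illustrative): IndexOf() is a position within the sorted set, so
// indexes are only stable once all Add() calls have completed. For example:
//   StringTable t;
//   t.Add("Lfoo;");
//   t.Add("Lbar;");  // Sorts ahead of "Lfoo;".
//   // Now t.IndexOf("Lbar;") == 0 and t.IndexOf("Lfoo;") == 1.
// GetRecentAllocations() below relies on this by fully populating its tables
// (part 1) before emitting any indexes (part 2).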
4483
4484static const char* GetMethodSourceFile(mirror::ArtMethod* method)
4485    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4486  DCHECK(method != nullptr);
4487  const char* source_file = method->GetDeclaringClassSourceFile();
4488  return (source_file != nullptr) ? source_file : "";
4489}
4490
4491/*
4492 * The data we send to DDMS contains everything we have recorded.
4493 *
4494 * Message header (all values big-endian):
4495 * (1b) message header len (to allow future expansion); includes itself
4496 * (1b) entry header len
4497 * (1b) stack frame len
4498 * (2b) number of entries
4499 * (4b) offset to string table from start of message
4500 * (2b) number of class name strings
4501 * (2b) number of method name strings
4502 * (2b) number of source file name strings
4503 * For each entry:
4504 *   (4b) total allocation size
4505 *   (2b) thread id
4506 *   (2b) allocated object's class name index
4507 *   (1b) stack depth
4508 *   For each stack frame:
4509 *     (2b) method's class name
4510 *     (2b) method name
4511 *     (2b) method source file
4512 *     (2b) line number, clipped to 32767; -2 if native; -1 if no source
4513 * (xb) class name strings
4514 * (xb) method name strings
4515 * (xb) source file strings
4516 *
4517 * As with other DDM traffic, strings are sent as a 4-byte length
4518 * followed by UTF-16 data.
4519 *
4520 * We send up 16-bit unsigned indexes into string tables.  In theory there
4521 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
4522 * each table, but in practice there should be far fewer.
4523 *
4524 * The chief reason for using a string table here is to keep the size of
4525 * the DDMS message to a minimum.  This is partly to make the protocol
4526 * efficient, but also because we have to form the whole thing up all at
4527 * once in a memory buffer.
4528 *
4529 * We use separate string tables for class names, method names, and source
4530 * files to keep the indexes small.  There will generally be no overlap
4531 * between the contents of these tables.
4532 */
4533jbyteArray Dbg::GetRecentAllocations() {
4534  if (false) {  // Flip to true to also dump the records to the log.
4535    DumpRecentAllocations();
4536  }
4537
4538  Thread* self = Thread::Current();
4539  std::vector<uint8_t> bytes;
4540  {
4541    MutexLock mu(self, *Locks::alloc_tracker_lock_);
4542    //
4543    // Part 1: Generate the string tables.
4544    //
4545    StringTable class_names;
4546    StringTable method_names;
4547    StringTable filenames;
4548
4549    const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
4550    uint16_t count = capped_count;
4551    size_t idx = HeadIndex();
4552    while (count--) {
4553      AllocRecord* record = &recent_allocation_records_[idx];
4554      std::string temp;
4555      class_names.Add(record->Type()->GetDescriptor(&temp));
4556      for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
4557        mirror::ArtMethod* m = record->StackElement(i)->Method();
4558        if (m != nullptr) {
4559          class_names.Add(m->GetDeclaringClassDescriptor());
4560          method_names.Add(m->GetName());
4561          filenames.Add(GetMethodSourceFile(m));
4562        }
4563      }
4564
4565      idx = (idx + 1) & (alloc_record_max_ - 1);
4566    }
4567
4568    LOG(INFO) << "allocation records: " << capped_count;
4569
4570    //
4571    // Part 2: Generate the output and store it in the buffer.
4572    //
4573
4574    // (1b) message header len (to allow future expansion); includes itself
4575    // (1b) entry header len
4576    // (1b) stack frame len
4577    const int kMessageHeaderLen = 15;
4578    const int kEntryHeaderLen = 9;
4579    const int kStackFrameLen = 8;
4580    JDWP::Append1BE(bytes, kMessageHeaderLen);
4581    JDWP::Append1BE(bytes, kEntryHeaderLen);
4582    JDWP::Append1BE(bytes, kStackFrameLen);
4583
4584    // (2b) number of entries
4585    // (4b) offset to string table from start of message
4586    // (2b) number of class name strings
4587    // (2b) number of method name strings
4588    // (2b) number of source file name strings
4589    JDWP::Append2BE(bytes, capped_count);
4590    size_t string_table_offset = bytes.size();
4591    JDWP::Append4BE(bytes, 0);  // We'll patch this later...
4592    JDWP::Append2BE(bytes, class_names.Size());
4593    JDWP::Append2BE(bytes, method_names.Size());
4594    JDWP::Append2BE(bytes, filenames.Size());
4595
4596    idx = HeadIndex();
4597    std::string temp;
4598    for (count = capped_count; count != 0; --count) {
4599      // For each entry:
4600      // (4b) total allocation size
4601      // (2b) thread id
4602      // (2b) allocated object's class name index
4603      // (1b) stack depth
4604      AllocRecord* record = &recent_allocation_records_[idx];
4605      size_t stack_depth = record->GetDepth();
4606      size_t allocated_object_class_name_index =
4607          class_names.IndexOf(record->Type()->GetDescriptor(&temp));
4608      JDWP::Append4BE(bytes, record->ByteCount());
4609      JDWP::Append2BE(bytes, record->ThinLockId());
4610      JDWP::Append2BE(bytes, allocated_object_class_name_index);
4611      JDWP::Append1BE(bytes, stack_depth);
4612
4613      for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
4614        // For each stack frame:
4615        // (2b) method's class name
4616        // (2b) method name
4617        // (2b) method source file
4618        // (2b) line number, clipped to 32767; -2 if native; -1 if no source
4619        mirror::ArtMethod* m = record->StackElement(stack_frame)->Method();
4620        size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
4621        size_t method_name_index = method_names.IndexOf(m->GetName());
4622        size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
4623        JDWP::Append2BE(bytes, class_name_index);
4624        JDWP::Append2BE(bytes, method_name_index);
4625        JDWP::Append2BE(bytes, file_name_index);
4626        JDWP::Append2BE(bytes, record->StackElement(stack_frame)->LineNumber());
4627      }
4628      idx = (idx + 1) & (alloc_record_max_ - 1);
4629    }
4630
4631    // (xb) class name strings
4632    // (xb) method name strings
4633    // (xb) source file strings
4634    JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
4635    class_names.WriteTo(bytes);
4636    method_names.WriteTo(bytes);
4637    filenames.WriteTo(bytes);
4638  }
4639  JNIEnv* env = self->GetJniEnv();
4640  jbyteArray result = env->NewByteArray(bytes.size());
4641  if (result != nullptr) {
4642    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
4643  }
4644  return result;
4645}
4646
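// A minimal consumer-side sketch (illustrative; guarded out of the build)
// showing how the fixed 15-byte message header documented above
// GetRecentAllocations() decodes. Offsets and widths follow the layout
// comment; AllocMessageHeader, Read2BE and Read4BE are hypothetical local
// helpers, not runtime APIs.
#if 0
struct AllocMessageHeader {
  uint8_t message_header_len;    // 15 for the format emitted here.
  uint8_t entry_header_len;      // 9.
  uint8_t stack_frame_len;       // 8.
  uint16_t entry_count;
  uint32_t string_table_offset;  // From the start of the message.
  uint16_t class_name_count;
  uint16_t method_name_count;
  uint16_t file_name_count;
};

static uint16_t Read2BE(const uint8_t* p) {
  return static_cast<uint16_t>((p[0] << 8) | p[1]);
}

static uint32_t Read4BE(const uint8_t* p) {
  return (static_cast<uint32_t>(p[0]) << 24) | (static_cast<uint32_t>(p[1]) << 16) |
      (static_cast<uint32_t>(p[2]) << 8) | p[3];
}

static AllocMessageHeader ParseAllocMessageHeader(const uint8_t* p) {
  AllocMessageHeader header;
  header.message_header_len = p[0];
  header.entry_header_len = p[1];
  header.stack_frame_len = p[2];
  header.entry_count = Read2BE(p + 3);
  header.string_table_offset = Read4BE(p + 5);
  header.class_name_count = Read2BE(p + 9);
  header.method_name_count = Read2BE(p + 11);
  header.file_name_count = Read2BE(p + 13);
  return header;
}
#endif
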
4647mirror::ArtMethod* DeoptimizationRequest::Method() const {
4648  ScopedObjectAccessUnchecked soa(Thread::Current());
4649  return soa.DecodeMethod(method_);
4650}
4651
4652void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) {
4653  ScopedObjectAccessUnchecked soa(Thread::Current());
4654  method_ = soa.EncodeMethod(m);
4655}
4656
4657}  // namespace art
4658