// debugger.cc, revision 4c4d609a3f1d67c76c855df13c2c1be9c315a6c9
/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "debugger.h"

#include <sys/uio.h>

#include <set>

#include "arch/context.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "dex_file-inl.h"
#include "dex_instruction.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/space/large_object_space.h"
#include "gc/space/space-inl.h"
#include "handle_scope.h"
#include "jdwp/object_registry.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/string-inl.h"
#include "mirror/throwable.h"
#include "quick/inline_method_analyser.h"
#include "reflection.h"
#include "safe_map.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedPrimitiveArray.h"
#include "handle_scope-inl.h"
#include "thread_list.h"
#include "throw_location.h"
#include "utf.h"
#include "verifier/method_verifier-inl.h"
#include "well_known_classes.h"

#ifdef HAVE_ANDROID_OS
#include "cutils/properties.h"
#endif

namespace art {

static const size_t kMaxAllocRecordStackDepth = 16;  // Max 255.
static const size_t kDefaultNumAllocRecords = 64*1024;  // Must be a power of 2. 2BE can hold 64k-1.

// Limit alloc_record_count to the 2BE value that is the limit of the current protocol.
static uint16_t CappedAllocRecordCount(size_t alloc_record_count) {
  if (alloc_record_count > 0xffff) {
    return 0xffff;
  }
  return alloc_record_count;
}

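// One (method, dex pc) element of an allocation's recorded stack trace. The method is stored as
// a jmethodID and converted back to an ArtMethod* via ScopedObjectAccess when read.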
class AllocRecordStackTraceElement {
 public:
  AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) {
  }

  int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* method = Method();
    DCHECK(method != nullptr);
    return method->GetLineNumFromDexPC(DexPc());
  }

  mirror::ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    return soa.DecodeMethod(method_);
  }

  void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(m);
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  void SetDexPc(uint32_t pc) {
    dex_pc_ = pc;
  }

 private:
  jmethodID method_;
  uint32_t dex_pc_;
};

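// Returns a weak global reference to the given class. If the cache already holds a weak global
// for an equal class (matched first by identity hash code, then by pointer equality), that
// existing reference is reused instead of creating a new one.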
jobject Dbg::TypeCache::Add(mirror::Class* t) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JNIEnv* const env = soa.Env();
  ScopedLocalRef<jobject> local_ref(soa.Env(), soa.AddLocalReference<jobject>(t));
  const int32_t hash_code = soa.Decode<mirror::Class*>(local_ref.get())->IdentityHashCode();
  auto range = objects_.equal_range(hash_code);
  for (auto it = range.first; it != range.second; ++it) {
    if (soa.Decode<mirror::Class*>(it->second) == soa.Decode<mirror::Class*>(local_ref.get())) {
      // Found a matching weak global, return it.
      return it->second;
    }
  }
  const jobject weak_global = env->NewWeakGlobalRef(local_ref.get());
  objects_.insert(std::make_pair(hash_code, weak_global));
  return weak_global;
}

void Dbg::TypeCache::Clear() {
  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
  Thread* self = Thread::Current();
  for (const auto& p : objects_) {
    vm->DeleteWeakGlobalRef(self, p.second);
  }
  objects_.clear();
}

class AllocRecord {
 public:
  AllocRecord() : type_(nullptr), byte_count_(0), thin_lock_id_(0) {}

  mirror::Class* Type() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_));
  }

  void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_,
                                                       Locks::alloc_tracker_lock_) {
    type_ = Dbg::type_cache_.Add(t);
  }

  size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    size_t depth = 0;
    while (depth < kMaxAllocRecordStackDepth && stack_[depth].Method() != nullptr) {
      ++depth;
    }
    return depth;
  }

  size_t ByteCount() const {
    return byte_count_;
  }

  void SetByteCount(size_t count) {
    byte_count_ = count;
  }

  uint16_t ThinLockId() const {
    return thin_lock_id_;
  }

  void SetThinLockId(uint16_t id) {
    thin_lock_id_ = id;
  }

  AllocRecordStackTraceElement* StackElement(size_t index) {
    DCHECK_LT(index, kMaxAllocRecordStackDepth);
    return &stack_[index];
  }

 private:
  jobject type_;  // This is a weak global.
  size_t byte_count_;
  uint16_t thin_lock_id_;
  AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth];  // Unused entries have nullptr method.
};

class Breakpoint {
 public:
  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc,
             DeoptimizationRequest::Kind deoptimization_kind)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    : method_(nullptr), dex_pc_(dex_pc), deoptimization_kind_(deoptimization_kind) {
    CHECK(deoptimization_kind_ == DeoptimizationRequest::kNothing ||
          deoptimization_kind_ == DeoptimizationRequest::kSelectiveDeoptimization ||
          deoptimization_kind_ == DeoptimizationRequest::kFullDeoptimization);
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(method);
  }

  Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
    : method_(nullptr), dex_pc_(other.dex_pc_),
      deoptimization_kind_(other.deoptimization_kind_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    method_ = soa.EncodeMethod(other.Method());
  }

  mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    return soa.DecodeMethod(method_);
  }

  uint32_t DexPc() const {
    return dex_pc_;
  }

  DeoptimizationRequest::Kind GetDeoptimizationKind() const {
    return deoptimization_kind_;
  }

 private:
  // The location of this breakpoint.
  jmethodID method_;
  uint32_t dex_pc_;

  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
  DeoptimizationRequest::Kind deoptimization_kind_;
};

static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc());
  return os;
}

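// Instrumentation listener that forwards instrumentation events (method entry/exit, dex pc
// moves, field reads/writes and caught exceptions) to the debugger.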
class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
 public:
  DebugInstrumentationListener() {}
  virtual ~DebugInstrumentationListener() {}

  void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                     uint32_t dex_pc ATTRIBUTE_UNUSED)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point, but native method entry stubs aren't.
      return;
    }
    Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr);
  }

  void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc, const JValue& return_value)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method->IsNative()) {
      // TODO: posting location events is a suspension point, but native method entry stubs aren't.
      return;
    }
    Dbg::UpdateDebugger(thread, this_object, method, dex_pc, Dbg::kMethodExit, &return_value);
  }

  void MethodUnwind(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                    uint32_t dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // We're not registered to listen to this kind of event, so complain.
    UNUSED(thread, this_object);
    LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
               << " " << dex_pc;
  }

  void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                  uint32_t new_dex_pc)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, 0, nullptr);
  }

  void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
                 uint32_t dex_pc, mirror::ArtField* field)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    UNUSED(thread);
    Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
  }

  void FieldWritten(Thread* thread ATTRIBUTE_UNUSED, mirror::Object* this_object,
                    mirror::ArtMethod* method, uint32_t dex_pc, mirror::ArtField* field,
                    const JValue& field_value)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
  }

  void ExceptionCaught(Thread* thread ATTRIBUTE_UNUSED, const ThrowLocation& throw_location,
                       mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
                       mirror::Throwable* exception_object)
      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
} gDebugInstrumentationListener;

// JDWP is allowed unless the Zygote forbids it.
static bool gJdwpAllowed = true;

// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
static bool gJdwpConfigured = false;

// Broken-down JDWP options. (Only valid if IsJdwpConfigured() is true.)
static JDWP::JdwpOptions gJdwpOptions;

// Runtime JDWP state.
static JDWP::JdwpState* gJdwpState = nullptr;
static bool gDebuggerConnected;  // debugger or DDMS is connected.
static bool gDebuggerActive;     // debugger is making requests.
static bool gDisposed;           // debugger called VirtualMachine.Dispose, so we should drop the connection.

static bool gDdmThreadNotification = false;

// DDMS GC-related settings.
static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmHpsgWhat;
static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
static Dbg::HpsgWhat gDdmNhsgWhat;

ObjectRegistry* Dbg::gRegistry = nullptr;

// Recent allocation tracking.
AllocRecord* Dbg::recent_allocation_records_ = nullptr;  // TODO: CircularBuffer<AllocRecord>
size_t Dbg::alloc_record_max_ = 0;
size_t Dbg::alloc_record_head_ = 0;
size_t Dbg::alloc_record_count_ = 0;
Dbg::TypeCache Dbg::type_cache_;

// Deoptimization support.
std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
size_t Dbg::full_deoptimization_event_count_ = 0;
size_t Dbg::delayed_full_undeoptimization_count_ = 0;

// Instrumentation event reference counters.
size_t Dbg::dex_pc_change_event_ref_count_ = 0;
size_t Dbg::method_enter_event_ref_count_ = 0;
size_t Dbg::method_exit_event_ref_count_ = 0;
size_t Dbg::field_read_event_ref_count_ = 0;
size_t Dbg::field_write_event_ref_count_ = 0;
size_t Dbg::exception_catch_event_ref_count_ = 0;
uint32_t Dbg::instrumentation_events_ = 0;

// Breakpoints.
static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);

void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) {
  if (receiver != nullptr) {
    callback(&receiver, arg, root_info);
  }
  if (thread != nullptr) {
    callback(&thread, arg, root_info);
  }
  if (klass != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&klass), arg, root_info);
  }
  if (method != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&method), arg, root_info);
  }
}

void DebugInvokeReq::Clear() {
  invoke_needed = false;
  receiver = nullptr;
  thread = nullptr;
  klass = nullptr;
  method = nullptr;
}

void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, const RootInfo& root_info) {
  if (method != nullptr) {
    callback(reinterpret_cast<mirror::Object**>(&method), arg, root_info);
  }
}

bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
  return dex_pcs.find(dex_pc) != dex_pcs.end();
}

void SingleStepControl::Clear() {
  is_active = false;
  method = nullptr;
  dex_pcs.clear();
}

static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
    LOCKS_EXCLUDED(Locks::breakpoint_lock_)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
    if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) {
      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
      return true;
    }
  }
  return false;
}

static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
  // A thread may be suspended for GC; in this code, we really want to know whether
  // there's a debugger suspension active.
  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
}

static mirror::Array* DecodeNonNullArray(JDWP::RefTypeId id, JDWP::JdwpError* error)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
  if (o == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  if (!o->IsArrayInstance()) {
    *error = JDWP::ERR_INVALID_ARRAY;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return o->AsArray();
}

static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError* error)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(id, error);
  if (o == nullptr) {
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }
  if (!o->IsClass()) {
    *error = JDWP::ERR_INVALID_CLASS;
    return nullptr;
  }
  *error = JDWP::ERR_NONE;
  return o->AsClass();
}

static Thread* DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id,
                            JDWP::JdwpError* error)
    EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* thread_peer = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_id, error);
  if (thread_peer == nullptr) {
    // This isn't even an object.
    *error = JDWP::ERR_INVALID_OBJECT;
    return nullptr;
  }

  mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
    // This isn't a thread.
    *error = JDWP::ERR_INVALID_THREAD;
    return nullptr;
  }

  Thread* thread = Thread::FromManagedThread(soa, thread_peer);
  // If thread is null then this is a java.lang.Thread without a Thread*. It must be un-started
  // or a zombie.
  *error = (thread == nullptr) ? JDWP::ERR_THREAD_NOT_ALIVE : JDWP::ERR_NONE;
  return thread;
}

static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
  return static_cast<JDWP::JdwpTag>(descriptor[0]);
}
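// For example, "I" maps to JT_INT ('I'), "[I" to JT_ARRAY ('['), and "Ljava/lang/String;" to
// JT_OBJECT ('L'); TagFromClass below is needed to refine 'L' descriptors any further.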

static JDWP::JdwpTag BasicTagFromClass(mirror::Class* klass)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  std::string temp;
  const char* descriptor = klass->GetDescriptor(&temp);
  return BasicTagFromDescriptor(descriptor);
}

static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  CHECK(c != nullptr);
  if (c->IsArrayClass()) {
    return JDWP::JT_ARRAY;
  }
  if (c->IsStringClass()) {
    return JDWP::JT_STRING;
  }
  if (c->IsClassClass()) {
    return JDWP::JT_CLASS_OBJECT;
  }
  {
    mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
    if (thread_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD;
    }
  }
  {
    mirror::Class* thread_group_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
    if (thread_group_class->IsAssignableFrom(c)) {
      return JDWP::JT_THREAD_GROUP;
    }
  }
  {
    mirror::Class* class_loader_class =
        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader);
    if (class_loader_class->IsAssignableFrom(c)) {
      return JDWP::JT_CLASS_LOADER;
    }
  }
  return JDWP::JT_OBJECT;
}

/*
 * Objects declared to hold Object might actually hold a more specific
 * type.  The debugger may take a special interest in these (e.g. it
 * wants to display the contents of Strings), so we want to return an
 * appropriate tag.
 *
 * Null objects are tagged JT_OBJECT.
 */
JDWP::JdwpTag Dbg::TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) {
  return (o == nullptr) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
}

static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_BOOLEAN:
  case JDWP::JT_BYTE:
  case JDWP::JT_CHAR:
  case JDWP::JT_FLOAT:
  case JDWP::JT_DOUBLE:
  case JDWP::JT_INT:
  case JDWP::JT_LONG:
  case JDWP::JT_SHORT:
  case JDWP::JT_VOID:
    return true;
  default:
    return false;
  }
}

/*
 * Handle one of the JDWP name/value pairs.
 *
 * JDWP options are:
 *  help: if specified, show help message and bail
 *  transport: may be dt_socket or dt_shmem
 *  address: for dt_socket, "host:port", or just "port" when listening
 *  server: if "y", wait for debugger to attach; if "n", attach to debugger
 *  timeout: how long to wait for debugger to connect / listen
 *
 * Useful with server=n (these aren't supported yet):
 *  onthrow=<exception-name>: connect to debugger when exception thrown
 *  onuncaught=y|n: connect to debugger when uncaught exception thrown
 *  launch=<command-line>: launch the debugger itself
 *
 * The "transport" option is required, as is "address" if server=n.
 */
static bool ParseJdwpOption(const std::string& name, const std::string& value) {
  if (name == "transport") {
    if (value == "dt_socket") {
      gJdwpOptions.transport = JDWP::kJdwpTransportSocket;
    } else if (value == "dt_android_adb") {
      gJdwpOptions.transport = JDWP::kJdwpTransportAndroidAdb;
    } else {
      LOG(ERROR) << "JDWP transport not supported: " << value;
      return false;
    }
  } else if (name == "server") {
    if (value == "n") {
      gJdwpOptions.server = false;
    } else if (value == "y") {
      gJdwpOptions.server = true;
    } else {
      LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'";
      return false;
    }
  } else if (name == "suspend") {
    if (value == "n") {
      gJdwpOptions.suspend = false;
    } else if (value == "y") {
      gJdwpOptions.suspend = true;
    } else {
      LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'";
      return false;
    }
  } else if (name == "address") {
    /* this is either <port> or <host>:<port> */
    std::string port_string;
    gJdwpOptions.host.clear();
    std::string::size_type colon = value.find(':');
    if (colon != std::string::npos) {
      gJdwpOptions.host = value.substr(0, colon);
      port_string = value.substr(colon + 1);
    } else {
      port_string = value;
    }
    if (port_string.empty()) {
      LOG(ERROR) << "JDWP address missing port: " << value;
      return false;
    }
    char* end;
    uint64_t port = strtoul(port_string.c_str(), &end, 10);
    if (*end != '\0' || port > 0xffff) {
      LOG(ERROR) << "JDWP address has junk in port field: " << value;
      return false;
    }
    gJdwpOptions.port = port;
  } else if (name == "launch" || name == "onthrow" || name == "onuncaught" || name == "timeout") {
    /* valid but unsupported */
    LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'";
  } else {
    LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'";
  }

  return true;
}

/*
 * Parse the latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.:
 * "transport=dt_socket,address=8000,server=y,suspend=n"
 */
bool Dbg::ParseJdwpOptions(const std::string& options) {
  VLOG(jdwp) << "ParseJdwpOptions: " << options;

  std::vector<std::string> pairs;
  Split(options, ',', &pairs);

  for (size_t i = 0; i < pairs.size(); ++i) {
    std::string::size_type equals = pairs[i].find('=');
    if (equals == std::string::npos) {
      LOG(ERROR) << "Can't parse JDWP option '" << pairs[i] << "' in '" << options << "'";
      return false;
    }
    if (!ParseJdwpOption(pairs[i].substr(0, equals), pairs[i].substr(equals + 1))) {
      return false;
    }
  }

  if (gJdwpOptions.transport == JDWP::kJdwpTransportUnknown) {
    LOG(ERROR) << "Must specify JDWP transport: " << options;
    return false;
  }
  if (!gJdwpOptions.server && (gJdwpOptions.host.empty() || gJdwpOptions.port == 0)) {
    LOG(ERROR) << "Must specify JDWP host and port when server=n: " << options;
    return false;
  }

  gJdwpConfigured = true;
  return true;
}
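// For example, parsing "transport=dt_socket,address=localhost:8000,server=y,suspend=n" leaves
// gJdwpOptions with transport=kJdwpTransportSocket, host="localhost", port=8000, server=true
// and suspend=false.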

void Dbg::StartJdwp() {
  if (!gJdwpAllowed || !IsJdwpConfigured()) {
    // No JDWP for you!
    return;
  }

  CHECK(gRegistry == nullptr);
  gRegistry = new ObjectRegistry;

  // Init JDWP if the debugger is enabled. This may connect out to a
  // debugger, passively listen for a debugger, or block waiting for a
  // debugger.
  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
  if (gJdwpState == nullptr) {
    // We probably failed because some other process has the port already, which means that
    // if we don't abort the user is likely to think they're talking to us when they're actually
    // talking to that other process.
    LOG(FATAL) << "Debugger thread failed to initialize";
  }

  // If a debugger has already attached, send the "welcome" message.
  // This may cause us to suspend all threads.
  if (gJdwpState->IsActive()) {
    ScopedObjectAccess soa(Thread::Current());
    gJdwpState->PostVMStart();
  }
}

void Dbg::StopJdwp() {
  // Post VM_DEATH event before the JDWP connection is closed (either by the JDWP thread or the
  // destruction of gJdwpState).
  if (gJdwpState != nullptr && gJdwpState->IsActive()) {
    gJdwpState->PostVMDeath();
  }
  // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection.
  Disposed();
  delete gJdwpState;
  gJdwpState = nullptr;
  delete gRegistry;
  gRegistry = nullptr;
}

void Dbg::GcDidFinish() {
  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Sending heap info to DDM";
    DdmSendHeapInfo(gDdmHpifWhen);
  }
  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping heap to DDM";
    DdmSendHeapSegments(false);
  }
  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
    ScopedObjectAccess soa(Thread::Current());
    VLOG(jdwp) << "Dumping native heap to DDM";
    DdmSendHeapSegments(true);
  }
}

void Dbg::SetJdwpAllowed(bool allowed) {
  gJdwpAllowed = allowed;
}

DebugInvokeReq* Dbg::GetInvokeReq() {
  return Thread::Current()->GetInvokeReq();
}

Thread* Dbg::GetDebugThread() {
  return (gJdwpState != nullptr) ? gJdwpState->GetDebugThread() : nullptr;
}

void Dbg::ClearWaitForEventThread() {
  gJdwpState->ClearWaitForEventThread();
}

void Dbg::Connected() {
  CHECK(!gDebuggerConnected);
  VLOG(jdwp) << "JDWP has attached";
  gDebuggerConnected = true;
  gDisposed = false;
}

void Dbg::Disposed() {
  gDisposed = true;
}

bool Dbg::IsDisposed() {
  return gDisposed;
}

bool Dbg::RequiresDeoptimization() {
  // We don't need deoptimization if everything runs with interpreter after
  // enabling -Xint mode.
  return !Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly();
}

void Dbg::GoActive() {
  // Enable all debugging features, including scans for breakpoints.
  // This is a no-op if we're already active.
  // Only called from the JDWP handler thread.
  if (gDebuggerActive) {
    return;
  }

  {
    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
    ReaderMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
    CHECK_EQ(gBreakpoints.size(), 0U);
  }

  {
    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
    CHECK_EQ(deoptimization_requests_.size(), 0U);
    CHECK_EQ(full_deoptimization_event_count_, 0U);
    CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
    CHECK_EQ(dex_pc_change_event_ref_count_, 0U);
    CHECK_EQ(method_enter_event_ref_count_, 0U);
    CHECK_EQ(method_exit_event_ref_count_, 0U);
    CHECK_EQ(field_read_event_ref_count_, 0U);
    CHECK_EQ(field_write_event_ref_count_, 0U);
    CHECK_EQ(exception_catch_event_ref_count_, 0U);
  }

  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);
  CHECK_NE(old_state, kRunnable);
  if (RequiresDeoptimization()) {
    runtime->GetInstrumentation()->EnableDeoptimization();
  }
  instrumentation_events_ = 0;
  gDebuggerActive = true;
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();

  LOG(INFO) << "Debugger is active";
}

void Dbg::Disconnected() {
  CHECK(gDebuggerConnected);

  LOG(INFO) << "Debugger is no longer active";

  // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
  // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
  // and clear the object registry.
  Runtime* runtime = Runtime::Current();
  runtime->GetThreadList()->SuspendAll();
  Thread* self = Thread::Current();
  ThreadState old_state = self->SetStateUnsafe(kRunnable);

  // Debugger may not be active at this point.
  if (gDebuggerActive) {
    {
      // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
      // This prevents us from having any pending deoptimization request when the debugger attaches
      // to us again while no event has been requested yet.
      MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
      deoptimization_requests_.clear();
      full_deoptimization_event_count_ = 0U;
      delayed_full_undeoptimization_count_ = 0U;
    }
    if (instrumentation_events_ != 0) {
      runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener,
                                                    instrumentation_events_);
      instrumentation_events_ = 0;
    }
    if (RequiresDeoptimization()) {
      runtime->GetInstrumentation()->DisableDeoptimization();
    }
    gDebuggerActive = false;
  }
  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
  runtime->GetThreadList()->ResumeAll();

  {
    ScopedObjectAccess soa(self);
    gRegistry->Clear();
  }

  gDebuggerConnected = false;
}

bool Dbg::IsDebuggerActive() {
  return gDebuggerActive;
}

bool Dbg::IsJdwpConfigured() {
  return gJdwpConfigured;
}

int64_t Dbg::LastDebuggerActivity() {
  return gJdwpState->LastDebuggerActivity();
}

void Dbg::UndoDebuggerSuspensions() {
  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
}

std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id, &error);
  if (o == nullptr) {
    if (error == JDWP::ERR_NONE) {
      return "NULL";
    } else {
      return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
    }
  }
  if (!o->IsClass()) {
    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
  }
  return GetClassName(o->AsClass());
}

std::string Dbg::GetClassName(mirror::Class* klass) {
  if (klass == nullptr) {
    return "NULL";
  }
  std::string temp;
  return DescriptorToName(klass->GetDescriptor(&temp));
}

JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId* class_object_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, &status);
  if (c == nullptr) {
    *class_object_id = 0;
    return status;
  }
  *class_object_id = gRegistry->Add(c);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId* superclass_id) {
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(id, &status);
  if (c == nullptr) {
    *superclass_id = 0;
    return status;
  }
  if (c->IsInterface()) {
    // http://code.google.com/p/android/issues/detail?id=20856
    *superclass_id = 0;
  } else {
    *superclass_id = gRegistry->Add(c->GetSuperClass());
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader()));
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(id, &error);
  if (c == nullptr) {
    return error;
  }

  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;

  // Set ACC_SUPER. Dex files don't contain this flag, but classes (and not interfaces) are
  // supposed to have it set. Class.getModifiers doesn't return it, but JDWP does, so we set it
  // here.
  if ((access_flags & kAccInterface) == 0) {
    access_flags |= kAccSuper;
  }

  expandBufAdd4BE(pReply, access_flags);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  // Ensure all threads are suspended while we read objects' lock words.
  Thread* self = Thread::Current();
  CHECK_EQ(self->GetState(), kRunnable);
  self->TransitionFromRunnableToSuspended(kSuspended);
  Runtime::Current()->GetThreadList()->SuspendAll();

  MonitorInfo monitor_info(o);

  Runtime::Current()->GetThreadList()->ResumeAll();
  self->TransitionFromSuspendedToRunnable();

  if (monitor_info.owner_ != nullptr) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
  } else {
    expandBufAddObjectId(reply, gRegistry->Add(nullptr));
  }
  expandBufAdd4BE(reply, monitor_info.entry_count_);
  expandBufAdd4BE(reply, monitor_info.waiters_.size());
  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
                                      std::vector<JDWP::ObjectId>* monitors,
                                      std::vector<uint32_t>* stack_depths) {
  struct OwnedMonitorVisitor : public StackVisitor {
    OwnedMonitorVisitor(Thread* thread, Context* context,
                        std::vector<JDWP::ObjectId>* monitor_vector,
                        std::vector<uint32_t>* stack_depth_vector)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), current_stack_depth(0),
        monitors(monitor_vector), stack_depths(stack_depth_vector) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (!GetMethod()->IsRuntimeMethod()) {
        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
        ++current_stack_depth;
      }
      return true;
    }

    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
      visitor->monitors->push_back(gRegistry->Add(owned_monitor));
      visitor->stack_depths->push_back(visitor->current_stack_depth);
    }

    size_t current_stack_depth;
    std::vector<JDWP::ObjectId>* const monitors;
    std::vector<uint32_t>* const stack_depths;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  Thread* thread;
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    JDWP::JdwpError error;
    thread = DecodeThread(soa, thread_id, &error);
    if (thread == nullptr) {
      return error;
    }
    if (!IsSuspendedForDebugger(soa, thread)) {
      return JDWP::ERR_THREAD_NOT_SUSPENDED;
    }
  }
  std::unique_ptr<Context> context(Context::Create());
  OwnedMonitorVisitor visitor(thread, context.get(), monitors, stack_depths);
  visitor.WalkStack();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
                                         JDWP::ObjectId* contended_monitor) {
  mirror::Object* contended_monitor_obj;
  ScopedObjectAccessUnchecked soa(Thread::Current());
  *contended_monitor = 0;
  {
    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
    JDWP::JdwpError error;
    Thread* thread = DecodeThread(soa, thread_id, &error);
    if (thread == nullptr) {
      return error;
    }
    if (!IsSuspendedForDebugger(soa, thread)) {
      return JDWP::ERR_THREAD_NOT_SUSPENDED;
    }
    contended_monitor_obj = Monitor::GetContendedMonitor(thread);
  }
  // Add() requires the thread_list_lock_ not to be held, to avoid a lock level violation.
  *contended_monitor = gRegistry->Add(contended_monitor_obj);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
                                       std::vector<uint64_t>* counts) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  std::vector<mirror::Class*> classes;
  counts->clear();
  for (size_t i = 0; i < class_ids.size(); ++i) {
    JDWP::JdwpError error;
    mirror::Class* c = DecodeClass(class_ids[i], &error);
    if (c == nullptr) {
      return error;
    }
    classes.push_back(c);
    counts->push_back(0);
  }
  heap->CountInstances(classes, false, &(*counts)[0]);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count,
                                  std::vector<JDWP::ObjectId>* instances) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  // We only want reachable instances, so do a GC.
  heap->CollectGarbage(false);
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  std::vector<mirror::Object*> raw_instances;
  heap->GetInstances(c, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    instances->push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                         std::vector<JDWP::ObjectId>* referring_objects) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  std::vector<mirror::Object*> raw_instances;
  heap->GetReferringObjects(o, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    referring_objects->push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->DisableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  // Unlike DisableCollection, the JDWP spec does not state that an invalid object causes an
  // error. The RI also ignores these cases and never returns an error. However, it's not obvious
  // why this command should behave differently from the DisableCollection and IsCollected
  // commands. So let's be more strict and return an error if this happens.
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->EnableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool* is_collected) {
  *is_collected = true;
  if (object_id == 0) {
    // Null object id is invalid.
    return JDWP::ERR_INVALID_OBJECT;
  }
  // The JDWP spec states an INVALID_OBJECT error is returned if the object ID is not valid.
  // However the RI seems to ignore this and assumes the object has been collected.
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o != nullptr) {
    *is_collected = gRegistry->IsCollected(object_id);
  }
  return JDWP::ERR_NONE;
}

void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count) {
  gRegistry->DisposeObject(object_id, reference_count);
}

JDWP::JdwpTypeTag Dbg::GetTypeTag(mirror::Class* klass) {
  DCHECK(klass != nullptr);
  if (klass->IsArrayClass()) {
    return JDWP::TT_ARRAY;
  } else if (klass->IsInterface()) {
    return JDWP::TT_INTERFACE;
  } else {
    return JDWP::TT_CLASS;
  }
}

JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, class_id);
  return JDWP::ERR_NONE;
}

void Dbg::GetClassList(std::vector<JDWP::RefTypeId>* classes) {
  // Get the complete list of reference classes (i.e. all classes except
  // the primitive types).
  // Returns a newly-allocated buffer full of RefTypeId values.
  struct ClassListCreator {
    explicit ClassListCreator(std::vector<JDWP::RefTypeId>* classes_in) : classes(classes_in) {
    }

    static bool Visit(mirror::Class* c, void* arg) {
      return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
    }

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS {
      if (!c->IsPrimitive()) {
        classes->push_back(gRegistry->AddRefType(c));
      }
      return true;
    }

    std::vector<JDWP::RefTypeId>* const classes;
  };

  ClassListCreator clc(classes);
  Runtime::Current()->GetClassLinker()->VisitClassesWithoutClassesLock(ClassListCreator::Visit,
                                                                       &clc);
}

JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag,
                                  uint32_t* pStatus, std::string* pDescriptor) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }

  if (c->IsArrayClass()) {
    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
    *pTypeTag = JDWP::TT_ARRAY;
  } else {
    if (c->IsErroneous()) {
      *pStatus = JDWP::CS_ERROR;
    } else {
      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
    }
    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
  }

  if (pDescriptor != nullptr) {
    std::string temp;
    *pDescriptor = c->GetDescriptor(&temp);
  }
  return JDWP::ERR_NONE;
}

void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>* ids) {
  std::vector<mirror::Class*> classes;
  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
  ids->clear();
  for (size_t i = 0; i < classes.size(); ++i) {
    ids->push_back(gRegistry->Add(classes[i]));
  }
}

JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (o == nullptr) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());

  expandBufAdd1(pReply, type_tag);
  expandBufAddRefTypeId(pReply, type_id);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  std::string temp;
  *signature = c->GetDescriptor(&temp);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string* result) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    return error;
  }
  const char* source_file = c->GetSourceFile();
  if (source_file == nullptr) {
    return JDWP::ERR_ABSENT_INFORMATION;
  }
  *result = source_file;
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t* tag) {
  ScopedObjectAccessUnchecked soa(Thread::Current());
  JDWP::JdwpError error;
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id, &error);
  if (error != JDWP::ERR_NONE) {
    *tag = JDWP::JT_VOID;
    return error;
  }
  *tag = TagFromObject(soa, o);
  return JDWP::ERR_NONE;
}

size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
  switch (tag) {
  case JDWP::JT_VOID:
    return 0;
  case JDWP::JT_BYTE:
  case JDWP::JT_BOOLEAN:
    return 1;
  case JDWP::JT_CHAR:
  case JDWP::JT_SHORT:
    return 2;
  case JDWP::JT_FLOAT:
  case JDWP::JT_INT:
    return 4;
  case JDWP::JT_ARRAY:
  case JDWP::JT_OBJECT:
  case JDWP::JT_STRING:
  case JDWP::JT_THREAD:
  case JDWP::JT_THREAD_GROUP:
  case JDWP::JT_CLASS_LOADER:
  case JDWP::JT_CLASS_OBJECT:
    return sizeof(JDWP::ObjectId);
  case JDWP::JT_DOUBLE:
  case JDWP::JT_LONG:
    return 8;
  default:
    LOG(FATAL) << "Unknown tag " << tag;
    return -1;
  }
}

JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int32_t* length) {
  JDWP::JdwpError error;
  mirror::Array* a = DecodeNonNullArray(array_id, &error);
  if (a == nullptr) {
    return error;
  }
  *length = a->GetLength();
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
  JDWP::JdwpError error;
  mirror::Array* a = DecodeNonNullArray(array_id, &error);
  if (a == nullptr) {
    return error;
  }

  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(a->GetClass()->GetComponentType());
  expandBufAdd1(pReply, element_tag);
  expandBufAdd4BE(pReply, count);

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    uint8_t* dst = expandBufAddSpace(pReply, count * width);
    if (width == 8) {
      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
    } else if (width == 4) {
      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
    } else if (width == 2) {
      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
    } else {
      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
      memcpy(dst, &src[offset * width], count * width);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      mirror::Object* element = oa->Get(offset + i);
      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
                                                        : element_tag;
      expandBufAdd1(pReply, specific_tag);
      expandBufAddObjectId(pReply, gRegistry->Add(element));
    }
  }

  return JDWP::ERR_NONE;
}

template <typename T>
static void CopyArrayData(mirror::Array* a, JDWP::Request* src, int offset, int count)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: fix when annotalysis correctly handles non-member functions.
  DCHECK(a->GetClass()->IsPrimitiveArray());

  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
  for (int i = 0; i < count; ++i) {
    *dst++ = src->ReadValue(sizeof(T));
  }
}

JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
                                      JDWP::Request* request) {
  JDWP::JdwpError error;
  mirror::Array* dst = DecodeNonNullArray(array_id, &error);
  if (dst == nullptr) {
    return error;
  }

  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  JDWP::JdwpTag element_tag = BasicTagFromClass(dst->GetClass()->GetComponentType());

  if (IsPrimitiveTag(element_tag)) {
    size_t width = GetTagWidth(element_tag);
    if (width == 8) {
      CopyArrayData<uint64_t>(dst, request, offset, count);
    } else if (width == 4) {
      CopyArrayData<uint32_t>(dst, request, offset, count);
    } else if (width == 2) {
      CopyArrayData<uint16_t>(dst, request, offset, count);
    } else {
      CopyArrayData<uint8_t>(dst, request, offset, count);
    }
  } else {
    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      JDWP::ObjectId id = request->ReadObjectId();
      mirror::Object* o = gRegistry->Get<mirror::Object*>(id, &error);
      if (error != JDWP::ERR_NONE) {
        return error;
      }
      oa->Set<false>(offset + i, o);
    }
  }

  return JDWP::ERR_NONE;
}

JDWP::ObjectId Dbg::CreateString(const std::string& str) {
  return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str()));
}

JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId* new_object) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(class_id, &error);
  if (c == nullptr) {
    *new_object = 0;
    return error;
  }
  *new_object = gRegistry->Add(c->AllocObject(Thread::Current()));
  return JDWP::ERR_NONE;
}

/*
 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
 */
JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
                                       JDWP::ObjectId* new_array) {
  JDWP::JdwpError error;
  mirror::Class* c = DecodeClass(array_class_id, &error);
  if (c == nullptr) {
    *new_array = 0;
    return error;
  }
  *new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length,
                                                         c->GetComponentSizeShift(),
                                                         Runtime::Current()->GetHeap()->GetCurrentAllocator()));
  return JDWP::ERR_NONE;
}

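// JDWP field and method ids are simply the raw ArtField* / ArtMethod* pointer values. This is
// only sound while fields and methods never move, hence the kMovingFields/kMovingMethods CHECKs
// below.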
1444JDWP::FieldId Dbg::ToFieldId(const mirror::ArtField* f) {
1445  CHECK(!kMovingFields);
1446  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
1447}
1448
1449static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m)
1450    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1451  CHECK(!kMovingMethods);
1452  return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
1453}
1454
1455static mirror::ArtField* FromFieldId(JDWP::FieldId fid)
1456    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1457  CHECK(!kMovingFields);
1458  return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid));
1459}
1460
1461static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid)
1462    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1463  CHECK(!kMovingMethods);
1464  return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid));
1465}
1466
1467bool Dbg::MatchThread(JDWP::ObjectId expected_thread_id, Thread* event_thread) {
1468  CHECK(event_thread != nullptr);
1469  JDWP::JdwpError error;
1470  mirror::Object* expected_thread_peer = gRegistry->Get<mirror::Object*>(expected_thread_id,
1471                                                                         &error);
1472  return expected_thread_peer == event_thread->GetPeer();
1473}
1474
1475bool Dbg::MatchLocation(const JDWP::JdwpLocation& expected_location,
1476                        const JDWP::EventLocation& event_location) {
1477  if (expected_location.dex_pc != event_location.dex_pc) {
1478    return false;
1479  }
1480  mirror::ArtMethod* m = FromMethodId(expected_location.method_id);
1481  return m == event_location.method;
1482}
1483
1484bool Dbg::MatchType(mirror::Class* event_class, JDWP::RefTypeId class_id) {
1485  if (event_class == nullptr) {
1486    return false;
1487  }
1488  JDWP::JdwpError error;
1489  mirror::Class* expected_class = DecodeClass(class_id, &error);
1490  CHECK(expected_class != nullptr);
1491  return expected_class->IsAssignableFrom(event_class);
1492}
1493
1494bool Dbg::MatchField(JDWP::RefTypeId expected_type_id, JDWP::FieldId expected_field_id,
1495                     mirror::ArtField* event_field) {
1496  mirror::ArtField* expected_field = FromFieldId(expected_field_id);
1497  if (expected_field != event_field) {
1498    return false;
1499  }
1500  return Dbg::MatchType(event_field->GetDeclaringClass(), expected_type_id);
1501}
1502
1503bool Dbg::MatchInstance(JDWP::ObjectId expected_instance_id, mirror::Object* event_instance) {
1504  JDWP::JdwpError error;
1505  mirror::Object* modifier_instance = gRegistry->Get<mirror::Object*>(expected_instance_id, &error);
1506  return modifier_instance == event_instance;
1507}
1508
1509void Dbg::SetJdwpLocation(JDWP::JdwpLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
1510    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1511  if (m == nullptr) {
1512    memset(location, 0, sizeof(*location));
1513  } else {
1514    mirror::Class* c = m->GetDeclaringClass();
1515    location->type_tag = GetTypeTag(c);
1516    location->class_id = gRegistry->AddRefType(c);
1517    location->method_id = ToMethodId(m);
1518    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
1519  }
1520}
1521
1522std::string Dbg::GetMethodName(JDWP::MethodId method_id) {
1523  mirror::ArtMethod* m = FromMethodId(method_id);
1524  if (m == nullptr) {
1525    return "NULL";
1526  }
1527  return m->GetName();
1528}
1529
1530std::string Dbg::GetFieldName(JDWP::FieldId field_id) {
1531  mirror::ArtField* f = FromFieldId(field_id);
1532  if (f == nullptr) {
1533    return "NULL";
1534  }
1535  return f->GetName();
1536}
1537
1538/*
1539 * Augment the access flags for synthetic methods and fields by setting
1540 * the (as described by the spec) "0xf0000000 bit".  Also, strip out any
1541 * flags not specified by the Java programming language.
1542 */
1543static uint32_t MangleAccessFlags(uint32_t accessFlags) {
1544  accessFlags &= kAccJavaFlagsMask;
1545  if ((accessFlags & kAccSynthetic) != 0) {
1546    accessFlags |= 0xf0000000;
1547  }
1548  return accessFlags;
1549}
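// A worked example (illustrative values): assuming kAccJavaFlagsMask == 0xffff
// and kAccSynthetic == 0x1000, a synthetic static field with raw access flags
// 0x00011008 is first masked down to 0x1008 and, because the synthetic bit is
// set, reported to the debugger as 0xf0001008.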
1550
1551/*
1552 * Circularly shifts registers so that arguments come first. Debuggers
1553 * expect slots to begin with arguments, but dex code places them at
1554 * the end.
1555 */
1556static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m)
1557    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1558  const DexFile::CodeItem* code_item = m->GetCodeItem();
1559  if (code_item == nullptr) {
1560    // We should not get here for a method without code (native, proxy or abstract). Log it and
1561    // return the slot as is since all registers are arguments.
1562    LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m);
1563    return slot;
1564  }
1565  uint16_t ins_size = code_item->ins_size_;
1566  uint16_t locals_size = code_item->registers_size_ - ins_size;
1567  if (slot >= locals_size) {
1568    return slot - locals_size;
1569  } else {
1570    return slot + ins_size;
1571  }
1572}
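// A worked example (illustrative numbers): with registers_size_ == 5 and
// ins_size_ == 2, the locals occupy v0..v2 and the arguments v3..v4.
// MangleSlot maps v3->0, v4->1, v0->2, v1->3, v2->4, so the debugger sees the
// argument slots first; DemangleSlot below is the exact inverse mapping.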
1573
1574/*
1575 * Circularly shifts registers so that arguments come last. Reverts
1576 * slots to dex style argument placement.
1577 */
1578static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m)
1579    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1580  const DexFile::CodeItem* code_item = m->GetCodeItem();
1581  if (code_item == nullptr) {
1582    // We should not get here for a method without code (native, proxy or abstract). Log it and
1583    // return the slot as is since all registers are arguments.
1584    LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
1585    return slot;
1586  }
1587  uint16_t ins_size = code_item->ins_size_;
1588  uint16_t locals_size = code_item->registers_size_ - ins_size;
1589  if (slot < ins_size) {
1590    return slot + locals_size;
1591  } else {
1592    return slot - ins_size;
1593  }
1594}
1595
1596JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
1597  JDWP::JdwpError error;
1598  mirror::Class* c = DecodeClass(class_id, &error);
1599  if (c == nullptr) {
1600    return error;
1601  }
1602
1603  size_t instance_field_count = c->NumInstanceFields();
1604  size_t static_field_count = c->NumStaticFields();
1605
1606  expandBufAdd4BE(pReply, instance_field_count + static_field_count);
1607
1608  for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
1609    mirror::ArtField* f = (i < instance_field_count)
        ? c->GetInstanceField(i)
        : c->GetStaticField(i - instance_field_count);
1610    expandBufAddFieldId(pReply, ToFieldId(f));
1611    expandBufAddUtf8String(pReply, f->GetName());
1612    expandBufAddUtf8String(pReply, f->GetTypeDescriptor());
1613    if (with_generic) {
1614      static const char genericSignature[1] = "";
1615      expandBufAddUtf8String(pReply, genericSignature);
1616    }
1617    expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
1618  }
1619  return JDWP::ERR_NONE;
1620}
1621
1622JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
1623                                           JDWP::ExpandBuf* pReply) {
1624  JDWP::JdwpError error;
1625  mirror::Class* c = DecodeClass(class_id, &error);
1626  if (c == nullptr) {
1627    return error;
1628  }
1629
1630  size_t direct_method_count = c->NumDirectMethods();
1631  size_t virtual_method_count = c->NumVirtualMethods();
1632
1633  expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);
1634
1635  for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
1636    mirror::ArtMethod* m = (i < direct_method_count)
        ? c->GetDirectMethod(i)
        : c->GetVirtualMethod(i - direct_method_count);
1637    expandBufAddMethodId(pReply, ToMethodId(m));
1638    expandBufAddUtf8String(pReply, m->GetName());
1639    expandBufAddUtf8String(pReply, m->GetSignature().ToString());
1640    if (with_generic) {
1641      static const char genericSignature[1] = "";
1642      expandBufAddUtf8String(pReply, genericSignature);
1643    }
1644    expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
1645  }
1646  return JDWP::ERR_NONE;
1647}
1648
1649JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1650  JDWP::JdwpError error;
1651  Thread* self = Thread::Current();
1652  StackHandleScope<1> hs(self);
1653  Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, &error)));
1654  if (c.Get() == nullptr) {
1655    return error;
1656  }
1657  size_t interface_count = c->NumDirectInterfaces();
1658  expandBufAdd4BE(pReply, interface_count);
1659  for (size_t i = 0; i < interface_count; ++i) {
1660    expandBufAddRefTypeId(pReply,
1661                          gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i)));
1662  }
1663  return JDWP::ERR_NONE;
1664}
1665
1666void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply) {
1667  struct DebugCallbackContext {
1668    int numItems;
1669    JDWP::ExpandBuf* pReply;
1670
1671    static bool Callback(void* context, uint32_t address, uint32_t line_number) {
1672      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1673      expandBufAdd8BE(pContext->pReply, address);
1674      expandBufAdd4BE(pContext->pReply, line_number);
1675      pContext->numItems++;
1676      return false;
1677    }
1678  };
1679  mirror::ArtMethod* m = FromMethodId(method_id);
1680  const DexFile::CodeItem* code_item = m->GetCodeItem();
1681  uint64_t start, end;
1682  if (code_item == nullptr) {
1683    DCHECK(m->IsNative() || m->IsProxyMethod());
1684    start = -1;
1685    end = -1;
1686  } else {
1687    start = 0;
1688    // Return the index of the last instruction
1689    end = code_item->insns_size_in_code_units_ - 1;
1690  }
1691
1692  expandBufAdd8BE(pReply, start);
1693  expandBufAdd8BE(pReply, end);
1694
1695  // We don't know the line count yet, so reserve space and back-patch it below.
1696  size_t numLinesOffset = expandBufGetLength(pReply);
1697  expandBufAdd4BE(pReply, 0);
1698
1699  DebugCallbackContext context;
1700  context.numItems = 0;
1701  context.pReply = pReply;
1702
1703  if (code_item != nullptr) {
1704    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
1705                                     DebugCallbackContext::Callback, nullptr, &context);
1706  }
1707
1708  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
1709}
1710
1711void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic,
1712                              JDWP::ExpandBuf* pReply) {
1713  struct DebugCallbackContext {
1714    mirror::ArtMethod* method;
1715    JDWP::ExpandBuf* pReply;
1716    size_t variable_count;
1717    bool with_generic;
1718
1719    static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress,
1720                         const char* name, const char* descriptor, const char* signature)
1721        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1722      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1723
1724      VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d",
1725                                 pContext->variable_count, startAddress, endAddress - startAddress,
1726                                 name, descriptor, signature, slot,
1727                                 MangleSlot(slot, pContext->method));
1728
1729      slot = MangleSlot(slot, pContext->method);
1730
1731      expandBufAdd8BE(pContext->pReply, startAddress);
1732      expandBufAddUtf8String(pContext->pReply, name);
1733      expandBufAddUtf8String(pContext->pReply, descriptor);
1734      if (pContext->with_generic) {
1735        expandBufAddUtf8String(pContext->pReply, signature);
1736      }
1737      expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
1738      expandBufAdd4BE(pContext->pReply, slot);
1739
1740      ++pContext->variable_count;
1741    }
1742  };
1743  mirror::ArtMethod* m = FromMethodId(method_id);
1744
1745  // arg_count considers doubles and longs to take 2 units.
1746  // variable_count considers everything to take 1 unit.
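  // E.g. (illustrative): a method with shorty "VJI" (void f(long, int)) has
  // three argument registers but only two argument variables.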
1747  std::string shorty(m->GetShorty());
1748  expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty));
1749
1750  // We don't know the total number of variables yet, so leave a blank and update it later.
1751  size_t variable_count_offset = expandBufGetLength(pReply);
1752  expandBufAdd4BE(pReply, 0);
1753
1754  DebugCallbackContext context;
1755  context.method = m;
1756  context.pReply = pReply;
1757  context.variable_count = 0;
1758  context.with_generic = with_generic;
1759
1760  const DexFile::CodeItem* code_item = m->GetCodeItem();
1761  if (code_item != nullptr) {
1762    m->GetDexFile()->DecodeDebugInfo(
1763        code_item, m->IsStatic(), m->GetDexMethodIndex(), nullptr, DebugCallbackContext::Callback,
1764        &context);
1765  }
1766
1767  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
1768}
1769
1770void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
1771                                  JDWP::ExpandBuf* pReply) {
1772  mirror::ArtMethod* m = FromMethodId(method_id);
1773  JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty());
1774  OutputJValue(tag, return_value, pReply);
1775}
1776
1777void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
1778                           JDWP::ExpandBuf* pReply) {
1779  mirror::ArtField* f = FromFieldId(field_id);
1780  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1781  OutputJValue(tag, field_value, pReply);
1782}
1783
1784JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
1785                                  std::vector<uint8_t>* bytecodes) {
1786  mirror::ArtMethod* m = FromMethodId(method_id);
1787  if (m == nullptr) {
1788    return JDWP::ERR_INVALID_METHODID;
1789  }
1790  const DexFile::CodeItem* code_item = m->GetCodeItem();
  if (code_item == nullptr) {
    // Native and proxy methods have no code item, so there are no bytecodes to report.
    return JDWP::ERR_ABSENT_INFORMATION;
  }
1791  size_t byte_count = code_item->insns_size_in_code_units_ * 2;
1792  const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
1793  const uint8_t* end = begin + byte_count;
1794  for (const uint8_t* p = begin; p != end; ++p) {
1795    bytecodes->push_back(*p);
1796  }
1797  return JDWP::ERR_NONE;
1798}
1799
1800JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
1801  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1802}
1803
1804JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
1805  return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor());
1806}
1807
1808static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
1809                                         JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
1810                                         bool is_static)
1811    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1812  JDWP::JdwpError error;
1813  mirror::Class* c = DecodeClass(ref_type_id, &error);
1814  if (ref_type_id != 0 && c == nullptr) {
1815    return error;
1816  }
1817
1818  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
1819  if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
1820    return JDWP::ERR_INVALID_OBJECT;
1821  }
1822  mirror::ArtField* f = FromFieldId(field_id);
1823
1824  mirror::Class* receiver_class = c;
1825  if (receiver_class == nullptr && o != nullptr) {
1826    receiver_class = o->GetClass();
1827  }
1828  // TODO: should we give up now if receiver_class is nullptr?
1829  if (receiver_class != nullptr && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
1830    LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
1831    return JDWP::ERR_INVALID_FIELDID;
1832  }
1833
1834  // The RI only enforces the static/non-static mismatch in one direction.
1835  // TODO: should we change the tests and check both?
1836  if (is_static) {
1837    if (!f->IsStatic()) {
1838      return JDWP::ERR_INVALID_FIELDID;
1839    }
1840  } else {
1841    if (f->IsStatic()) {
1842      LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.GetValues on static field "
1843          << PrettyField(f);
1844    }
1845  }
1846  if (f->IsStatic()) {
1847    o = f->GetDeclaringClass();
1848  }
1849
1850  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1851  JValue field_value;
1852  if (tag == JDWP::JT_VOID) {
1853    LOG(FATAL) << "Unknown tag: " << tag;
1854  } else if (!IsPrimitiveTag(tag)) {
1855    field_value.SetL(f->GetObject(o));
1856  } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1857    field_value.SetJ(f->Get64(o));
1858  } else {
1859    field_value.SetI(f->Get32(o));
1860  }
1861  Dbg::OutputJValue(tag, &field_value, pReply);
1862
1863  return JDWP::ERR_NONE;
1864}
1865
1866JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1867                                   JDWP::ExpandBuf* pReply) {
1868  return GetFieldValueImpl(0, object_id, field_id, pReply, false);
1869}
1870
1871JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
1872                                         JDWP::ExpandBuf* pReply) {
1873  return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
1874}
1875
1876static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1877                                         uint64_t value, int width, bool is_static)
1878    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1879  JDWP::JdwpError error;
1880  mirror::Object* o = Dbg::GetObjectRegistry()->Get<mirror::Object*>(object_id, &error);
1881  if ((!is_static && o == nullptr) || error != JDWP::ERR_NONE) {
1882    return JDWP::ERR_INVALID_OBJECT;
1883  }
1884  mirror::ArtField* f = FromFieldId(field_id);
1885
1886  // The RI only enforces the static/non-static mismatch in one direction.
1887  // TODO: should we change the tests and check both?
1888  if (is_static) {
1889    if (!f->IsStatic()) {
1890      return JDWP::ERR_INVALID_FIELDID;
1891    }
1892  } else {
1893    if (f->IsStatic()) {
1894      LOG(WARNING) << "Ignoring non-nullptr receiver for ObjectReference.SetValues on static field "
          << PrettyField(f);
1895    }
1896  }
1897  if (f->IsStatic()) {
1898    o = f->GetDeclaringClass();
1899  }
1900
1901  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
1902
1903  if (IsPrimitiveTag(tag)) {
1904    if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1905      CHECK_EQ(width, 8);
1906      // Debugging can't use transactional mode (runtime only).
1907      f->Set64<false>(o, value);
1908    } else {
1909      CHECK_LE(width, 4);
1910      // Debugging can't use transactional mode (runtime only).
1911      f->Set32<false>(o, value);
1912    }
1913  } else {
1914    mirror::Object* v = Dbg::GetObjectRegistry()->Get<mirror::Object*>(value, &error);
1915    if (error != JDWP::ERR_NONE) {
1916      return JDWP::ERR_INVALID_OBJECT;
1917    }
1918    if (v != nullptr) {
1919      mirror::Class* field_type;
1920      {
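        // GetType(true) may resolve the field's type and suspend this thread;
        // the HandleWrappers keep v, f and o up to date if a moving collector
        // relocates them in the meantime.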
1921        StackHandleScope<3> hs(Thread::Current());
1922        HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v));
1923        HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f));
1924        HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o));
1925        field_type = h_f->GetType(true);
1926      }
1927      if (!field_type->IsAssignableFrom(v->GetClass())) {
1928        return JDWP::ERR_INVALID_OBJECT;
1929      }
1930    }
1931    // Debugging can't use transactional mode (runtime only).
1932    f->SetObject<false>(o, v);
1933  }
1934
1935  return JDWP::ERR_NONE;
1936}
1937
1938JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
1939                                   int width) {
1940  return SetFieldValueImpl(object_id, field_id, value, width, false);
1941}
1942
1943JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
1944  return SetFieldValueImpl(0, field_id, value, width, true);
1945}
1946
1947JDWP::JdwpError Dbg::StringToUtf8(JDWP::ObjectId string_id, std::string* str) {
1948  JDWP::JdwpError error;
1949  mirror::Object* obj = gRegistry->Get<mirror::Object*>(string_id, &error);
1950  if (error != JDWP::ERR_NONE) {
1951    return error;
1952  }
1953  if (obj == nullptr) {
1954    return JDWP::ERR_INVALID_OBJECT;
1955  }
1956  {
1957    ScopedObjectAccessUnchecked soa(Thread::Current());
1958    mirror::Class* java_lang_String = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_String);
1959    if (!java_lang_String->IsAssignableFrom(obj->GetClass())) {
1960      // This isn't a string.
1961      return JDWP::ERR_INVALID_STRING;
1962    }
1963  }
1964  *str = obj->AsString()->ToModifiedUtf8();
1965  return JDWP::ERR_NONE;
1966}
1967
1968void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
1969  if (IsPrimitiveTag(tag)) {
1970    expandBufAdd1(pReply, tag);
1971    if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
1972      expandBufAdd1(pReply, return_value->GetI());
1973    } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
1974      expandBufAdd2BE(pReply, return_value->GetI());
1975    } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
1976      expandBufAdd4BE(pReply, return_value->GetI());
1977    } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1978      expandBufAdd8BE(pReply, return_value->GetJ());
1979    } else {
1980      CHECK_EQ(tag, JDWP::JT_VOID);
1981    }
1982  } else {
1983    ScopedObjectAccessUnchecked soa(Thread::Current());
1984    mirror::Object* value = return_value->GetL();
1985    expandBufAdd1(pReply, TagFromObject(soa, value));
1986    expandBufAddObjectId(pReply, gRegistry->Add(value));
1987  }
1988}
1989
1990JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string* name) {
1991  ScopedObjectAccessUnchecked soa(Thread::Current());
1992  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1993  JDWP::JdwpError error;
1994  Thread* thread = DecodeThread(soa, thread_id, &error);
1995  UNUSED(thread);
1996  if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
1997    return error;
1998  }
1999
2000  // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
2001  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
2002  CHECK(thread_object != nullptr) << error;
2003  mirror::ArtField* java_lang_Thread_name_field =
2004      soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
2005  mirror::String* s =
2006      reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
2007  if (s != nullptr) {
2008    *name = s->ToModifiedUtf8();
2009  }
2010  return JDWP::ERR_NONE;
2011}
2012
2013JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
2014  ScopedObjectAccessUnchecked soa(Thread::Current());
2015  JDWP::JdwpError error;
2016  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id, &error);
2017  if (error != JDWP::ERR_NONE) {
2018    return JDWP::ERR_INVALID_OBJECT;
2019  }
2020  ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroup");
2021  // Okay, so it's an object, but is it actually a thread?
2022  {
2023    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2024    Thread* thread = DecodeThread(soa, thread_id, &error);
2025    UNUSED(thread);
2026  }
2027  if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
2028    // Zombie threads are in the null group.
2029    expandBufAddObjectId(pReply, JDWP::ObjectId(0));
2030    error = JDWP::ERR_NONE;
2031  } else if (error == JDWP::ERR_NONE) {
2032    mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
2033    CHECK(c != nullptr);
2034    mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
2035    CHECK(f != nullptr);
2036    mirror::Object* group = f->GetObject(thread_object);
2037    CHECK(group != nullptr);
2038    JDWP::ObjectId thread_group_id = gRegistry->Add(group);
2039    expandBufAddObjectId(pReply, thread_group_id);
2040  }
2041  return error;
2042}
2043
2044static mirror::Object* DecodeThreadGroup(ScopedObjectAccessUnchecked& soa,
2045                                         JDWP::ObjectId thread_group_id, JDWP::JdwpError* error)
2046    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2047  mirror::Object* thread_group = Dbg::GetObjectRegistry()->Get<mirror::Object*>(thread_group_id,
2048                                                                                error);
2049  if (*error != JDWP::ERR_NONE) {
2050    return nullptr;
2051  }
2052  if (thread_group == nullptr) {
2053    *error = JDWP::ERR_INVALID_OBJECT;
2054    return nullptr;
2055  }
2056  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
2057  CHECK(c != nullptr);
2058  if (!c->IsAssignableFrom(thread_group->GetClass())) {
2059    // This is not a java.lang.ThreadGroup.
2060    *error = JDWP::ERR_INVALID_THREAD_GROUP;
2061    return nullptr;
2062  }
2063  *error = JDWP::ERR_NONE;
2064  return thread_group;
2065}
2066
2067JDWP::JdwpError Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
2068  ScopedObjectAccessUnchecked soa(Thread::Current());
2069  JDWP::JdwpError error;
2070  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
2071  if (error != JDWP::ERR_NONE) {
2072    return error;
2073  }
2074  ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupName");
2075  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
2076  CHECK(f != nullptr);
2077  mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
2078
2079  std::string thread_group_name(s->ToModifiedUtf8());
2080  expandBufAddUtf8String(pReply, thread_group_name);
2081  return JDWP::ERR_NONE;
2082}
2083
2084JDWP::JdwpError Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id, JDWP::ExpandBuf* pReply) {
2085  ScopedObjectAccessUnchecked soa(Thread::Current());
2086  JDWP::JdwpError error;
2087  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
2088  if (error != JDWP::ERR_NONE) {
2089    return error;
2090  }
2091  mirror::Object* parent;
2092  {
2093    ScopedAssertNoThreadSuspension ants(soa.Self(), "Debugger: GetThreadGroupParent");
2094    mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_parent);
2095    CHECK(f != nullptr);
2096    parent = f->GetObject(thread_group);
2097  }
2098  JDWP::ObjectId parent_group_id = gRegistry->Add(parent);
2099  expandBufAddObjectId(pReply, parent_group_id);
2100  return JDWP::ERR_NONE;
2101}
2102
2103static void GetChildThreadGroups(ScopedObjectAccessUnchecked& soa, mirror::Object* thread_group,
2104                                 std::vector<JDWP::ObjectId>* child_thread_group_ids)
2105    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2106  CHECK(thread_group != nullptr);
2107
2108  // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
2109  mirror::ArtField* groups_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_groups);
2110  mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
2111  {
2112    // The "groups" field is declared as a java.util.List: check it really is
2113    // an instance of java.util.ArrayList.
2114    CHECK(groups_array_list != nullptr);
2115    mirror::Class* java_util_ArrayList_class =
2116        soa.Decode<mirror::Class*>(WellKnownClasses::java_util_ArrayList);
2117    CHECK(groups_array_list->InstanceOf(java_util_ArrayList_class));
2118  }
2119
2120  // Get the array and size out of the ArrayList<ThreadGroup>...
2121  mirror::ArtField* array_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_array);
2122  mirror::ArtField* size_field = soa.DecodeField(WellKnownClasses::java_util_ArrayList_size);
2123  mirror::ObjectArray<mirror::Object>* groups_array =
2124      array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
2125  const int32_t size = size_field->GetInt(groups_array_list);
2126
2127  // Copy the first 'size' elements out of the array into the result.
2128  ObjectRegistry* registry = Dbg::GetObjectRegistry();
2129  for (int32_t i = 0; i < size; ++i) {
2130    child_thread_group_ids->push_back(registry->Add(groups_array->Get(i)));
2131  }
2132}
2133
2134JDWP::JdwpError Dbg::GetThreadGroupChildren(JDWP::ObjectId thread_group_id,
2135                                            JDWP::ExpandBuf* pReply) {
2136  ScopedObjectAccessUnchecked soa(Thread::Current());
2137  JDWP::JdwpError error;
2138  mirror::Object* thread_group = DecodeThreadGroup(soa, thread_group_id, &error);
2139  if (error != JDWP::ERR_NONE) {
2140    return error;
2141  }
2142
2143  // Add child threads.
2144  {
2145    std::vector<JDWP::ObjectId> child_thread_ids;
2146    GetThreads(thread_group, &child_thread_ids);
2147    expandBufAdd4BE(pReply, child_thread_ids.size());
2148    for (JDWP::ObjectId child_thread_id : child_thread_ids) {
2149      expandBufAddObjectId(pReply, child_thread_id);
2150    }
2151  }
2152
2153  // Add child thread groups.
2154  {
2155    std::vector<JDWP::ObjectId> child_thread_groups_ids;
2156    GetChildThreadGroups(soa, thread_group, &child_thread_groups_ids);
2157    expandBufAdd4BE(pReply, child_thread_groups_ids.size());
2158    for (JDWP::ObjectId child_thread_group_id : child_thread_groups_ids) {
2159      expandBufAddObjectId(pReply, child_thread_group_id);
2160    }
2161  }
2162
2163  return JDWP::ERR_NONE;
2164}
2165
2166JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
2167  ScopedObjectAccessUnchecked soa(Thread::Current());
2168  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
2169  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
2170  return gRegistry->Add(group);
2171}
2172
2173JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
2174  switch (state) {
2175    case kBlocked:
2176      return JDWP::TS_MONITOR;
2177    case kNative:
2178    case kRunnable:
2179    case kSuspended:
2180      return JDWP::TS_RUNNING;
2181    case kSleeping:
2182      return JDWP::TS_SLEEPING;
2183    case kStarting:
2184    case kTerminated:
2185      return JDWP::TS_ZOMBIE;
2186    case kTimedWaiting:
2187    case kWaitingForCheckPointsToRun:
2188    case kWaitingForDebuggerSend:
2189    case kWaitingForDebuggerSuspension:
2190    case kWaitingForDebuggerToAttach:
2191    case kWaitingForDeoptimization:
2192    case kWaitingForGcToComplete:
2193    case kWaitingForJniOnLoad:
2194    case kWaitingForMethodTracingStart:
2195    case kWaitingForSignalCatcherOutput:
2196    case kWaitingForVisitObjects:
2197    case kWaitingInMainDebuggerLoop:
2198    case kWaitingInMainSignalCatcherLoop:
2199    case kWaitingPerformingGc:
2200    case kWaiting:
2201      return JDWP::TS_WAIT;
2202      // Don't add a 'default' here so the compiler can spot incompatible enum changes.
2203  }
2204  LOG(FATAL) << "Unknown thread state: " << state;
2205  return JDWP::TS_ZOMBIE;
2206}
2207
2208JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
2209                                     JDWP::JdwpSuspendStatus* pSuspendStatus) {
2210  ScopedObjectAccess soa(Thread::Current());
2211
2212  *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
2213
2214  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2215  JDWP::JdwpError error;
2216  Thread* thread = DecodeThread(soa, thread_id, &error);
2217  if (error != JDWP::ERR_NONE) {
2218    if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
2219      *pThreadStatus = JDWP::TS_ZOMBIE;
2220      return JDWP::ERR_NONE;
2221    }
2222    return error;
2223  }
2224
2225  if (IsSuspendedForDebugger(soa, thread)) {
2226    *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
2227  }
2228
2229  *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
2230  return JDWP::ERR_NONE;
2231}
2232
2233JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
2234  ScopedObjectAccess soa(Thread::Current());
2235  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2236  JDWP::JdwpError error;
2237  Thread* thread = DecodeThread(soa, thread_id, &error);
2238  if (error != JDWP::ERR_NONE) {
2239    return error;
2240  }
2241  MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2242  expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
2243  return JDWP::ERR_NONE;
2244}
2245
2246JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
2247  ScopedObjectAccess soa(Thread::Current());
2248  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2249  JDWP::JdwpError error;
2250  Thread* thread = DecodeThread(soa, thread_id, &error);
2251  if (error != JDWP::ERR_NONE) {
2252    return error;
2253  }
2254  thread->Interrupt(soa.Self());
2255  return JDWP::ERR_NONE;
2256}
2257
2258static bool IsInDesiredThreadGroup(ScopedObjectAccessUnchecked& soa,
2259                                   mirror::Object* desired_thread_group, mirror::Object* peer)
2260    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2261  // Do we want threads from all thread groups?
2262  if (desired_thread_group == nullptr) {
2263    return true;
2264  }
2265  mirror::ArtField* thread_group_field = soa.DecodeField(WellKnownClasses::java_lang_Thread_group);
2266  DCHECK(thread_group_field != nullptr);
2267  mirror::Object* group = thread_group_field->GetObject(peer);
2268  return (group == desired_thread_group);
2269}
2270
2271void Dbg::GetThreads(mirror::Object* thread_group, std::vector<JDWP::ObjectId>* thread_ids) {
2272  ScopedObjectAccessUnchecked soa(Thread::Current());
2273  std::list<Thread*> all_threads_list;
2274  {
2275    MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
2276    all_threads_list = Runtime::Current()->GetThreadList()->GetList();
2277  }
2278  for (Thread* t : all_threads_list) {
2279    if (t == Dbg::GetDebugThread()) {
2280      // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
2281      // query all threads, so it's easier if we just don't tell them about this thread.
2282      continue;
2283    }
2284    if (t->IsStillStarting()) {
2285      // This thread is being started (and has been registered in the thread list). However, it is
2286      // not completely started yet so we must ignore it.
2287      continue;
2288    }
2289    mirror::Object* peer = t->GetPeer();
2290    if (peer == nullptr) {
2291      // peer might be NULL if the thread is still starting up. We can't tell the debugger about
2292      // this thread yet.
2293      // TODO: if we identified threads to the debugger by their Thread*
2294      // rather than their peer's mirror::Object*, we could fix this.
2295      // Doing so might help us report ZOMBIE threads too.
2296      continue;
2297    }
2298    if (IsInDesiredThreadGroup(soa, thread_group, peer)) {
2299      thread_ids->push_back(gRegistry->Add(peer));
2300    }
2301  }
2302}
2303
2304static int GetStackDepth(Thread* thread) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2305  struct CountStackDepthVisitor : public StackVisitor {
2306    explicit CountStackDepthVisitor(Thread* thread_in)
2307        : StackVisitor(thread_in, nullptr), depth(0) {}
2308
2309    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2310    // annotalysis.
2311    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2312      if (!GetMethod()->IsRuntimeMethod()) {
2313        ++depth;
2314      }
2315      return true;
2316    }
2317    size_t depth;
2318  };
2319
2320  CountStackDepthVisitor visitor(thread);
2321  visitor.WalkStack();
2322  return visitor.depth;
2323}
2324
2325JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t* result) {
2326  ScopedObjectAccess soa(Thread::Current());
2327  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2328  JDWP::JdwpError error;
2329  *result = 0;
2330  Thread* thread = DecodeThread(soa, thread_id, &error);
2331  if (error != JDWP::ERR_NONE) {
2332    return error;
2333  }
2334  if (!IsSuspendedForDebugger(soa, thread)) {
2335    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2336  }
2337  *result = GetStackDepth(thread);
2338  return JDWP::ERR_NONE;
2339}
2340
2341JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
2342                                     size_t frame_count, JDWP::ExpandBuf* buf) {
2343  class GetFrameVisitor : public StackVisitor {
2344   public:
2345    GetFrameVisitor(Thread* thread, size_t start_frame_in, size_t frame_count_in,
2346                    JDWP::ExpandBuf* buf_in)
2347        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2348        : StackVisitor(thread, nullptr), depth_(0),
2349          start_frame_(start_frame_in), frame_count_(frame_count_in), buf_(buf_in) {
2350      expandBufAdd4BE(buf_, frame_count_);
2351    }
2352
2353    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2354    // annotalysis.
2355    virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2356      if (GetMethod()->IsRuntimeMethod()) {
2357        return true;  // The debugger can't do anything useful with a frame that has no Method*.
2358      }
2359      if (depth_ >= start_frame_ + frame_count_) {
2360        return false;
2361      }
2362      if (depth_ >= start_frame_) {
2363        JDWP::FrameId frame_id(GetFrameId());
2364        JDWP::JdwpLocation location;
2365        SetJdwpLocation(&location, GetMethod(), GetDexPc());
2366        VLOG(jdwp) << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
2367        expandBufAdd8BE(buf_, frame_id);
2368        expandBufAddLocation(buf_, location);
2369      }
2370      ++depth_;
2371      return true;
2372    }
2373
2374   private:
2375    size_t depth_;
2376    const size_t start_frame_;
2377    const size_t frame_count_;
2378    JDWP::ExpandBuf* buf_;
2379  };
2380
2381  ScopedObjectAccessUnchecked soa(Thread::Current());
2382  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2383  JDWP::JdwpError error;
2384  Thread* thread = DecodeThread(soa, thread_id, &error);
2385  if (error != JDWP::ERR_NONE) {
2386    return error;
2387  }
2388  if (!IsSuspendedForDebugger(soa, thread)) {
2389    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2390  }
2391  GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
2392  visitor.WalkStack();
2393  return JDWP::ERR_NONE;
2394}
2395
2396JDWP::ObjectId Dbg::GetThreadSelfId() {
2397  return GetThreadId(Thread::Current());
2398}
2399
2400JDWP::ObjectId Dbg::GetThreadId(Thread* thread) {
2401  ScopedObjectAccessUnchecked soa(Thread::Current());
2402  return gRegistry->Add(thread->GetPeer());
2403}
2404
2405void Dbg::SuspendVM() {
2406  Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
2407}
2408
2409void Dbg::ResumeVM() {
2410  Runtime::Current()->GetThreadList()->ResumeAllForDebugger();
2411}
2412
2413JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
2414  Thread* self = Thread::Current();
2415  ScopedLocalRef<jobject> peer(self->GetJniEnv(), nullptr);
2416  {
2417    ScopedObjectAccess soa(self);
2418    JDWP::JdwpError error;
2419    peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id, &error)));
2420  }
2421  if (peer.get() == nullptr) {
2422    return JDWP::ERR_THREAD_NOT_ALIVE;
2423  }
2424  // Suspend thread to build stack trace.
2425  bool timed_out;
2426  ThreadList* thread_list = Runtime::Current()->GetThreadList();
2427  Thread* thread = thread_list->SuspendThreadByPeer(peer.get(), request_suspension, true,
2428                                                    &timed_out);
2429  if (thread != nullptr) {
2430    return JDWP::ERR_NONE;
2431  } else if (timed_out) {
2432    return JDWP::ERR_INTERNAL;
2433  } else {
2434    return JDWP::ERR_THREAD_NOT_ALIVE;
2435  }
2436}
2437
2438void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
2439  ScopedObjectAccessUnchecked soa(Thread::Current());
2440  JDWP::JdwpError error;
2441  mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id, &error);
2442  CHECK(peer != nullptr) << error;
2443  Thread* thread;
2444  {
2445    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2446    thread = Thread::FromManagedThread(soa, peer);
2447  }
2448  if (thread == nullptr) {
2449    LOG(WARNING) << "No such thread for resume: " << peer;
2450    return;
2451  }
2452  bool needs_resume;
2453  {
2454    MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2455    needs_resume = thread->GetSuspendCount() > 0;
2456  }
2457  if (needs_resume) {
2458    Runtime::Current()->GetThreadList()->Resume(thread, true);
2459  }
2460}
2461
2462void Dbg::SuspendSelf() {
2463  Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
2464}
2465
2466struct GetThisVisitor : public StackVisitor {
2467  GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id_in)
2468      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2469      : StackVisitor(thread, context), this_object(nullptr), frame_id(frame_id_in) {}
2470
2471  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2472  // annotalysis.
2473  virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2474    if (frame_id != GetFrameId()) {
2475      return true;  // continue
2476    } else {
2477      this_object = GetThisObject();
2478      return false;
2479    }
2480  }
2481
2482  mirror::Object* this_object;
2483  JDWP::FrameId frame_id;
2484};
2485
2486JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
2487                                   JDWP::ObjectId* result) {
2488  ScopedObjectAccessUnchecked soa(Thread::Current());
2489  Thread* thread;
2490  {
2491    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2492    JDWP::JdwpError error;
2493    thread = DecodeThread(soa, thread_id, &error);
2494    if (error != JDWP::ERR_NONE) {
2495      return error;
2496    }
2497    if (!IsSuspendedForDebugger(soa, thread)) {
2498      return JDWP::ERR_THREAD_NOT_SUSPENDED;
2499    }
2500  }
2501  std::unique_ptr<Context> context(Context::Create());
2502  GetThisVisitor visitor(thread, context.get(), frame_id);
2503  visitor.WalkStack();
2504  *result = gRegistry->Add(visitor.this_object);
2505  return JDWP::ERR_NONE;
2506}
2507
2508// Walks the stack until we find the frame with the given FrameId.
2509class FindFrameVisitor FINAL : public StackVisitor {
2510 public:
2511  FindFrameVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
2512      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2513      : StackVisitor(thread, context), frame_id_(frame_id), error_(JDWP::ERR_INVALID_FRAMEID) {}
2514
2515  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2516  // annotalysis.
2517  bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2518    if (GetFrameId() != frame_id_) {
2519      return true;  // Not our frame, carry on.
2520    }
2521    mirror::ArtMethod* m = GetMethod();
2522    if (m->IsNative()) {
2523      // We can't read or write local values in a native method's frame.
2524      error_ = JDWP::ERR_OPAQUE_FRAME;
2525    } else {
2526      // We found our frame.
2527      error_ = JDWP::ERR_NONE;
2528    }
2529    return false;
2530  }
2531
2532  JDWP::JdwpError GetError() const {
2533    return error_;
2534  }
2535
2536 private:
2537  const JDWP::FrameId frame_id_;
2538  JDWP::JdwpError error_;
2539};
2540
2541JDWP::JdwpError Dbg::GetLocalValues(JDWP::Request* request, JDWP::ExpandBuf* pReply) {
2542  JDWP::ObjectId thread_id = request->ReadThreadId();
2543  JDWP::FrameId frame_id = request->ReadFrameId();
2544
2545  ScopedObjectAccessUnchecked soa(Thread::Current());
2546  Thread* thread;
2547  {
2548    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2549    JDWP::JdwpError error;
2550    thread = DecodeThread(soa, thread_id, &error);
2551    if (error != JDWP::ERR_NONE) {
2552      return error;
2553    }
2554  }
2555  // Find the frame with the given frame_id.
2556  std::unique_ptr<Context> context(Context::Create());
2557  FindFrameVisitor visitor(thread, context.get(), frame_id);
2558  visitor.WalkStack();
2559  if (visitor.GetError() != JDWP::ERR_NONE) {
2560    return visitor.GetError();
2561  }
2562
2563  // Read the values from visitor's context.
2564  int32_t slot_count = request->ReadSigned32("slot count");
2565  expandBufAdd4BE(pReply, slot_count);     /* "int values" */
2566  for (int32_t i = 0; i < slot_count; ++i) {
2567    uint32_t slot = request->ReadUnsigned32("slot");
2568    JDWP::JdwpTag reqSigByte = request->ReadTag();
2569
2570    VLOG(jdwp) << "    --> slot " << slot << " " << reqSigByte;
2571
2572    size_t width = Dbg::GetTagWidth(reqSigByte);
2573    uint8_t* ptr = expandBufAddSpace(pReply, width + 1);
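    // Reserve one leading byte for the value's tag; Dbg::GetLocalValue fills it
    // in once the actual runtime type of the slot is known.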
2574    JDWP::JdwpError error = Dbg::GetLocalValue(visitor, soa, slot, reqSigByte, ptr, width);
2575    if (error != JDWP::ERR_NONE) {
2576      return error;
2577    }
2578  }
2579  return JDWP::ERR_NONE;
2580}
2581
2582JDWP::JdwpError Dbg::GetLocalValue(const StackVisitor& visitor, ScopedObjectAccessUnchecked& soa,
2583                                   int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
2584  mirror::ArtMethod* m = visitor.GetMethod();
2585  uint16_t reg = DemangleSlot(slot, m);
2586  // TODO: check that the tag is compatible with the actual type of the slot!
2587  // TODO: check slot is valid for this method or return INVALID_SLOT error.
2588  constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
2589  switch (tag) {
2590    case JDWP::JT_BOOLEAN: {
2591      CHECK_EQ(width, 1U);
2592      uint32_t intVal;
2593      if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
2594        VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
2595        JDWP::Set1(buf + 1, intVal != 0);
2596      } else {
2597        VLOG(jdwp) << "failed to get boolean local " << reg;
2598        return kFailureErrorCode;
2599      }
2600      break;
2601    }
2602    case JDWP::JT_BYTE: {
2603      CHECK_EQ(width, 1U);
2604      uint32_t intVal;
2605      if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
2606        VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
2607        JDWP::Set1(buf + 1, intVal);
2608      } else {
2609        VLOG(jdwp) << "failed to get byte local " << reg;
2610        return kFailureErrorCode;
2611      }
2612      break;
2613    }
2614    case JDWP::JT_SHORT:
2615    case JDWP::JT_CHAR: {
2616      CHECK_EQ(width, 2U);
2617      uint32_t intVal;
2618      if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
2619        VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
2620        JDWP::Set2BE(buf + 1, intVal);
2621      } else {
2622        VLOG(jdwp) << "failed to get short/char local " << reg;
2623        return kFailureErrorCode;
2624      }
2625      break;
2626    }
2627    case JDWP::JT_INT: {
2628      CHECK_EQ(width, 4U);
2629      uint32_t intVal;
2630      if (visitor.GetVReg(m, reg, kIntVReg, &intVal)) {
2631        VLOG(jdwp) << "get int local " << reg << " = " << intVal;
2632        JDWP::Set4BE(buf + 1, intVal);
2633      } else {
2634        VLOG(jdwp) << "failed to get int local " << reg;
2635        return kFailureErrorCode;
2636      }
2637      break;
2638    }
2639    case JDWP::JT_FLOAT: {
2640      CHECK_EQ(width, 4U);
2641      uint32_t intVal;
2642      if (visitor.GetVReg(m, reg, kFloatVReg, &intVal)) {
2643        VLOG(jdwp) << "get float local " << reg << " = " << intVal;
2644        JDWP::Set4BE(buf + 1, intVal);
2645      } else {
2646        VLOG(jdwp) << "failed to get float local " << reg;
2647        return kFailureErrorCode;
2648      }
2649      break;
2650    }
2651    case JDWP::JT_ARRAY:
2652    case JDWP::JT_CLASS_LOADER:
2653    case JDWP::JT_CLASS_OBJECT:
2654    case JDWP::JT_OBJECT:
2655    case JDWP::JT_STRING:
2656    case JDWP::JT_THREAD:
2657    case JDWP::JT_THREAD_GROUP: {
2658      CHECK_EQ(width, sizeof(JDWP::ObjectId));
2659      uint32_t intVal;
2660      if (visitor.GetVReg(m, reg, kReferenceVReg, &intVal)) {
2661        mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal);
2662        VLOG(jdwp) << "get " << tag << " object local " << reg << " = " << o;
2663        if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
2664          LOG(FATAL) << "Register " << reg << " expected to hold " << tag << " object: " << o;
2665        }
2666        tag = TagFromObject(soa, o);
2667        JDWP::SetObjectId(buf + 1, gRegistry->Add(o));
2668      } else {
2669        VLOG(jdwp) << "failed to get " << tag << " object local " << reg;
2670        return kFailureErrorCode;
2671      }
2672      break;
2673    }
2674    case JDWP::JT_DOUBLE: {
2675      CHECK_EQ(width, 8U);
2676      uint64_t longVal;
2677      if (visitor.GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) {
2678        VLOG(jdwp) << "get double local " << reg << " = " << longVal;
2679        JDWP::Set8BE(buf + 1, longVal);
2680      } else {
2681        VLOG(jdwp) << "failed to get double local " << reg;
2682        return kFailureErrorCode;
2683      }
2684      break;
2685    }
2686    case JDWP::JT_LONG: {
2687      CHECK_EQ(width, 8U);
2688      uint64_t longVal;
2689      if (visitor.GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) {
2690        VLOG(jdwp) << "get long local " << reg << " = " << longVal;
2691        JDWP::Set8BE(buf + 1, longVal);
2692      } else {
2693        VLOG(jdwp) << "failed to get long local " << reg;
2694        return kFailureErrorCode;
2695      }
2696      break;
2697    }
2698    default:
2699      LOG(FATAL) << "Unknown tag " << tag;
2700      break;
2701  }
2702
2703  // Prepend tag, which may have been updated.
2704  JDWP::Set1(buf, tag);
2705  return JDWP::ERR_NONE;
2706}
2707
2708JDWP::JdwpError Dbg::SetLocalValues(JDWP::Request* request) {
2709  JDWP::ObjectId thread_id = request->ReadThreadId();
2710  JDWP::FrameId frame_id = request->ReadFrameId();
2711
2712  ScopedObjectAccessUnchecked soa(Thread::Current());
2713  Thread* thread;
2714  {
2715    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2716    JDWP::JdwpError error;
2717    thread = DecodeThread(soa, thread_id, &error);
2718    if (error != JDWP::ERR_NONE) {
2719      return error;
2720    }
2721  }
2722  // Find the frame with the given frame_id.
2723  std::unique_ptr<Context> context(Context::Create());
2724  FindFrameVisitor visitor(thread, context.get(), frame_id);
2725  visitor.WalkStack();
2726  if (visitor.GetError() != JDWP::ERR_NONE) {
2727    return visitor.GetError();
2728  }
2729
2730  // Write the values into the visitor's context.
2731  int32_t slot_count = request->ReadSigned32("slot count");
2732  for (int32_t i = 0; i < slot_count; ++i) {
2733    uint32_t slot = request->ReadUnsigned32("slot");
2734    JDWP::JdwpTag sigByte = request->ReadTag();
2735    size_t width = Dbg::GetTagWidth(sigByte);
2736    uint64_t value = request->ReadValue(width);
2737
2738    VLOG(jdwp) << "    --> slot " << slot << " " << sigByte << " " << value;
2739    JDWP::JdwpError error = Dbg::SetLocalValue(visitor, slot, sigByte, value, width);
2740    if (error != JDWP::ERR_NONE) {
2741      return error;
2742    }
2743  }
2744  return JDWP::ERR_NONE;
2745}
2746
2747JDWP::JdwpError Dbg::SetLocalValue(StackVisitor& visitor, int slot, JDWP::JdwpTag tag,
2748                                   uint64_t value, size_t width) {
2749  mirror::ArtMethod* m = visitor.GetMethod();
2750  uint16_t reg = DemangleSlot(slot, m);
2751  // TODO: check that the tag is compatible with the actual type of the slot!
2752  // TODO: check slot is valid for this method or return INVALID_SLOT error.
2753  constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
2754  switch (tag) {
2755    case JDWP::JT_BOOLEAN:
2756    case JDWP::JT_BYTE:
2757      CHECK_EQ(width, 1U);
2758      if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) {
2759        VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = "
2760                   << static_cast<uint32_t>(value);
2761        return kFailureErrorCode;
2762      }
2763      break;
2764    case JDWP::JT_SHORT:
2765    case JDWP::JT_CHAR:
2766      CHECK_EQ(width, 2U);
2767      if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) {
2768        VLOG(jdwp) << "failed to set short/char local " << reg << " = "
2769                   << static_cast<uint32_t>(value);
2770        return kFailureErrorCode;
2771      }
2772      break;
2773    case JDWP::JT_INT:
2774      CHECK_EQ(width, 4U);
2775      if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kIntVReg)) {
2776        VLOG(jdwp) << "failed to set int local " << reg << " = "
2777                   << static_cast<uint32_t>(value);
2778        return kFailureErrorCode;
2779      }
2780      break;
2781    case JDWP::JT_FLOAT:
2782      CHECK_EQ(width, 4U);
2783      if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(value), kFloatVReg)) {
2784        VLOG(jdwp) << "failed to set float local " << reg << " = "
2785                   << static_cast<uint32_t>(value);
2786        return kFailureErrorCode;
2787      }
2788      break;
2789    case JDWP::JT_ARRAY:
2790    case JDWP::JT_CLASS_LOADER:
2791    case JDWP::JT_CLASS_OBJECT:
2792    case JDWP::JT_OBJECT:
2793    case JDWP::JT_STRING:
2794    case JDWP::JT_THREAD:
2795    case JDWP::JT_THREAD_GROUP: {
2796      CHECK_EQ(width, sizeof(JDWP::ObjectId));
2797      JDWP::JdwpError error;
2798      mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value),
2799                                                          &error);
2800      if (error != JDWP::ERR_NONE) {
2801        VLOG(jdwp) << tag << " object " << o << " is an invalid object";
2802        return JDWP::ERR_INVALID_OBJECT;
2803      } else if (!visitor.SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
2804                          kReferenceVReg)) {
2805        VLOG(jdwp) << "failed to set " << tag << " object local " << reg << " = " << o;
2806        return kFailureErrorCode;
2807      }
2808      break;
2809    }
2810    case JDWP::JT_DOUBLE: {
2811      CHECK_EQ(width, 8U);
2812      if (!visitor.SetVRegPair(m, reg, value, kDoubleLoVReg, kDoubleHiVReg)) {
2813        VLOG(jdwp) << "failed to set double local " << reg << " = " << value;
2814        return kFailureErrorCode;
2815      }
2816      break;
2817    }
2818    case JDWP::JT_LONG: {
2819      CHECK_EQ(width, 8U);
2820      if (!visitor.SetVRegPair(m, reg, value, kLongLoVReg, kLongHiVReg)) {
2821        VLOG(jdwp) << "failed to set long local " << reg << " = " << value;
2822        return kFailureErrorCode;
2823      }
2824      break;
2825    }
2826    default:
2827      LOG(FATAL) << "Unknown tag " << tag;
2828      break;
2829  }
2830  return JDWP::ERR_NONE;
2831}
2832
2833static void SetEventLocation(JDWP::EventLocation* location, mirror::ArtMethod* m, uint32_t dex_pc)
2834    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2835  DCHECK(location != nullptr);
2836  if (m == nullptr) {
2837    memset(location, 0, sizeof(*location));
2838  } else {
2839    location->method = m;
2840    location->dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint32_t>(-1) : dex_pc;
2841  }
2842}
2843
2844void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
2845                            int event_flags, const JValue* return_value) {
2846  if (!IsDebuggerActive()) {
2847    return;
2848  }
2849  DCHECK(m != nullptr);
2850  DCHECK_EQ(m->IsStatic(), this_object == nullptr);
2851  JDWP::EventLocation location;
2852  SetEventLocation(&location, m, dex_pc);
2853
2854  gJdwpState->PostLocationEvent(&location, this_object, event_flags, return_value);
2855}
2856
2857void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc,
2858                               mirror::Object* this_object, mirror::ArtField* f) {
2859  if (!IsDebuggerActive()) {
2860    return;
2861  }
2862  DCHECK(m != nullptr);
2863  DCHECK(f != nullptr);
2864  JDWP::EventLocation location;
2865  SetEventLocation(&location, m, dex_pc);
2866
2867  gJdwpState->PostFieldEvent(&location, f, this_object, nullptr, false);
2868}
2869
2870void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
2871                                     mirror::Object* this_object, mirror::ArtField* f,
2872                                     const JValue* field_value) {
2873  if (!IsDebuggerActive()) {
2874    return;
2875  }
2876  DCHECK(m != nullptr);
2877  DCHECK(f != nullptr);
2878  DCHECK(field_value != nullptr);
2879  JDWP::EventLocation location;
2880  SetEventLocation(&location, m, dex_pc);
2881
2882  gJdwpState->PostFieldEvent(&location, f, this_object, field_value, true);
2883}
2884
2885void Dbg::PostException(const ThrowLocation& throw_location,
2886                        mirror::ArtMethod* catch_method,
2887                        uint32_t catch_dex_pc, mirror::Throwable* exception_object) {
2888  if (!IsDebuggerActive()) {
2889    return;
2890  }
2891  JDWP::EventLocation exception_throw_location;
2892  SetEventLocation(&exception_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
2893  JDWP::EventLocation exception_catch_location;
2894  SetEventLocation(&exception_catch_location, catch_method, catch_dex_pc);
2895
2896  gJdwpState->PostException(&exception_throw_location, exception_object, &exception_catch_location,
2897                            throw_location.GetThis());
2898}
2899
2900void Dbg::PostClassPrepare(mirror::Class* c) {
2901  if (!IsDebuggerActive()) {
2902    return;
2903  }
2904  gJdwpState->PostClassPrepare(c);
2905}
2906
2907void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
2908                         mirror::ArtMethod* m, uint32_t dex_pc,
2909                         int event_flags, const JValue* return_value) {
2910  if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
2911    return;
2912  }
2913
2914  if (IsBreakpoint(m, dex_pc)) {
2915    event_flags |= kBreakpoint;
2916  }
2917
2918  // If the debugger is single-stepping one of our threads, check to
2919  // see if we're that thread and we've reached a step point.
2920  const SingleStepControl* single_step_control = thread->GetSingleStepControl();
2921  DCHECK(single_step_control != nullptr);
2922  if (single_step_control->is_active) {
2923    CHECK(!m->IsNative());
2924    if (single_step_control->step_depth == JDWP::SD_INTO) {
2925      // Step into method calls.  We break when the line number
2926      // or method pointer changes.  If we're in SS_MIN mode, we
2927      // always stop.
2928      if (single_step_control->method != m) {
2929        event_flags |= kSingleStep;
2930        VLOG(jdwp) << "SS new method";
2931      } else if (single_step_control->step_size == JDWP::SS_MIN) {
2932        event_flags |= kSingleStep;
2933        VLOG(jdwp) << "SS new instruction";
2934      } else if (single_step_control->ContainsDexPc(dex_pc)) {
2935        event_flags |= kSingleStep;
2936        VLOG(jdwp) << "SS new line";
2937      }
2938    } else if (single_step_control->step_depth == JDWP::SD_OVER) {
2939      // Step over method calls.  We break when the line number is
2940      // different and the frame depth is <= the original frame
2941      // depth.  (We can't just compare on the method, because we
2942      // might get unrolled past it by an exception, and it's tricky
2943      // to identify recursion.)
2944
2945      int stack_depth = GetStackDepth(thread);
2946
2947      if (stack_depth < single_step_control->stack_depth) {
2948        // Popped up one or more frames, always trigger.
2949        event_flags |= kSingleStep;
2950        VLOG(jdwp) << "SS method pop";
2951      } else if (stack_depth == single_step_control->stack_depth) {
2952        // Same depth, see if we moved.
2953        if (single_step_control->step_size == JDWP::SS_MIN) {
2954          event_flags |= kSingleStep;
2955          VLOG(jdwp) << "SS new instruction";
2956        } else if (single_step_control->ContainsDexPc(dex_pc)) {
2957          event_flags |= kSingleStep;
2958          VLOG(jdwp) << "SS new line";
2959        }
2960      }
2961    } else {
2962      CHECK_EQ(single_step_control->step_depth, JDWP::SD_OUT);
2963      // Return from the current method.  We break when the frame
2964      // depth pops up.
2965
2966      // This differs from the "method exit" break in that it stops
2967      // with the PC at the next instruction in the returned-to
2968      // function, rather than the end of the returning function.
2969
2970      int stack_depth = GetStackDepth(thread);
2971      if (stack_depth < single_step_control->stack_depth) {
2972        event_flags |= kSingleStep;
2973        VLOG(jdwp) << "SS method pop";
2974      }
2975    }
2976  }
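  // Summary of the single-step checks above (a restatement of the code, not new behavior):
  //   SD_INTO: stop on a method change, on every instruction (SS_MIN), or when
  //            the ContainsDexPc() test reports a new line.
  //   SD_OVER: same instruction/line tests, but only while the stack depth
  //            equals the starting depth; popping a frame always stops.
  //   SD_OUT:  stop only once the stack depth drops below the starting depth.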
2977
2978  // If there's something interesting going on, see if it matches one
2979  // of the debugger filters.
2980  if (event_flags != 0) {
2981    Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value);
2982  }
2983}
2984
2985size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) {
2986  switch (instrumentation_event) {
2987    case instrumentation::Instrumentation::kMethodEntered:
2988      return &method_enter_event_ref_count_;
2989    case instrumentation::Instrumentation::kMethodExited:
2990      return &method_exit_event_ref_count_;
2991    case instrumentation::Instrumentation::kDexPcMoved:
2992      return &dex_pc_change_event_ref_count_;
2993    case instrumentation::Instrumentation::kFieldRead:
2994      return &field_read_event_ref_count_;
2995    case instrumentation::Instrumentation::kFieldWritten:
2996      return &field_write_event_ref_count_;
2997    case instrumentation::Instrumentation::kExceptionCaught:
2998      return &exception_catch_event_ref_count_;
2999    default:
3000      return nullptr;
3001  }
3002}
3003
3004// Process request while all mutator threads are suspended.
3005void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
3006  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
3007  switch (request.GetKind()) {
3008    case DeoptimizationRequest::kNothing:
3009      LOG(WARNING) << "Ignoring empty deoptimization request.";
3010      break;
3011    case DeoptimizationRequest::kRegisterForEvent:
3012      VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x",
3013                                 request.InstrumentationEvent());
3014      instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent());
3015      instrumentation_events_ |= request.InstrumentationEvent();
3016      break;
3017    case DeoptimizationRequest::kUnregisterForEvent:
3018      VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x",
3019                                 request.InstrumentationEvent());
3020      instrumentation->RemoveListener(&gDebugInstrumentationListener,
3021                                      request.InstrumentationEvent());
3022      instrumentation_events_ &= ~request.InstrumentationEvent();
3023      break;
3024    case DeoptimizationRequest::kFullDeoptimization:
3025      VLOG(jdwp) << "Deoptimize the world ...";
3026      instrumentation->DeoptimizeEverything();
3027      VLOG(jdwp) << "Deoptimize the world DONE";
3028      break;
3029    case DeoptimizationRequest::kFullUndeoptimization:
3030      VLOG(jdwp) << "Undeoptimize the world ...";
3031      instrumentation->UndeoptimizeEverything();
3032      VLOG(jdwp) << "Undeoptimize the world DONE";
3033      break;
3034    case DeoptimizationRequest::kSelectiveDeoptimization:
3035      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ...";
3036      instrumentation->Deoptimize(request.Method());
3037      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE";
3038      break;
3039    case DeoptimizationRequest::kSelectiveUndeoptimization:
3040      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ...";
3041      instrumentation->Undeoptimize(request.Method());
3042      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE";
3043      break;
3044    default:
3045      LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind();
3046      break;
3047  }
3048}
3049
3050void Dbg::DelayFullUndeoptimization() {
3051  if (RequiresDeoptimization()) {
3052    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
3053    ++delayed_full_undeoptimization_count_;
3054    DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
3055  }
3056}
3057
3058void Dbg::ProcessDelayedFullUndeoptimizations() {
3059  // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization).
3060  {
3061    MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
3062    while (delayed_full_undeoptimization_count_ > 0) {
3063      DeoptimizationRequest req;
3064      req.SetKind(DeoptimizationRequest::kFullUndeoptimization);
3065      req.SetMethod(nullptr);
3066      RequestDeoptimizationLocked(req);
3067      --delayed_full_undeoptimization_count_;
3068    }
3069  }
3070  ManageDeoptimization();
3071}
3072
3073void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
3074  if (req.GetKind() == DeoptimizationRequest::kNothing) {
3075    // Nothing to do.
3076    return;
3077  }
3078  MutexLock mu(Thread::Current(), *Locks::deoptimization_lock_);
3079  RequestDeoptimizationLocked(req);
3080}
3081
3082void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
3083  switch (req.GetKind()) {
3084    case DeoptimizationRequest::kRegisterForEvent: {
3085      DCHECK_NE(req.InstrumentationEvent(), 0u);
3086      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
3087      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
3088                                                req.InstrumentationEvent());
3089      if (*counter == 0) {
3090        VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x",
3091                                   deoptimization_requests_.size(), req.InstrumentationEvent());
3092        deoptimization_requests_.push_back(req);
3093      }
3094      *counter = *counter + 1;
3095      break;
3096    }
3097    case DeoptimizationRequest::kUnregisterForEvent: {
3098      DCHECK_NE(req.InstrumentationEvent(), 0u);
3099      size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent());
3100      CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x",
3101                                                req.InstrumentationEvent());
3102      *counter = *counter - 1;
3103      if (*counter == 0) {
3104        VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x",
3105                                   deoptimization_requests_.size(), req.InstrumentationEvent());
3106        deoptimization_requests_.push_back(req);
3107      }
3108      break;
3109    }
3110    case DeoptimizationRequest::kFullDeoptimization: {
3111      DCHECK(req.Method() == nullptr);
3112      if (full_deoptimization_event_count_ == 0) {
3113        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3114                   << " for full deoptimization";
3115        deoptimization_requests_.push_back(req);
3116      }
3117      ++full_deoptimization_event_count_;
3118      break;
3119    }
3120    case DeoptimizationRequest::kFullUndeoptimization: {
3121      DCHECK(req.Method() == nullptr);
3122      DCHECK_GT(full_deoptimization_event_count_, 0U);
3123      --full_deoptimization_event_count_;
3124      if (full_deoptimization_event_count_ == 0) {
3125        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3126                   << " for full undeoptimization";
3127        deoptimization_requests_.push_back(req);
3128      }
3129      break;
3130    }
3131    case DeoptimizationRequest::kSelectiveDeoptimization: {
3132      DCHECK(req.Method() != nullptr);
3133      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3134                 << " for deoptimization of " << PrettyMethod(req.Method());
3135      deoptimization_requests_.push_back(req);
3136      break;
3137    }
3138    case DeoptimizationRequest::kSelectiveUndeoptimization: {
3139      DCHECK(req.Method() != nullptr);
3140      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
3141                 << " for undeoptimization of " << PrettyMethod(req.Method());
3142      deoptimization_requests_.push_back(req);
3143      break;
3144    }
3145    default: {
3146      LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind();
3147      break;
3148    }
3149  }
3150}
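
// For illustration: event registrations are reference-counted above, so only the
// 0 -> 1 and 1 -> 0 transitions queue an instrumentation change. For example, two
// debugger events that both need the same instrumentation event (say kFieldRead)
// produce a single kRegisterForEvent request; removing one of them merely
// decrements the counter, and only the last removal queues kUnregisterForEvent.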
3151
3152void Dbg::ManageDeoptimization() {
3153  Thread* const self = Thread::Current();
3154  {
3155    // Avoid suspend/resume if there is no pending request.
3156    MutexLock mu(self, *Locks::deoptimization_lock_);
3157    if (deoptimization_requests_.empty()) {
3158      return;
3159    }
3160  }
3161  CHECK_EQ(self->GetState(), kRunnable);
3162  self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
3163  // We need to suspend mutator threads first.
3164  Runtime* const runtime = Runtime::Current();
3165  runtime->GetThreadList()->SuspendAll();
3166  const ThreadState old_state = self->SetStateUnsafe(kRunnable);
3167  {
3168    MutexLock mu(self, *Locks::deoptimization_lock_);
3169    size_t req_index = 0;
3170    for (DeoptimizationRequest& request : deoptimization_requests_) {
3171      VLOG(jdwp) << "Process deoptimization request #" << req_index++;
3172      ProcessDeoptimizationRequest(request);
3173    }
3174    deoptimization_requests_.clear();
3175  }
3176  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
3177  runtime->GetThreadList()->ResumeAll();
3178  self->TransitionFromSuspendedToRunnable();
3179}
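
// Rough intended flow, as suggested by the functions above and below: an event
// handler computes a DeoptimizationRequest (e.g. via WatchLocation or
// UnwatchLocation), queues it with RequestDeoptimization(), then calls
// ManageDeoptimization() to apply all pending requests in one suspend-all pass.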
3180
3181static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
3182    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3183  const DexFile::CodeItem* code_item = m->GetCodeItem();
3184  if (code_item == nullptr) {
3185    // TODO: We should not be asked to watch a location in a native or abstract method, so the
3186    // code item should never be null. We could just check that we never encounter this case.
3187    return false;
3188  }
3189  // Note: method verifier may cause thread suspension.
3190  self->AssertThreadSuspensionIsAllowable();
3191  StackHandleScope<3> hs(self);
3192  mirror::Class* declaring_class = m->GetDeclaringClass();
3193  Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
3194  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
3195  Handle<mirror::ArtMethod> method(hs.NewHandle(m));
3196  verifier::MethodVerifier verifier(self, dex_cache->GetDexFile(), dex_cache, class_loader,
3197                                    &m->GetClassDef(), code_item, m->GetDexMethodIndex(), method,
3198                                    m->GetAccessFlags(), false, true, false, true);
3199  // Note: we don't need to verify the method.
3200  return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
3201}
3202
3203static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
3204    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3205  for (Breakpoint& breakpoint : gBreakpoints) {
3206    if (breakpoint.Method() == m) {
3207      return &breakpoint;
3208    }
3209  }
3210  return nullptr;
3211}
3212
3213// Sanity checks all existing breakpoints on the same method.
3214static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m,
3215                                           DeoptimizationRequest::Kind deoptimization_kind)
3216    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::breakpoint_lock_) {
3217  for (const Breakpoint& breakpoint : gBreakpoints) {
3218    if (breakpoint.Method() == m) {
3219      CHECK_EQ(deoptimization_kind, breakpoint.GetDeoptimizationKind());
3220    }
3221  }
3222  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
3223  if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
3224    // We should have deoptimized everything but not "selectively" deoptimized this method.
3225    CHECK(instrumentation->AreAllMethodsDeoptimized());
3226    CHECK(!instrumentation->IsDeoptimized(m));
3227  } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3228    // We should have "selectively" deoptimized this method.
3229    // Note: while we have not deoptimized everything for this method, we may have done it for
3230    // another event.
3231    CHECK(instrumentation->IsDeoptimized(m));
3232  } else {
3233    // This method does not require deoptimization.
3234    CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
3235    CHECK(!instrumentation->IsDeoptimized(m));
3236  }
3237}
3238
3239// Returns the deoptimization kind required to set a breakpoint in a method.
3240// If a breakpoint has already been set, we also return the first breakpoint
3241// through the given 'existing_brkpt' pointer.
3242static DeoptimizationRequest::Kind GetRequiredDeoptimizationKind(Thread* self,
3243                                                                 mirror::ArtMethod* m,
3244                                                                 const Breakpoint** existing_brkpt)
3245    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3246  if (!Dbg::RequiresDeoptimization()) {
3247    // We already run in interpreter-only mode so we don't need to deoptimize anything.
3248    VLOG(jdwp) << "No need for deoptimization when fully running with interpreter for method "
3249               << PrettyMethod(m);
3250    return DeoptimizationRequest::kNothing;
3251  }
3252  const Breakpoint* first_breakpoint;
3253  {
3254    ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
3255    first_breakpoint = FindFirstBreakpointForMethod(m);
3256    *existing_brkpt = first_breakpoint;
3257  }
3258
3259  if (first_breakpoint == nullptr) {
3260    // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
3261    // inlined, we deoptimize everything; otherwise we deoptimize only this method.
3262    // Note: IsMethodPossiblyInlined goes into the method verifier and may cause thread suspension.
3263    // Therefore we must not hold any lock when we call it.
3264    bool need_full_deoptimization = IsMethodPossiblyInlined(self, m);
3265    if (need_full_deoptimization) {
3266      VLOG(jdwp) << "Need full deoptimization because of possible inlining of method "
3267                 << PrettyMethod(m);
3268      return DeoptimizationRequest::kFullDeoptimization;
3269    } else {
3270      // We don't need to deoptimize if the method has not been compiled.
3271      ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();
3272      const bool is_compiled = class_linker->GetOatMethodQuickCodeFor(m) != nullptr;
3273      if (is_compiled) {
3274        // If the method may be called through its direct code pointer (without loading
3275        // its updated entrypoint), we need full deoptimization to not miss the breakpoint.
3276        if (class_linker->MayBeCalledWithDirectCodePointer(m)) {
3277          VLOG(jdwp) << "Need full deoptimization because of possible direct code call "
3278                     << "into image for compiled method " << PrettyMethod(m);
3279          return DeoptimizationRequest::kFullDeoptimization;
3280        } else {
3281          VLOG(jdwp) << "Need selective deoptimization for compiled method " << PrettyMethod(m);
3282          return DeoptimizationRequest::kSelectiveDeoptimization;
3283        }
3284      } else {
3285        // Method is not compiled: we don't need to deoptimize.
3286        VLOG(jdwp) << "No need for deoptimization for non-compiled method " << PrettyMethod(m);
3287        return DeoptimizationRequest::kNothing;
3288      }
3289    }
3290  } else {
3291    // There is at least one breakpoint for this method: we don't need to deoptimize.
3292    // Let's check that all breakpoints are configured the same way for deoptimization.
3293    VLOG(jdwp) << "Breakpoint already set: no deoptimization is required";
3294    DeoptimizationRequest::Kind deoptimization_kind = first_breakpoint->GetDeoptimizationKind();
3295    if (kIsDebugBuild) {
3296      ReaderMutexLock mu(self, *Locks::breakpoint_lock_);
3297      SanityCheckExistingBreakpoints(m, deoptimization_kind);
3298    }
3299    return DeoptimizationRequest::kNothing;
3300  }
3301}
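
// Decision summary for GetRequiredDeoptimizationKind (restating the code above):
//   - runtime already interpreter-only          -> kNothing
//   - breakpoint already set on the method      -> kNothing (kinds must match)
//   - method possibly inlined                   -> kFullDeoptimization
//   - compiled, may use direct code pointer     -> kFullDeoptimization
//   - compiled, no direct code pointer calls    -> kSelectiveDeoptimization
//   - not compiled                              -> kNothing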
3302
3303// Installs a breakpoint at the specified location. Also indicates through the deoptimization
3304// request if we need to deoptimize.
3305void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3306  Thread* const self = Thread::Current();
3307  mirror::ArtMethod* m = FromMethodId(location->method_id);
3308  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3309
3310  const Breakpoint* existing_breakpoint = nullptr;
3311  const DeoptimizationRequest::Kind deoptimization_kind =
3312      GetRequiredDeoptimizationKind(self, m, &existing_breakpoint);
3313  req->SetKind(deoptimization_kind);
3314  if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3315    req->SetMethod(m);
3316  } else {
3317    CHECK(deoptimization_kind == DeoptimizationRequest::kNothing ||
3318          deoptimization_kind == DeoptimizationRequest::kFullDeoptimization);
3319    req->SetMethod(nullptr);
3320  }
3321
3322  {
3323    WriterMutexLock mu(self, *Locks::breakpoint_lock_);
3324    // If there is at least one existing breakpoint on the same method, the new breakpoint
3325    // must have the same deoptimization kind as the existing breakpoint(s).
3326    DeoptimizationRequest::Kind breakpoint_deoptimization_kind;
3327    if (existing_breakpoint != nullptr) {
3328      breakpoint_deoptimization_kind = existing_breakpoint->GetDeoptimizationKind();
3329    } else {
3330      breakpoint_deoptimization_kind = deoptimization_kind;
3331    }
3332    gBreakpoints.push_back(Breakpoint(m, location->dex_pc, breakpoint_deoptimization_kind));
3333    VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
3334               << gBreakpoints[gBreakpoints.size() - 1];
3335  }
3336}
3337
3338// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
3339// request if we need to undeoptimize.
3340void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
3341  WriterMutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
3342  mirror::ArtMethod* m = FromMethodId(location->method_id);
3343  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
3344  DeoptimizationRequest::Kind deoptimization_kind = DeoptimizationRequest::kNothing;
3345  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
3346    if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) {
3347      VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
3348      deoptimization_kind = gBreakpoints[i].GetDeoptimizationKind();
3349      DCHECK_EQ(deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization,
3350                Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
3351      gBreakpoints.erase(gBreakpoints.begin() + i);
3352      break;
3353    }
3354  }
3355  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
3356  if (existing_breakpoint == nullptr) {
3357    // There are no more breakpoints on this method: we need to undeoptimize.
3358    if (deoptimization_kind == DeoptimizationRequest::kFullDeoptimization) {
3359      // This method required full deoptimization: we need to undeoptimize everything.
3360      req->SetKind(DeoptimizationRequest::kFullUndeoptimization);
3361      req->SetMethod(nullptr);
3362    } else if (deoptimization_kind == DeoptimizationRequest::kSelectiveDeoptimization) {
3363      // This method required selective deoptimization: we need to undeoptimize only that method.
3364      req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization);
3365      req->SetMethod(m);
3366    } else {
3367      // This method had no need for deoptimization: do nothing.
3368      CHECK_EQ(deoptimization_kind, DeoptimizationRequest::kNothing);
3369      req->SetKind(DeoptimizationRequest::kNothing);
3370      req->SetMethod(nullptr);
3371    }
3372  } else {
3373    // There is at least one breakpoint for this method: we don't need to undeoptimize.
3374    req->SetKind(DeoptimizationRequest::kNothing);
3375    req->SetMethod(nullptr);
3376    if (kIsDebugBuild) {
3377      SanityCheckExistingBreakpoints(m, deoptimization_kind);
3378    }
3379  }
3380}
3381
3382// Scoped utility class to suspend a thread so that we may do tasks such as walking its stack. Doesn't
3383// cause suspension if the thread is the current thread.
3384class ScopedThreadSuspension {
3385 public:
3386  ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
3387      LOCKS_EXCLUDED(Locks::thread_list_lock_)
3388      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
3389      thread_(nullptr),
3390      error_(JDWP::ERR_NONE),
3391      self_suspend_(false),
3392      other_suspend_(false) {
3393    ScopedObjectAccessUnchecked soa(self);
3394    {
3395      MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3396      thread_ = DecodeThread(soa, thread_id, &error_);
3397    }
3398    if (error_ == JDWP::ERR_NONE) {
3399      if (thread_ == soa.Self()) {
3400        self_suspend_ = true;
3401      } else {
3402        soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3403        jobject thread_peer = Dbg::GetObjectRegistry()->GetJObject(thread_id);
3404        bool timed_out;
3405        ThreadList* thread_list = Runtime::Current()->GetThreadList();
3406        Thread* suspended_thread = thread_list->SuspendThreadByPeer(thread_peer, true, true,
3407                                                                    &timed_out);
3408        CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
3409        if (suspended_thread == nullptr) {
3410          // Thread terminated from under us while suspending.
3411          error_ = JDWP::ERR_INVALID_THREAD;
3412        } else {
3413          CHECK_EQ(suspended_thread, thread_);
3414          other_suspend_ = true;
3415        }
3416      }
3417    }
3418  }
3419
3420  Thread* GetThread() const {
3421    return thread_;
3422  }
3423
3424  JDWP::JdwpError GetError() const {
3425    return error_;
3426  }
3427
3428  ~ScopedThreadSuspension() {
3429    if (other_suspend_) {
3430      Runtime::Current()->GetThreadList()->Resume(thread_, true);
3431    }
3432  }
3433
3434 private:
3435  Thread* thread_;
3436  JDWP::JdwpError error_;
3437  bool self_suspend_;
3438  bool other_suspend_;
3439};
3440
3441JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
3442                                   JDWP::JdwpStepDepth step_depth) {
3443  Thread* self = Thread::Current();
3444  ScopedThreadSuspension sts(self, thread_id);
3445  if (sts.GetError() != JDWP::ERR_NONE) {
3446    return sts.GetError();
3447  }
3448
3449  //
3450  // Work out what Method* we're in, the current line number, and how deep the stack currently
3451  // is for step-out.
3452  //
3453
3454  struct SingleStepStackVisitor : public StackVisitor {
3455    explicit SingleStepStackVisitor(Thread* thread, SingleStepControl* single_step_control,
3456                                    int32_t* line_number)
3457        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
3458        : StackVisitor(thread, nullptr), single_step_control_(single_step_control),
3459          line_number_(line_number) {
3460      DCHECK_EQ(single_step_control_, thread->GetSingleStepControl());
3461      single_step_control_->method = nullptr;
3462      single_step_control_->stack_depth = 0;
3463    }
3464
3465    // TODO: Enable annotalysis. We know the lock is held in the constructor, but the
3466    // abstraction confuses annotalysis.
3467    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
3468      mirror::ArtMethod* m = GetMethod();
3469      if (!m->IsRuntimeMethod()) {
3470        ++single_step_control_->stack_depth;
3471        if (single_step_control_->method == nullptr) {
3472          mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
3473          single_step_control_->method = m;
3474          *line_number_ = -1;
3475          if (dex_cache != nullptr) {
3476            const DexFile& dex_file = *dex_cache->GetDexFile();
3477            *line_number_ = dex_file.GetLineNumFromPC(m, GetDexPc());
3478          }
3479        }
3480      }
3481      return true;
3482    }
3483
3484    SingleStepControl* const single_step_control_;
3485    int32_t* const line_number_;
3486  };
3487
3488  Thread* const thread = sts.GetThread();
3489  SingleStepControl* const single_step_control = thread->GetSingleStepControl();
3490  DCHECK(single_step_control != nullptr);
3491  int32_t line_number = -1;
3492  SingleStepStackVisitor visitor(thread, single_step_control, &line_number);
3493  visitor.WalkStack();
3494
3495  //
3496  // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
3497  //
3498
3499  struct DebugCallbackContext {
3500    explicit DebugCallbackContext(SingleStepControl* single_step_control_cb, int32_t line_number_cb,
3501                                  const DexFile::CodeItem* code_item)
3502      : single_step_control_(single_step_control_cb), line_number_(line_number_cb),
3503        code_item_(code_item), last_pc_valid(false), last_pc(0) {
3504    }
3505
3506    static bool Callback(void* raw_context, uint32_t address, uint32_t line_number_cb) {
3507      DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
3508      if (static_cast<int32_t>(line_number_cb) == context->line_number_) {
3509        if (!context->last_pc_valid) {
3510          // Everything from this address until the next line change is ours.
3511          context->last_pc = address;
3512          context->last_pc_valid = true;
3513        }
3514        // Otherwise, if we're already in a valid range for this line,
3515        // just keep going (shouldn't really happen)...
3516      } else if (context->last_pc_valid) {  // and the line number is new
3517        // Add everything from the last entry up until here to the set.
3518        for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) {
3519          context->single_step_control_->dex_pcs.insert(dex_pc);
3520        }
3521        context->last_pc_valid = false;
3522      }
3523      return false;  // There may be multiple entries for any given line.
3524    }
3525
3526    ~DebugCallbackContext() {
3527      // If the line number was the last in the position table...
3528      if (last_pc_valid) {
3529        size_t end = code_item_->insns_size_in_code_units_;
3530        for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
3531          single_step_control_->dex_pcs.insert(dex_pc);
3532        }
3533      }
3534    }
3535
3536    SingleStepControl* const single_step_control_;
3537    const int32_t line_number_;
3538    const DexFile::CodeItem* const code_item_;
3539    bool last_pc_valid;
3540    uint32_t last_pc;
3541  };
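  // Hypothetical example of the decoding above: for line_number == 10 and a
  // positions table of (0x00, 10), (0x05, 11), (0x09, 10), (0x0c, 12), the
  // callback records [0x00, 0x05) once line 11 appears and [0x09, 0x0c) once
  // line 12 appears, so dex_pcs becomes {0x00..0x04, 0x09..0x0b}. A range still
  // open at the end is closed by the destructor using insns_size_in_code_units_.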
3542  single_step_control->dex_pcs.clear();
3543  mirror::ArtMethod* m = single_step_control->method;
3544  if (!m->IsNative()) {
3545    const DexFile::CodeItem* const code_item = m->GetCodeItem();
3546    DebugCallbackContext context(single_step_control, line_number, code_item);
3547    m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
3548                                     DebugCallbackContext::Callback, nullptr, &context);
3549  }
3550
3551  //
3552  // Everything else...
3553  //
3554
3555  single_step_control->step_size = step_size;
3556  single_step_control->step_depth = step_depth;
3557  single_step_control->is_active = true;
3558
3559  if (VLOG_IS_ON(jdwp)) {
3560    VLOG(jdwp) << "Single-step thread: " << *thread;
3561    VLOG(jdwp) << "Single-step step size: " << single_step_control->step_size;
3562    VLOG(jdwp) << "Single-step step depth: " << single_step_control->step_depth;
3563    VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->method);
3564    VLOG(jdwp) << "Single-step current line: " << line_number;
3565    VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->stack_depth;
3566    VLOG(jdwp) << "Single-step dex_pc values:";
3567    for (uint32_t dex_pc : single_step_control->dex_pcs) {
3568      VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
3569    }
3570  }
3571
3572  return JDWP::ERR_NONE;
3573}
3574
3575void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
3576  ScopedObjectAccessUnchecked soa(Thread::Current());
3577  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3578  JDWP::JdwpError error;
3579  Thread* thread = DecodeThread(soa, thread_id, &error);
3580  if (error == JDWP::ERR_NONE) {
3581    SingleStepControl* single_step_control = thread->GetSingleStepControl();
3582    DCHECK(single_step_control != nullptr);
3583    single_step_control->Clear();
3584  }
3585}
3586
3587static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
3588  switch (tag) {
3589    default:
3590      LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
3591      UNREACHABLE();
3592
3593    // Primitives.
3594    case JDWP::JT_BYTE:    return 'B';
3595    case JDWP::JT_CHAR:    return 'C';
3596    case JDWP::JT_FLOAT:   return 'F';
3597    case JDWP::JT_DOUBLE:  return 'D';
3598    case JDWP::JT_INT:     return 'I';
3599    case JDWP::JT_LONG:    return 'J';
3600    case JDWP::JT_SHORT:   return 'S';
3601    case JDWP::JT_VOID:    return 'V';
3602    case JDWP::JT_BOOLEAN: return 'Z';
3603
3604    // Reference types.
3605    case JDWP::JT_ARRAY:
3606    case JDWP::JT_OBJECT:
3607    case JDWP::JT_STRING:
3608    case JDWP::JT_THREAD:
3609    case JDWP::JT_THREAD_GROUP:
3610    case JDWP::JT_CLASS_LOADER:
3611    case JDWP::JT_CLASS_OBJECT:
3612      return 'L';
3613  }
3614}
3615
3616JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
3617                                  JDWP::RefTypeId class_id, JDWP::MethodId method_id,
3618                                  uint32_t arg_count, uint64_t* arg_values,
3619                                  JDWP::JdwpTag* arg_types, uint32_t options,
3620                                  JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
3621                                  JDWP::ObjectId* pExceptionId) {
3622  ThreadList* thread_list = Runtime::Current()->GetThreadList();
3623
3624  Thread* targetThread = nullptr;
3625  DebugInvokeReq* req = nullptr;
3626  Thread* self = Thread::Current();
3627  {
3628    ScopedObjectAccessUnchecked soa(self);
3629    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3630    JDWP::JdwpError error;
3631    targetThread = DecodeThread(soa, thread_id, &error);
3632    if (error != JDWP::ERR_NONE) {
3633      LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
3634      return error;
3635    }
3636    req = targetThread->GetInvokeReq();
3637    if (!req->ready) {
3638      LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
3639      return JDWP::ERR_INVALID_THREAD;
3640    }
3641
3642    /*
3643     * We currently have a bug where we don't successfully resume the
3644     * target thread if the suspend count is too deep.  We're expected to
3645     * require one "resume" for each "suspend", but when asked to execute
3646     * a method we have to resume fully and then re-suspend it back to the
3647     * same level.  (The easiest way to cause this is to type "suspend"
3648     * multiple times in jdb.)
3649     *
3650     * It's unclear what this means when the event specifies "resume all"
3651     * and some threads are suspended more deeply than others.  This is
3652     * a rare problem, so for now we just prevent it from hanging forever
3653     * by rejecting the method invocation request.  Without this, we will
3654     * be stuck waiting on a suspended thread.
3655     */
3656    int suspend_count;
3657    {
3658      MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
3659      suspend_count = targetThread->GetSuspendCount();
3660    }
3661    if (suspend_count > 1) {
3662      LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count;
3663      return JDWP::ERR_THREAD_SUSPENDED;  // Probably not expected here.
3664    }
3665
3666    mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id, &error);
3667    if (error != JDWP::ERR_NONE) {
3668      return JDWP::ERR_INVALID_OBJECT;
3669    }
3670
3671    mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id, &error);
3672    if (error != JDWP::ERR_NONE) {
3673      return JDWP::ERR_INVALID_OBJECT;
3674    }
3675    // TODO: check that 'thread' is actually a java.lang.Thread!
3676
3677    mirror::Class* c = DecodeClass(class_id, &error);
3678    if (c == nullptr) {
3679      return error;
3680    }
3681
3682    mirror::ArtMethod* m = FromMethodId(method_id);
3683    if (m->IsStatic() != (receiver == nullptr)) {
3684      return JDWP::ERR_INVALID_METHODID;
3685    }
3686    if (m->IsStatic()) {
3687      if (m->GetDeclaringClass() != c) {
3688        return JDWP::ERR_INVALID_METHODID;
3689      }
3690    } else {
3691      if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
3692        return JDWP::ERR_INVALID_METHODID;
3693      }
3694    }
3695
3696    // Check that the argument list matches the method.
3697    uint32_t shorty_len = 0;
3698    const char* shorty = m->GetShorty(&shorty_len);
3699    if (shorty_len - 1 != arg_count) {
3700      return JDWP::ERR_ILLEGAL_ARGUMENT;
3701    }
3702
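    // For example, a method returning int and taking (Object, long) has shorty
    // "ILJ": shorty_len is 3, so arg_count must be 2, and each shorty[i + 1]
    // below is checked against the corresponding JDWP argument tag.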
3703    {
3704      StackHandleScope<3> hs(soa.Self());
3705      HandleWrapper<mirror::ArtMethod> h_m(hs.NewHandleWrapper(&m));
3706      HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver));
3707      HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c));
3708      const DexFile::TypeList* types = m->GetParameterTypeList();
3709      for (size_t i = 0; i < arg_count; ++i) {
3710        if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
3711          return JDWP::ERR_ILLEGAL_ARGUMENT;
3712        }
3713
3714        if (shorty[i + 1] == 'L') {
3715          // Did we really get an argument of an appropriate reference type?
3716          mirror::Class* parameter_type =
3717              h_m->GetClassFromTypeIndex(types->GetTypeItem(i).type_idx_, true);
3718          mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i], &error);
3719          if (error != JDWP::ERR_NONE) {
3720            return JDWP::ERR_INVALID_OBJECT;
3721          }
3722          if (argument != nullptr && !argument->InstanceOf(parameter_type)) {
3723            return JDWP::ERR_ILLEGAL_ARGUMENT;
3724          }
3725
3726          // Turn the on-the-wire ObjectId into a jobject.
3727          jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
3728          v.l = gRegistry->GetJObject(arg_values[i]);
3729        }
3730      }
3731    }
3732
3733    req->receiver = receiver;
3734    req->thread = thread;
3735    req->klass = c;
3736    req->method = m;
3737    req->arg_count = arg_count;
3738    req->arg_values = arg_values;
3739    req->options = options;
3740    req->invoke_needed = true;
3741  }
3742
3743  // The fact that we've released the thread list lock is a bit risky -- if the thread goes
3744  // away we're sitting high and dry -- but we must release this before the ResumeAllThreads
3745  // call, and it's unwise to hold it during WaitForSuspend.
3746
3747  {
3748    /*
3749     * We change our (JDWP thread) status, which should be THREAD_RUNNING,
3750     * so we can suspend for a GC if the invoke request causes us to
3751     * run out of memory.  It's also a good idea to change it before locking
3752     * the invokeReq mutex, although that should never be held for long.
3753     */
3754    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
3755
3756    VLOG(jdwp) << "    Transferring control to event thread";
3757    {
3758      MutexLock mu(self, req->lock);
3759
3760      if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3761        VLOG(jdwp) << "      Resuming all threads";
3762        thread_list->UndoDebuggerSuspensions();
3763      } else {
3764        VLOG(jdwp) << "      Resuming event thread only";
3765        thread_list->Resume(targetThread, true);
3766      }
3767
3768      // Wait for the request to finish executing.
3769      while (req->invoke_needed) {
3770        req->cond.Wait(self);
3771      }
3772    }
3773    VLOG(jdwp) << "    Control has returned from event thread";
3774
3775    /* wait for thread to re-suspend itself */
3776    SuspendThread(thread_id, false /* request_suspension */);
3777    self->TransitionFromSuspendedToRunnable();
3778  }
3779
3780  /*
3781   * Suspend the threads.  We waited for the target thread to suspend
3782   * itself, so all we need to do is suspend the others.
3783   *
3784   * The suspendAllThreads() call will double-suspend the event thread,
3785   * so we want to resume the target thread once to keep the books straight.
3786   */
3787  if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3788    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3789    VLOG(jdwp) << "      Suspending all threads";
3790    thread_list->SuspendAllForDebugger();
3791    self->TransitionFromSuspendedToRunnable();
3792    VLOG(jdwp) << "      Resuming event thread to balance the count";
3793    thread_list->Resume(targetThread, true);
3794  }
3795
3796  // Copy the result.
3797  *pResultTag = req->result_tag;
3798  if (IsPrimitiveTag(req->result_tag)) {
3799    *pResultValue = req->result_value.GetJ();
3800  } else {
3801    *pResultValue = gRegistry->Add(req->result_value.GetL());
3802  }
3803  *pExceptionId = req->exception;
3804  return req->error;
3805}
3806
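// Sketch of the invoke handshake, inferred from InvokeMethod() above and
// ExecuteMethod() below: the JDWP thread fills in the DebugInvokeReq, resumes
// the event thread (or all threads), then waits on req->cond until
// invoke_needed is cleared. The event thread runs ExecuteMethod(), which stores
// the result and any exception back into the request; the event thread's JDWP
// glue is then expected to clear invoke_needed, signal the condition and
// re-suspend, after which the JDWP thread rebalances suspensions and copies the
// result out.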
3807void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
3808  ScopedObjectAccess soa(Thread::Current());
3809
3810  // We can be called while an exception is pending. We need
3811  // to preserve that across the method invocation.
3812  StackHandleScope<4> hs(soa.Self());
3813  auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr);
3814  auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
3815  auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr);
3816  uint32_t old_throw_dex_pc;
3817  bool old_exception_report_flag;
3818  {
3819    ThrowLocation old_throw_location;
3820    mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
3821    old_throw_this_object.Assign(old_throw_location.GetThis());
3822    old_throw_method.Assign(old_throw_location.GetMethod());
3823    old_exception.Assign(old_exception_obj);
3824    old_throw_dex_pc = old_throw_location.GetDexPc();
3825    old_exception_report_flag = soa.Self()->IsExceptionReportedToInstrumentation();
3826    soa.Self()->ClearException();
3827  }
3828
3829  // Translate the method through the vtable, unless the debugger wants to suppress it.
3830  MutableHandle<mirror::ArtMethod> m(hs.NewHandle(pReq->method));
3831  if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != nullptr) {
3832    mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get());
3833    if (actual_method != m.Get()) {
3834      VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method);
3835      m.Assign(actual_method);
3836    }
3837  }
3838  VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get())
3839             << " receiver=" << pReq->receiver
3840             << " arg_count=" << pReq->arg_count;
3841  CHECK(m.Get() != nullptr);
3842
3843  CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
3844
3845  pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()),
3846                                         reinterpret_cast<jvalue*>(pReq->arg_values));
3847
3848  mirror::Throwable* exception = soa.Self()->GetException(nullptr);
3849  soa.Self()->ClearException();
3850  pReq->exception = gRegistry->Add(exception);
3851  pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty());
3852  if (pReq->exception != 0) {
3853    VLOG(jdwp) << "  JDWP invocation returning with exception=" << exception
3854        << " " << exception->Dump();
3855    pReq->result_value.SetJ(0);
3856  } else if (pReq->result_tag == JDWP::JT_OBJECT) {
3857    /* if no exception thrown, examine object result more closely */
3858    JDWP::JdwpTag new_tag = TagFromObject(soa, pReq->result_value.GetL());
3859    if (new_tag != pReq->result_tag) {
3860      VLOG(jdwp) << "  JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
3861      pReq->result_tag = new_tag;
3862    }
3863
3864    /*
3865     * Register the object.  We don't actually need an ObjectId yet,
3866     * but we do need to be sure that the GC won't move or discard the
3867     * object when we switch out of RUNNING.  The ObjectId conversion
3868     * will add the object to the "do not touch" list.
3869     *
3870     * We can't use the "tracked allocation" mechanism here because
3871     * the object is going to be handed off to a different thread.
3872     */
3873    gRegistry->Add(pReq->result_value.GetL());
3874  }
3875
3876  if (old_exception.Get() != nullptr) {
3877    ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(),
3878                                         old_throw_dex_pc);
3879    soa.Self()->SetException(gc_safe_throw_location, old_exception.Get());
3880    soa.Self()->SetExceptionReportedToInstrumentation(old_exception_report_flag);
3881  }
3882}
3883
3884/*
3885 * "request" contains a full JDWP packet, possibly with multiple chunks.  We
3886 * need to process each, accumulate the replies, and ship the whole thing
3887 * back.
3888 *
3889 * Returns "true" if we have a reply.  The reply buffer is newly allocated,
3890 * and includes the chunk type/length, followed by the data.
3891 *
3892 * OLD-TODO: we currently assume that the request and reply include a single
3893 * chunk.  If this becomes inconvenient we will need to adapt.
3894 */
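// For reference, the chunk framing handled here (matching the reads below and
// the reply construction at the end) is:
//   [u4] type    (a FourCC, e.g. CHUNK_TYPE("THCR"))
//   [u4] length
//   [length bytes] payload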
3895bool Dbg::DdmHandlePacket(JDWP::Request* request, uint8_t** pReplyBuf, int* pReplyLen) {
3896  Thread* self = Thread::Current();
3897  JNIEnv* env = self->GetJniEnv();
3898
3899  uint32_t type = request->ReadUnsigned32("type");
3900  uint32_t length = request->ReadUnsigned32("length");
3901
3902  // Create a byte[] corresponding to 'request'.
3903  size_t request_length = request->size();
3904  ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
3905  if (dataArray.get() == nullptr) {
3906    LOG(WARNING) << "byte[] allocation failed: " << request_length;
3907    env->ExceptionClear();
3908    return false;
3909  }
3910  env->SetByteArrayRegion(dataArray.get(), 0, request_length,
3911                          reinterpret_cast<const jbyte*>(request->data()));
3912  request->Skip(request_length);
3913
3914  // Run through and find all chunks.  [Currently just find the first.]
3915  ScopedByteArrayRO contents(env, dataArray.get());
3916  if (length != request_length) {
3917    LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zd)", length, request_length);
3918    return false;
3919  }
3920
3921  // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
3922  ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3923                                                                 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
3924                                                                 type, dataArray.get(), 0, length));
3925  if (env->ExceptionCheck()) {
3926    LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
3927    env->ExceptionDescribe();
3928    env->ExceptionClear();
3929    return false;
3930  }
3931
3932  if (chunk.get() == nullptr) {
3933    return false;
3934  }
3935
3936  /*
3937   * Pull the pieces out of the chunk.  We copy the results into a
3938   * newly-allocated buffer that the caller can free.  We don't want to
3939   * continue using the Chunk object because nothing has a reference to it.
3940   *
3941   * We could avoid this by returning type/data/offset/length and having
3942   * the caller be aware of the object lifetime issues, but that
3943   * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
3944   * if we have responses for multiple chunks.
3945   *
3946   * So we're pretty much stuck with copying data around multiple times.
3947   */
3948  ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
3949  jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
3950  length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
3951  type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);
3952
3953  VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
3954  if (length == 0 || replyData.get() == nullptr) {
3955    return false;
3956  }
3957
3958  const int kChunkHdrLen = 8;
3959  uint8_t* reply = new uint8_t[length + kChunkHdrLen];
3960  if (reply == nullptr) {
3961    LOG(WARNING) << "malloc failed: " << (length + kChunkHdrLen);
3962    return false;
3963  }
3964  JDWP::Set4BE(reply + 0, type);
3965  JDWP::Set4BE(reply + 4, length);
3966  env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
3967
3968  *pReplyBuf = reply;
3969  *pReplyLen = length + kChunkHdrLen;
3970
3971  VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
3972  return true;
3973}
3974
3975void Dbg::DdmBroadcast(bool connect) {
3976  VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
3977
3978  Thread* self = Thread::Current();
3979  if (self->GetState() != kRunnable) {
3980    LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
3981    /* try anyway? */
3982  }
3983
3984  JNIEnv* env = self->GetJniEnv();
3985  jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
3986  env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3987                            WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
3988                            event);
3989  if (env->ExceptionCheck()) {
3990    LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
3991    env->ExceptionDescribe();
3992    env->ExceptionClear();
3993  }
3994}
3995
3996void Dbg::DdmConnected() {
3997  Dbg::DdmBroadcast(true);
3998}
3999
4000void Dbg::DdmDisconnected() {
4001  Dbg::DdmBroadcast(false);
4002  gDdmThreadNotification = false;
4003}
4004
4005/*
4006 * Send a notification when a thread starts, stops, or changes its name.
4007 *
4008 * Because we broadcast the full set of threads when the notifications are
4009 * first enabled, it's possible for "thread" to be actively executing.
4010 */
4011void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
4012  if (!gDdmThreadNotification) {
4013    return;
4014  }
4015
4016  if (type == CHUNK_TYPE("THDE")) {
4017    uint8_t buf[4];
4018    JDWP::Set4BE(&buf[0], t->GetThreadId());
4019    Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
4020  } else {
4021    CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
4022    ScopedObjectAccessUnchecked soa(Thread::Current());
4023    StackHandleScope<1> hs(soa.Self());
4024    Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa)));
4025    size_t char_count = (name.Get() != nullptr) ? name->GetLength() : 0;
4026    const jchar* chars = (name.Get() != nullptr) ? name->GetCharArray()->GetData() : nullptr;
4027
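    // Expected THCR/THNM payload, given the size check below: [u4] thread id,
    // then the thread name as a length-prefixed UTF-16BE string ([u4] length
    // in code units followed by the characters).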
4028    std::vector<uint8_t> bytes;
4029    JDWP::Append4BE(bytes, t->GetThreadId());
4030    JDWP::AppendUtf16BE(bytes, chars, char_count);
4031    CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
4032    Dbg::DdmSendChunk(type, bytes);
4033  }
4034}
4035
4036void Dbg::DdmSetThreadNotification(bool enable) {
4037  // Enable/disable thread notifications.
4038  gDdmThreadNotification = enable;
4039  if (enable) {
4040    // Suspend the VM then post thread start notifications for all threads. Threads attaching will
4041    // see a suspension in progress and block until that ends. They then post their own start
4042    // notification.
4043    SuspendVM();
4044    std::list<Thread*> threads;
4045    Thread* self = Thread::Current();
4046    {
4047      MutexLock mu(self, *Locks::thread_list_lock_);
4048      threads = Runtime::Current()->GetThreadList()->GetList();
4049    }
4050    {
4051      ScopedObjectAccess soa(self);
4052      for (Thread* thread : threads) {
4053        Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
4054      }
4055    }
4056    ResumeVM();
4057  }
4058}
4059
4060void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
4061  if (IsDebuggerActive()) {
4062    gJdwpState->PostThreadChange(t, type == CHUNK_TYPE("THCR"));
4063  }
4064  Dbg::DdmSendThreadNotification(t, type);
4065}
4066
4067void Dbg::PostThreadStart(Thread* t) {
4068  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
4069}
4070
4071void Dbg::PostThreadDeath(Thread* t) {
4072  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
4073}
4074
4075void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
4076  CHECK(buf != nullptr);
4077  iovec vec[1];
4078  vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
4079  vec[0].iov_len = byte_count;
4080  Dbg::DdmSendChunkV(type, vec, 1);
4081}
4082
4083void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
4084  DdmSendChunk(type, bytes.size(), &bytes[0]);
4085}
4086
4087void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
4088  if (gJdwpState == nullptr) {
4089    VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
4090  } else {
4091    gJdwpState->DdmSendChunkV(type, iov, iov_count);
4092  }
4093}
4094
4095JDWP::JdwpState* Dbg::GetJdwpState() {
4096  return gJdwpState;
4097}
4098
4099int Dbg::DdmHandleHpifChunk(HpifWhen when) {
4100  if (when == HPIF_WHEN_NOW) {
4101    DdmSendHeapInfo(when);
4102    return true;
4103  }
4104
4105  if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
4106    LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
4107    return false;
4108  }
4109
4110  gDdmHpifWhen = when;
4111  return true;
4112}
4113
4114bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
4115  if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
4116    LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
4117    return false;
4118  }
4119
4120  if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
4121    LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
4122    return false;
4123  }
4124
4125  if (native) {
4126    gDdmNhsgWhen = when;
4127    gDdmNhsgWhat = what;
4128  } else {
4129    gDdmHpsgWhen = when;
4130    gDdmHpsgWhat = what;
4131  }
4132  return true;
4133}
4134
4135void Dbg::DdmSendHeapInfo(HpifWhen reason) {
4136  // If there's a one-shot 'when', reset it.
4137  if (reason == gDdmHpifWhen) {
4138    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
4139      gDdmHpifWhen = HPIF_WHEN_NEVER;
4140    }
4141  }
4142
4143  /*
4144   * Chunk HPIF (client --> server)
4145   *
4146   * Heap Info. General information about the heap,
4147   * suitable for a summary display.
4148   *
4149   *   [u4]: number of heaps
4150   *
4151   *   For each heap:
4152   *     [u4]: heap ID
4153   *     [u8]: timestamp in ms since Unix epoch
4154   *     [u1]: capture reason (same as 'when' value from server)
4155   *     [u4]: max heap size in bytes (-Xmx)
4156   *     [u4]: current heap size in bytes
4157   *     [u4]: current number of bytes allocated
4158   *     [u4]: current number of objects allocated
4159   */
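  // With heap_count == 1, the payload built below is 4 + (4 + 8 + 1 + 4 + 4 + 4 + 4)
  // = 33 bytes, which is exactly what the CHECK_EQ at the end verifies.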
4160  uint8_t heap_count = 1;
4161  gc::Heap* heap = Runtime::Current()->GetHeap();
4162  std::vector<uint8_t> bytes;
4163  JDWP::Append4BE(bytes, heap_count);
4164  JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
4165  JDWP::Append8BE(bytes, MilliTime());
4166  JDWP::Append1BE(bytes, reason);
4167  JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
4168  JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
4169  JDWP::Append4BE(bytes, heap->GetBytesAllocated());
4170  JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
4171  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
4172  Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
4173}
4174
4175enum HpsgSolidity {
4176  SOLIDITY_FREE = 0,
4177  SOLIDITY_HARD = 1,
4178  SOLIDITY_SOFT = 2,
4179  SOLIDITY_WEAK = 3,
4180  SOLIDITY_PHANTOM = 4,
4181  SOLIDITY_FINALIZABLE = 5,
4182  SOLIDITY_SWEEP = 6,
4183};
4184
4185enum HpsgKind {
4186  KIND_OBJECT = 0,
4187  KIND_CLASS_OBJECT = 1,
4188  KIND_ARRAY_1 = 2,
4189  KIND_ARRAY_2 = 3,
4190  KIND_ARRAY_4 = 4,
4191  KIND_ARRAY_8 = 5,
4192  KIND_UNKNOWN = 6,
4193  KIND_NATIVE = 7,
4194};
4195
4196#define HPSG_PARTIAL (1<<7)
4197#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
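
// Worked examples of the encoding above: HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT)
// == 0x01 and HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) == 0x21. HPSG_PARTIAL
// (1 << 7, i.e. 0x80) can be OR-ed into a state byte; in the DDM heap-segment
// format it marks a run that continues in the following entry.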
4198
4199class HeapChunkContext {
4200 public:
4201  // Maximum chunk size.  Obtain this from the formula:
4202  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
4203  HeapChunkContext(bool merge, bool native)
4204      : buf_(16384 - 16),
4205        type_(0),
4206        chunk_overhead_(0) {
4207    Reset();
4208    if (native) {
4209      type_ = CHUNK_TYPE("NHSG");
4210    } else {
4211      type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
4212    }
4213  }
4214
4215  ~HeapChunkContext() {
4216    if (p_ > &buf_[0]) {
4217      Flush();
4218    }
4219  }
4220
4221  void SetChunkOverhead(size_t chunk_overhead) {
4222    chunk_overhead_ = chunk_overhead;
4223  }
4224
4225  void ResetStartOfNextChunk() {
4226    startOfNextMemoryChunk_ = nullptr;
4227  }
4228
4229  void EnsureHeader(const void* chunk_ptr) {
4230    if (!needHeader_) {
4231      return;
4232    }
4233
4234    // Start a new HPSx chunk.
4235    JDWP::Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
4236    JDWP::Write1BE(&p_, 8);  // Size of allocation unit, in bytes.
4237
4238    JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
4239    JDWP::Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
4240    // [u4]: length of piece, in allocation units
4241    // We won't know this until we're done, so save the offset and stuff in a dummy value.
4242    pieceLenField_ = p_;
4243    JDWP::Write4BE(&p_, 0x55555555);
4244    needHeader_ = false;
4245  }
4246
4247  void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4248    if (pieceLenField_ == nullptr) {
4249      // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
4250      CHECK(needHeader_);
4251      return;
4252    }
4253    // Patch the "length of piece" field.
4254    CHECK_LE(&buf_[0], pieceLenField_);
4255    CHECK_LE(pieceLenField_, p_);
4256    JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
4257
4258    Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
4259    Reset();
4260  }
4261
4262  static void HeapChunkJavaCallback(void* start, void* end, size_t used_bytes, void* arg)
4263      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
4264                            Locks::mutator_lock_) {
4265    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkJavaCallback(start, end, used_bytes);
4266  }
4267
4268  static void HeapChunkNativeCallback(void* start, void* end, size_t used_bytes, void* arg)
4269      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4270    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkNativeCallback(start, end, used_bytes);
4271  }
4272
4273 private:
4274  enum { ALLOCATION_UNIT_SIZE = 8 };
4275
4276  void Reset() {
4277    p_ = &buf_[0];
4278    ResetStartOfNextChunk();
4279    totalAllocationUnits_ = 0;
4280    needHeader_ = true;
4281    pieceLenField_ = nullptr;
4282  }
4283
4284  bool IsNative() const {
4285    return type_ == CHUNK_TYPE("NHSG");
4286  }
4287
4288  // Returns true if the record is for in-use memory that the caller should process.
4289  bool ProcessRecord(void* start, size_t used_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4290    // Note: heap callbacks cannot manipulate the heap they are walking, so the code below takes
4291    // care not to allocate memory; this relies on buf_ already being of the correct size.
4292    if (used_bytes == 0) {
4293      if (start == nullptr) {
4294        // Reset for start of new heap.
4295        startOfNextMemoryChunk_ = nullptr;
4296        Flush();
4297      }
4298      // Only process in-use memory, so that the free-region information also
4299      // covers dlmalloc bookkeeping.
4300      return false;
4301    }
4302    if (startOfNextMemoryChunk_ != nullptr) {
4303      // Transmit any pending free memory. A native free gap over kMaxFreeLen is likely a hole
4304      // between mmaps rather than real free memory, so don't report it; flush instead.
4305      bool flush = true;
4306      if (start > startOfNextMemoryChunk_) {
4307        const size_t kMaxFreeLen = 2 * kPageSize;
4308        void* free_start = startOfNextMemoryChunk_;
4309        void* free_end = start;
4310        const size_t free_len =
4311            reinterpret_cast<uintptr_t>(free_end) - reinterpret_cast<uintptr_t>(free_start);
4312        if (!IsNative() || free_len < kMaxFreeLen) {
4313          AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), free_start, free_len, IsNative());
4314          flush = false;
4315        }
4316      }
4317      if (flush) {
4318        startOfNextMemoryChunk_ = nullptr;
4319        Flush();
4320      }
4321    }
4322    return true;
4323  }
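
// Example with hypothetical addresses: if the previous chunk ended at 0x1000 and the next
// in-use chunk starts at 0x3000, the 0x2000-byte gap is appended as a SOLIDITY_FREE run on
// the managed heap. On the native heap a gap of that size reaches kMaxFreeLen (two pages,
// assuming 4 KiB pages), so it is treated as a hole between mmaps and the segment is
// flushed instead.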
4324
4325  void HeapChunkNativeCallback(void* start, void* /*end*/, size_t used_bytes)
4326      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4327    if (ProcessRecord(start, used_bytes)) {
4328      uint8_t state = ExamineNativeObject(start);
4329      AppendChunk(state, start, used_bytes + chunk_overhead_, true /*is_native*/);
4330      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4331    }
4332  }
4333
4334  void HeapChunkJavaCallback(void* start, void* /*end*/, size_t used_bytes)
4335      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
4336    if (ProcessRecord(start, used_bytes)) {
4337      // Determine the type of this chunk.
4338      // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
4339      // If it's the same, we should combine them.
4340      uint8_t state = ExamineJavaObject(reinterpret_cast<mirror::Object*>(start));
4341      AppendChunk(state, start, used_bytes + chunk_overhead_, false /*is_native*/);
4342      startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + chunk_overhead_;
4343    }
4344  }
4345
4346  void AppendChunk(uint8_t state, void* ptr, size_t length, bool is_native)
4347      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4348    // Make sure there's enough room left in the buffer.
4349    // We need two bytes for each full or partial run of 256 allocation units used by the chunk,
4350    // plus 17 bytes for the header.
4351    const size_t needed = ((RoundUp(length / ALLOCATION_UNIT_SIZE, 256) / 256) * 2) + 17;
4352    size_t bytes_left = &buf_.back() - p_;
4353    if (bytes_left < needed) {
4354      if (is_native) {
4355        // Cannot trigger memory allocation while walking the native heap.
4356        return;
4357      }
4358      Flush();
4359    }
4360
4361    bytes_left = &buf_.back() - p_;
4362    if (bytes_left < needed) {
4363      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", needs "
4364          << needed << " bytes)";
4365      return;
4366    }
4367    EnsureHeader(ptr);
4368    // Write out the chunk description.
4369    length /= ALLOCATION_UNIT_SIZE;   // Convert to allocation units.
4370    totalAllocationUnits_ += length;
4371    while (length > 256) {
4372      *p_++ = state | HPSG_PARTIAL;
4373      *p_++ = 255;     // Run length is encoded as (length - 1), so 255 means 256 units.
4374      length -= 256;
4375    }
4376    *p_++ = state;
4377    *p_++ = length - 1;
4378  }
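
// Worked example (hypothetical sizes): a 4096-byte chunk is 4096 / ALLOCATION_UNIT_SIZE ==
// 512 allocation units. The loop above emits one partial pair (state | HPSG_PARTIAL, 255)
// covering the first 256 units, then the final pair (state, 255); each length byte encodes
// the run length minus one.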
4379
4380  uint8_t ExamineNativeObject(const void* p) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4381    return p == nullptr ? HPSG_STATE(SOLIDITY_FREE, 0) : HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4382  }
4383
4384  uint8_t ExamineJavaObject(mirror::Object* o)
4385      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
4386    if (o == nullptr) {
4387      return HPSG_STATE(SOLIDITY_FREE, 0);
4388    }
4389    // It's an allocated chunk. Figure out what it is.
4390    gc::Heap* heap = Runtime::Current()->GetHeap();
4391    if (!heap->IsLiveObjectLocked(o)) {
4392      LOG(ERROR) << "Invalid object in managed heap: " << o;
4393      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
4394    }
4395    mirror::Class* c = o->GetClass();
4396    if (c == nullptr) {
4397      // The object was probably just created but hasn't been initialized yet.
4398      return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4399    }
4400    if (!heap->IsValidObjectAddress(c)) {
4401      LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
4402      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
4403    }
4404    if (c->IsClassClass()) {
4405      return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
4406    }
4407    if (c->IsArrayClass()) {
4408      switch (c->GetComponentSize()) {
4409      case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
4410      case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
4411      case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
4412      case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
4413      }
4414    }
4415    return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
4416  }
4417
4418  std::vector<uint8_t> buf_;
4419  uint8_t* p_;
4420  uint8_t* pieceLenField_;
4421  void* startOfNextMemoryChunk_;
4422  size_t totalAllocationUnits_;
4423  uint32_t type_;
4424  bool needHeader_;
4425  size_t chunk_overhead_;
4426
4427  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
4428};
4429
4430static void BumpPointerSpaceCallback(mirror::Object* obj, void* arg)
4431    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
4432  const size_t size = RoundUp(obj->SizeOf(), kObjectAlignment);
4433  HeapChunkContext::HeapChunkJavaCallback(
4434      obj, reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(obj) + size), size, arg);
4435}
4436
4437void Dbg::DdmSendHeapSegments(bool native) {
4438  Dbg::HpsgWhen when = native ? gDdmNhsgWhen : gDdmHpsgWhen;
4439  Dbg::HpsgWhat what = native ? gDdmNhsgWhat : gDdmHpsgWhat;
4440  if (when == HPSG_WHEN_NEVER) {
4441    return;
4442  }
4443  // Figure out what kind of chunks we'll be sending.
4444  CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS)
4445      << static_cast<int>(what);
4446
4447  // First, send a heap start chunk.
4448  uint8_t heap_id[4];
4449  JDWP::Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
4450  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
4451  Thread* self = Thread::Current();
4452  Locks::mutator_lock_->AssertSharedHeld(self);
4453
4454  // Send a series of heap segment chunks.
4455  HeapChunkContext context(what == HPSG_WHAT_MERGED_OBJECTS, native);
4456  if (native) {
4457#if defined(HAVE_ANDROID_OS) && defined(USE_DLMALLOC)
4458    dlmalloc_inspect_all(HeapChunkContext::HeapChunkNativeCallback, &context);
4459    HeapChunkContext::HeapChunkNativeCallback(nullptr, nullptr, 0, &context);  // Indicate end of a space.
4460#else
4461    UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc";
4462#endif
4463  } else {
4464    gc::Heap* heap = Runtime::Current()->GetHeap();
4465    for (const auto& space : heap->GetContinuousSpaces()) {
4466      if (space->IsDlMallocSpace()) {
4467        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4468        // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
4469        // allocation then the first sizeof(size_t) may belong to it.
4470        context.SetChunkOverhead(sizeof(size_t));
4471        space->AsDlMallocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
4472      } else if (space->IsRosAllocSpace()) {
4473        context.SetChunkOverhead(0);
4474        // Suspend all threads (taking the mutator lock exclusively) before the heap bitmap
4475        // lock, since RosAlloc's walk cannot release and reacquire the heap bitmap lock itself.
4476        self->TransitionFromRunnableToSuspended(kSuspended);
4477        ThreadList* tl = Runtime::Current()->GetThreadList();
4478        tl->SuspendAll();
4479        {
4480          ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4481          space->AsRosAllocSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
4482        }
4483        tl->ResumeAll();
4484        self->TransitionFromSuspendedToRunnable();
4485      } else if (space->IsBumpPointerSpace()) {
4486        ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4487        context.SetChunkOverhead(0);
4488        space->AsBumpPointerSpace()->Walk(BumpPointerSpaceCallback, &context);
4489        HeapChunkContext::HeapChunkJavaCallback(nullptr, nullptr, 0, &context);
4490      } else {
4491        UNIMPLEMENTED(WARNING) << "Not counting objects in space " << *space;
4492      }
4493      context.ResetStartOfNextChunk();
4494    }
4495    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
4496    // Walk the large objects; these are not in the AllocSpace.
4497    context.SetChunkOverhead(0);
4498    heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkJavaCallback, &context);
4499  }
4500
4501  // Finally, send a heap end chunk.
4502  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
4503}
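
// For illustration, a managed HPSG_WHAT_MERGED_OBJECTS pass emits, in order: one "HPST"
// chunk carrying the u4 heap id, one "HPSG" chunk per buffer flush (a 17-byte header plus
// (state, length) byte pairs), and a final "HPEN" chunk repeating the heap id. The native
// walk uses "NHST"/"NHSG"/"NHEN" instead.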
4504
4505static size_t GetAllocTrackerMax() {
4506#ifdef HAVE_ANDROID_OS
4507  // Check whether there's a system property overriding the number of records.
4508  const char* propertyName = "dalvik.vm.allocTrackerMax";
4509  char allocRecordMaxString[PROPERTY_VALUE_MAX];
4510  if (property_get(propertyName, allocRecordMaxString, "") > 0) {
4511    char* end;
4512    size_t value = strtoul(allocRecordMaxString, &end, 10);
4513    if (*end != '\0') {
4514      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
4515                 << "' --- invalid";
4516      return kDefaultNumAllocRecords;
4517    }
4518    if (!IsPowerOfTwo(value)) {
4519      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
4520                 << "' --- not a power of two";
4521      return kDefaultNumAllocRecords;
4522    }
4523    return value;
4524  }
4525#endif
4526  return kDefaultNumAllocRecords;
4527}
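
// A hypothetical override on a device, subject to the checks above (the value must parse
// completely and be a power of two, or the default is kept):
//   adb shell setprop dalvik.vm.allocTrackerMax 16384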
4528
4529void Dbg::SetAllocTrackingEnabled(bool enable) {
4530  Thread* self = Thread::Current();
4531  if (enable) {
4532    {
4533      MutexLock mu(self, *Locks::alloc_tracker_lock_);
4534      if (recent_allocation_records_ != nullptr) {
4535        return;  // Already enabled, bail.
4536      }
4537      alloc_record_max_ = GetAllocTrackerMax();
4538      LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
4539                << kMaxAllocRecordStackDepth << " frames, taking "
4540                << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
4541      DCHECK_EQ(alloc_record_head_, 0U);
4542      DCHECK_EQ(alloc_record_count_, 0U);
4543      recent_allocation_records_ = new AllocRecord[alloc_record_max_];
4544      CHECK(recent_allocation_records_ != nullptr);
4545    }
4546    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4547  } else {
4548    {
4549      ScopedObjectAccess soa(self);  // For type_cache_.Clear();
4550      MutexLock mu(self, *Locks::alloc_tracker_lock_);
4551      if (recent_allocation_records_ == nullptr) {
4552        return;  // Already disabled, bail.
4553      }
4554      LOG(INFO) << "Disabling alloc tracker";
4555      delete[] recent_allocation_records_;
4556      recent_allocation_records_ = nullptr;
4557      alloc_record_head_ = 0;
4558      alloc_record_count_ = 0;
4559      type_cache_.Clear();
4560    }
4561    // If an allocation comes in before we uninstrument, we will safely drop it on the floor.
4562    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
4563  }
4564}
4565
4566struct AllocRecordStackVisitor : public StackVisitor {
4567  AllocRecordStackVisitor(Thread* thread, AllocRecord* record_in)
4568      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
4569      : StackVisitor(thread, nullptr), record(record_in), depth(0) {}
4570
4571  // TODO: Enable annotalysis. We know the lock is held in the constructor, but the
4572  // abstraction confuses annotalysis.
4573  bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
4574    if (depth >= kMaxAllocRecordStackDepth) {
4575      return false;
4576    }
4577    mirror::ArtMethod* m = GetMethod();
4578    if (!m->IsRuntimeMethod()) {
4579      record->StackElement(depth)->SetMethod(m);
4580      record->StackElement(depth)->SetDexPc(GetDexPc());
4581      ++depth;
4582    }
4583    return true;
4584  }
4585
4586  ~AllocRecordStackVisitor() {
4587    // Clear out any unused stack trace elements.
4588    for (; depth < kMaxAllocRecordStackDepth; ++depth) {
4589      record->StackElement(depth)->SetMethod(nullptr);
4590      record->StackElement(depth)->SetDexPc(0);
4591    }
4592  }
4593
4594  AllocRecord* record;
4595  size_t depth;
4596};
4597
4598void Dbg::RecordAllocation(Thread* self, mirror::Class* type, size_t byte_count) {
4599  MutexLock mu(self, *Locks::alloc_tracker_lock_);
4600  if (recent_allocation_records_ == nullptr) {
4601    // In the process of shutting down recording, bail.
4602    return;
4603  }
4604
4605  // Advance and wrap around the ring buffer.
4606  if (++alloc_record_head_ == alloc_record_max_) {
4607    alloc_record_head_ = 0;
4608  }
4609
4610  // Fill in the basics.
4611  AllocRecord* record = &recent_allocation_records_[alloc_record_head_];
4612  record->SetType(type);
4613  record->SetByteCount(byte_count);
4614  record->SetThinLockId(self->GetThreadId());
4615
4616  // Fill in the stack trace.
4617  AllocRecordStackVisitor visitor(self, record);
4618  visitor.WalkStack();
4619
4620  if (alloc_record_count_ < alloc_record_max_) {
4621    ++alloc_record_count_;
4622  }
4623}
4624
4625// Returns the index of the oldest element, i.e. the "head" from which iteration starts.
4626//
4627// We point at the most-recently-written record, so if alloc_record_count_ is 1
4628// we want to use the current element.  Take "head+1" and subtract count
4629// from it.
4630//
4631// We need to handle underflow in our circular buffer, so we add
4632// alloc_record_max_ and then mask it back down.
4633size_t Dbg::HeadIndex() {
4634  return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) &
4635      (Dbg::alloc_record_max_ - 1);
4636}
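
// Hypothetical example: with alloc_record_max_ == 8, after three allocations
// alloc_record_head_ == 3 and the records occupy indexes 1, 2 and 3, so HeadIndex()
// returns (3 + 1 + 8 - 3) & 7 == 1, the oldest record; iteration then walks forward.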
4637
4638void Dbg::DumpRecentAllocations() {
4639  ScopedObjectAccess soa(Thread::Current());
4640  MutexLock mu(soa.Self(), *Locks::alloc_tracker_lock_);
4641  if (recent_allocation_records_ == nullptr) {
4642    LOG(INFO) << "Not recording tracked allocations";
4643    return;
4644  }
4645
4646  // "i" is the index of the oldest record.  We start there and move forward
4647  // through the ring buffer to the most recent record.
4648  size_t i = HeadIndex();
4649  const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
4650  uint16_t count = capped_count;
4651
4652  LOG(INFO) << "Tracked allocations (head=" << alloc_record_head_ << " count=" << count << ")";
4653  while (count--) {
4654    AllocRecord* record = &recent_allocation_records_[i];
4655
4656    LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->ThinLockId(), record->ByteCount())
4657              << PrettyClass(record->Type());
4658
4659    for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
4660      AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame);
4661      mirror::ArtMethod* m = stack_element->Method();
4662      if (m == nullptr) {
4663        break;
4664      }
4665      LOG(INFO) << "    " << PrettyMethod(m) << " line " << stack_element->LineNumber();
4666    }
4667
4668    // Pause periodically to help logcat catch up.
4669    if ((count % 5) == 0) {
4670      usleep(40000);
4671    }
4672
4673    i = (i + 1) & (alloc_record_max_ - 1);
4674  }
4675}
4676
4677class StringTable {
4678 public:
4679  StringTable() {
4680  }
4681
4682  void Add(const std::string& str) {
4683    table_.insert(str);
4684  }
4685
4686  void Add(const char* str) {
4687    table_.insert(str);
4688  }
4689
4690  size_t IndexOf(const char* s) const {
4691    auto it = table_.find(s);
4692    if (it == table_.end()) {
4693      LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
4694    }
4695    return std::distance(table_.begin(), it);
4696  }
4697
4698  size_t Size() const {
4699    return table_.size();
4700  }
4701
4702  void WriteTo(std::vector<uint8_t>& bytes) const {
4703    for (const std::string& str : table_) {
4704      const char* s = str.c_str();
4705      size_t s_len = CountModifiedUtf8Chars(s);
4706      std::unique_ptr<uint16_t[]> s_utf16(new uint16_t[s_len]);  // Array form, so delete[] is used.
4707      ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
4708      JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
4709    }
4710  }
4711
4712 private:
4713  std::set<std::string> table_;
4714  DISALLOW_COPY_AND_ASSIGN(StringTable);
4715};
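
// An observation rather than new behavior: table_ is a sorted std::set, so the index
// IndexOf() computes is only stable once every Add() has happened; a later insertion can
// shift std::distance for existing entries. GetRecentAllocations() below therefore fills
// all three tables completely before looking up any index.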
4716
4717static const char* GetMethodSourceFile(mirror::ArtMethod* method)
4718    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4719  DCHECK(method != nullptr);
4720  const char* source_file = method->GetDeclaringClassSourceFile();
4721  return (source_file != nullptr) ? source_file : "";
4722}
4723
4724/*
4725 * The data we send to DDMS contains everything we have recorded.
4726 *
4727 * Message header (all values big-endian):
4728 * (1b) message header len (to allow future expansion); includes itself
4729 * (1b) entry header len
4730 * (1b) stack frame len
4731 * (2b) number of entries
4732 * (4b) offset to string table from start of message
4733 * (2b) number of class name strings
4734 * (2b) number of method name strings
4735 * (2b) number of source file name strings
4736 * For each entry:
4737 *   (4b) total allocation size
4738 *   (2b) thread id
4739 *   (2b) allocated object's class name index
4740 *   (1b) stack depth
4741 *   For each stack frame:
4742 *     (2b) method's class name
4743 *     (2b) method name
4744 *     (2b) method source file
4745 *     (2b) line number, clipped to 32767; -2 if native; -1 if no source
4746 * (xb) class name strings
4747 * (xb) method name strings
4748 * (xb) source file strings
4749 *
4750 * As with other DDM traffic, strings are sent as a 4-byte length
4751 * followed by UTF-16 data.
4752 *
4753 * We send up 16-bit unsigned indexes into string tables.  In theory there
4754 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
4755 * each table, but in practice there should be far fewer.
4756 *
4757 * The chief reason for using a string table here is to keep the size of
4758 * the DDMS message to a minimum.  This is partly to make the protocol
4759 * efficient, but also because we have to form the whole thing up all at
4760 * once in a memory buffer.
4761 *
4762 * We use separate string tables for class names, method names, and source
4763 * files to keep the indexes small.  There will generally be no overlap
4764 * between the contents of these tables.
4765 */
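// Checking the constants used below against the layout above: the message header is
// 1 + 1 + 1 + 2 + 4 + 2 + 2 + 2 == 15 bytes, each entry header 4 + 2 + 2 + 1 == 9 bytes,
// and each stack frame 2 + 2 + 2 + 2 == 8 bytes.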
4766jbyteArray Dbg::GetRecentAllocations() {
4767  if ((false)) {
4768    DumpRecentAllocations();
4769  }
4770
4771  Thread* self = Thread::Current();
4772  std::vector<uint8_t> bytes;
4773  {
4774    MutexLock mu(self, *Locks::alloc_tracker_lock_);
4775    //
4776    // Part 1: generate string tables.
4777    //
4778    StringTable class_names;
4779    StringTable method_names;
4780    StringTable filenames;
4781
4782    const uint16_t capped_count = CappedAllocRecordCount(Dbg::alloc_record_count_);
4783    uint16_t count = capped_count;
4784    size_t idx = HeadIndex();
4785    while (count--) {
4786      AllocRecord* record = &recent_allocation_records_[idx];
4787      std::string temp;
4788      class_names.Add(record->Type()->GetDescriptor(&temp));
4789      for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
4790        mirror::ArtMethod* m = record->StackElement(i)->Method();
4791        if (m != nullptr) {
4792          class_names.Add(m->GetDeclaringClassDescriptor());
4793          method_names.Add(m->GetName());
4794          filenames.Add(GetMethodSourceFile(m));
4795        }
4796      }
4797
4798      idx = (idx + 1) & (alloc_record_max_ - 1);
4799    }
4800
4801    LOG(INFO) << "allocation records: " << capped_count;
4802
4803    //
4804    // Part 2: Generate the output and store it in the buffer.
4805    //
4806
4807    // (1b) message header len (to allow future expansion); includes itself
4808    // (1b) entry header len
4809    // (1b) stack frame len
4810    const int kMessageHeaderLen = 15;
4811    const int kEntryHeaderLen = 9;
4812    const int kStackFrameLen = 8;
4813    JDWP::Append1BE(bytes, kMessageHeaderLen);
4814    JDWP::Append1BE(bytes, kEntryHeaderLen);
4815    JDWP::Append1BE(bytes, kStackFrameLen);
4816
4817    // (2b) number of entries
4818    // (4b) offset to string table from start of message
4819    // (2b) number of class name strings
4820    // (2b) number of method name strings
4821    // (2b) number of source file name strings
4822    JDWP::Append2BE(bytes, capped_count);
4823    size_t string_table_offset = bytes.size();
4824    JDWP::Append4BE(bytes, 0);  // We'll patch this later...
4825    JDWP::Append2BE(bytes, class_names.Size());
4826    JDWP::Append2BE(bytes, method_names.Size());
4827    JDWP::Append2BE(bytes, filenames.Size());
4828
4829    idx = HeadIndex();
4830    std::string temp;
4831    for (count = capped_count; count != 0; --count) {
4832      // For each entry:
4833      // (4b) total allocation size
4834      // (2b) thread id
4835      // (2b) allocated object's class name index
4836      // (1b) stack depth
4837      AllocRecord* record = &recent_allocation_records_[idx];
4838      size_t stack_depth = record->GetDepth();
4839      size_t allocated_object_class_name_index =
4840          class_names.IndexOf(record->Type()->GetDescriptor(&temp));
4841      JDWP::Append4BE(bytes, record->ByteCount());
4842      JDWP::Append2BE(bytes, record->ThinLockId());
4843      JDWP::Append2BE(bytes, allocated_object_class_name_index);
4844      JDWP::Append1BE(bytes, stack_depth);
4845
4846      for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
4847        // For each stack frame:
4848        // (2b) method's class name
4849        // (2b) method name
4850        // (2b) method source file
4851        // (2b) line number, clipped to 32767; -2 if native; -1 if no source
4852        mirror::ArtMethod* m = record->StackElement(stack_frame)->Method();
4853        size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor());
4854        size_t method_name_index = method_names.IndexOf(m->GetName());
4855        size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m));
4856        JDWP::Append2BE(bytes, class_name_index);
4857        JDWP::Append2BE(bytes, method_name_index);
4858        JDWP::Append2BE(bytes, file_name_index);
4859        JDWP::Append2BE(bytes, record->StackElement(stack_frame)->LineNumber());
4860      }
4861      idx = (idx + 1) & (alloc_record_max_ - 1);
4862    }
4863
4864    // (xb) class name strings
4865    // (xb) method name strings
4866    // (xb) source file strings
4867    JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
4868    class_names.WriteTo(bytes);
4869    method_names.WriteTo(bytes);
4870    filenames.WriteTo(bytes);
4871  }
4872  JNIEnv* env = self->GetJniEnv();
4873  jbyteArray result = env->NewByteArray(bytes.size());
4874  if (result != nullptr) {
4875    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
4876  }
4877  return result;
4878}
4879
4880mirror::ArtMethod* DeoptimizationRequest::Method() const {
4881  ScopedObjectAccessUnchecked soa(Thread::Current());
4882  return soa.DecodeMethod(method_);
4883}
4884
4885void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) {
4886  ScopedObjectAccessUnchecked soa(Thread::Current());
4887  method_ = soa.EncodeMethod(m);
4888}
4889
4890}  // namespace art
4891