debugger.cc revision 4d466a8e4587422c989705dce3b2a19e7f0137f5
1/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "debugger.h"
18
19#include <sys/uio.h>
20
21#include <set>
22
23#include "arch/context.h"
24#include "class_linker.h"
25#include "class_linker-inl.h"
26#include "dex_file-inl.h"
27#include "dex_instruction.h"
28#include "gc/accounting/card_table-inl.h"
29#include "gc/space/large_object_space.h"
30#include "gc/space/space-inl.h"
31#include "jdwp/object_registry.h"
32#include "mirror/art_field-inl.h"
33#include "mirror/art_method-inl.h"
34#include "mirror/class.h"
35#include "mirror/class-inl.h"
36#include "mirror/class_loader.h"
37#include "mirror/object-inl.h"
38#include "mirror/object_array-inl.h"
39#include "mirror/string-inl.h"
40#include "mirror/throwable.h"
41#include "object_utils.h"
42#include "quick/inline_method_analyser.h"
43#include "reflection.h"
44#include "safe_map.h"
45#include "scoped_thread_state_change.h"
46#include "ScopedLocalRef.h"
47#include "ScopedPrimitiveArray.h"
48#include "sirt_ref.h"
49#include "stack_indirect_reference_table.h"
50#include "thread_list.h"
51#include "throw_location.h"
52#include "utf.h"
53#include "verifier/method_verifier-inl.h"
54#include "well_known_classes.h"
55
56#ifdef HAVE_ANDROID_OS
57#include "cutils/properties.h"
58#endif
59
60namespace art {
61
62static const size_t kMaxAllocRecordStackDepth = 16;  // Max 255.
63static const size_t kDefaultNumAllocRecords = 64*1024;  // Must be a power of 2.
64
65struct AllocRecordStackTraceElement {
66  mirror::ArtMethod* method;
67  uint32_t dex_pc;
68
69  AllocRecordStackTraceElement() : method(nullptr), dex_pc(0) {
70  }
71
72  int32_t LineNumber() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
73    return MethodHelper(method).GetLineNumFromDexPC(dex_pc);
74  }
75};
76
77struct AllocRecord {
78  mirror::Class* type;
79  size_t byte_count;
80  uint16_t thin_lock_id;
81  AllocRecordStackTraceElement stack[kMaxAllocRecordStackDepth];  // Unused entries have NULL method.
82
83  size_t GetDepth() {
84    size_t depth = 0;
85    while (depth < kMaxAllocRecordStackDepth && stack[depth].method != NULL) {
86      ++depth;
87    }
88    return depth;
89  }
90
91  void UpdateObjectPointers(IsMarkedCallback* callback, void* arg)
92      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
93    if (type != nullptr) {
94      type = down_cast<mirror::Class*>(callback(type, arg));
95    }
96    for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
97      mirror::ArtMethod*& m = stack[stack_frame].method;
98      if (m == nullptr) {
99        break;
100      }
101      m = down_cast<mirror::ArtMethod*>(callback(m, arg));
102    }
103  }
104};
105
106struct Breakpoint {
107  // The location of this breakpoint.
108  mirror::ArtMethod* method;
109  uint32_t dex_pc;
110
111  // Indicates whether breakpoint needs full deoptimization or selective deoptimization.
112  bool need_full_deoptimization;
113
114  Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization)
115    : method(method), dex_pc(dex_pc), need_full_deoptimization(need_full_deoptimization) {}
116
117  void VisitRoots(RootCallback* callback, void* arg) {
118    if (method != nullptr) {
119      callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootDebugger);
120    }
121  }
122};
123
124static std::ostream& operator<<(std::ostream& os, const Breakpoint& rhs)
125    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
126  os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.method).c_str(), rhs.dex_pc);
127  return os;
128}
129
130class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener {
131 public:
132  DebugInstrumentationListener() {}
133  virtual ~DebugInstrumentationListener() {}
134
135  void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
136                     uint32_t dex_pc)
137      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
138    if (method->IsNative()) {
139      // TODO: posting location events is a suspension point and native method entry stubs aren't.
140      return;
141    }
142    Dbg::PostLocationEvent(method, 0, this_object, Dbg::kMethodEntry, nullptr);
143  }
144
145  void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
146                    uint32_t dex_pc, const JValue& return_value)
147      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
148    if (method->IsNative()) {
149      // TODO: posting location events is a suspension point and native method entry stubs aren't.
150      return;
151    }
152    Dbg::PostLocationEvent(method, dex_pc, this_object, Dbg::kMethodExit, &return_value);
153  }
154
155  void MethodUnwind(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
156                    uint32_t dex_pc)
157      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
158    // We're not registered to listen for this kind of event, so complain.
159    LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method)
160               << " " << dex_pc;
161  }
162
163  void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
164                  uint32_t new_dex_pc)
165      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
166    Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc);
167  }
168
169  void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
170                 uint32_t dex_pc, mirror::ArtField* field)
171      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
172    Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field);
173  }
174
175  void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method,
176                    uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value)
177      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
178    Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value);
179  }
180
181  void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location,
182                       mirror::ArtMethod* catch_method, uint32_t catch_dex_pc,
183                       mirror::Throwable* exception_object)
184      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
185    Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object);
186  }
187
188 private:
189  DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener);
190} gDebugInstrumentationListener;
191
192// JDWP is allowed unless the Zygote forbids it.
193static bool gJdwpAllowed = true;
194
195// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line?
196static bool gJdwpConfigured = false;
197
198// Broken-down JDWP options. (Only valid if IsJdwpConfigured() is true.)
199static JDWP::JdwpOptions gJdwpOptions;
200
201// Runtime JDWP state.
202static JDWP::JdwpState* gJdwpState = NULL;
203static bool gDebuggerConnected;  // debugger or DDMS is connected.
204static bool gDebuggerActive;     // debugger is making requests.
205static bool gDisposed;           // debugger called VirtualMachine.Dispose, so we should drop the connection.
206
207static bool gDdmThreadNotification = false;
208
209// DDMS GC-related settings.
210static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER;
211static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER;
212static Dbg::HpsgWhat gDdmHpsgWhat;
213static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER;
214static Dbg::HpsgWhat gDdmNhsgWhat;
215
216static ObjectRegistry* gRegistry = nullptr;
217
218// Recent allocation tracking.
219Mutex* Dbg::alloc_tracker_lock_ = nullptr;
220AllocRecord* Dbg::recent_allocation_records_ = nullptr;  // TODO: CircularBuffer<AllocRecord>
221size_t Dbg::alloc_record_max_ = 0;
222size_t Dbg::alloc_record_head_ = 0;
223size_t Dbg::alloc_record_count_ = 0;
224
225// Deoptimization support.
226Mutex* Dbg::deoptimization_lock_ = nullptr;
227std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_;
228size_t Dbg::full_deoptimization_event_count_ = 0;
229size_t Dbg::delayed_full_undeoptimization_count_ = 0;
230
231// Breakpoints.
232static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_);
233
234void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
235                                RootType root_type) {
236  if (receiver != nullptr) {
237    callback(&receiver, arg, tid, root_type);
238  }
239  if (thread != nullptr) {
240    callback(&thread, arg, tid, root_type);
241  }
242  if (klass != nullptr) {
243    callback(reinterpret_cast<mirror::Object**>(&klass), arg, tid, root_type);
244  }
245  if (method != nullptr) {
246    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
247  }
248}
249
250void DebugInvokeReq::Clear() {
251  invoke_needed = false;
252  receiver = nullptr;
253  thread = nullptr;
254  klass = nullptr;
255  method = nullptr;
256}
257
258void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, uint32_t tid,
259                                   RootType root_type) {
260  if (method != nullptr) {
261    callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type);
262  }
263}
264
265bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const {
266  return dex_pcs.find(dex_pc) != dex_pcs.end();
267}
268
269void SingleStepControl::Clear() {
270  is_active = false;
271  method = nullptr;
272  dex_pcs.clear();
273}
274
275void DeoptimizationRequest::VisitRoots(RootCallback* callback, void* arg) {
276  if (method != nullptr) {
277    callback(reinterpret_cast<mirror::Object**>(&method), arg, 0, kRootDebugger);
278  }
279}
280
281static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc)
282    LOCKS_EXCLUDED(Locks::breakpoint_lock_)
283    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
284  MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
285  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
286    if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == dex_pc) {
287      VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i];
288      return true;
289    }
290  }
291  return false;
292}
293
294static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread)
295    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) {
296  MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
297  // A thread may be suspended for GC; in this code, we really want to know whether
298  // there's a debugger suspension active.
299  return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0;
300}
301
302static mirror::Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status)
303    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
304  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
305  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
306    status = JDWP::ERR_INVALID_OBJECT;
307    return NULL;
308  }
309  if (!o->IsArrayInstance()) {
310    status = JDWP::ERR_INVALID_ARRAY;
311    return NULL;
312  }
313  status = JDWP::ERR_NONE;
314  return o->AsArray();
315}
316
317static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status)
318    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
319  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
320  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
321    status = JDWP::ERR_INVALID_OBJECT;
322    return NULL;
323  }
324  if (!o->IsClass()) {
325    status = JDWP::ERR_INVALID_CLASS;
326    return NULL;
327  }
328  status = JDWP::ERR_NONE;
329  return o->AsClass();
330}
331
332static JDWP::JdwpError DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id, Thread*& thread)
333    EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
334    LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
335    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
336  mirror::Object* thread_peer = gRegistry->Get<mirror::Object*>(thread_id);
337  if (thread_peer == NULL || thread_peer == ObjectRegistry::kInvalidObject) {
338    // This isn't even an object.
339    return JDWP::ERR_INVALID_OBJECT;
340  }
341
342  mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
343  if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) {
344    // This isn't a thread.
345    return JDWP::ERR_INVALID_THREAD;
346  }
347
348  thread = Thread::FromManagedThread(soa, thread_peer);
349  if (thread == NULL) {
350    // This is a java.lang.Thread without a Thread*. Must be a zombie.
351    return JDWP::ERR_THREAD_NOT_ALIVE;
352  }
353  return JDWP::ERR_NONE;
354}
355
356static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) {
357  // JDWP deliberately uses the descriptor characters' ASCII values for its enum.
358  // Note that by "basic" we mean that we don't get more specific than JT_OBJECT.
359  return static_cast<JDWP::JdwpTag>(descriptor[0]);
360}
361
362static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c)
363    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
364  CHECK(c != NULL);
365  if (c->IsArrayClass()) {
366    return JDWP::JT_ARRAY;
367  }
368  if (c->IsStringClass()) {
369    return JDWP::JT_STRING;
370  }
371  if (c->IsClassClass()) {
372    return JDWP::JT_CLASS_OBJECT;
373  }
374  {
375    mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
376    if (thread_class->IsAssignableFrom(c)) {
377      return JDWP::JT_THREAD;
378    }
379  }
380  {
381    mirror::Class* thread_group_class =
382        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
383    if (thread_group_class->IsAssignableFrom(c)) {
384      return JDWP::JT_THREAD_GROUP;
385    }
386  }
387  {
388    mirror::Class* class_loader_class =
389        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader);
390    if (class_loader_class->IsAssignableFrom(c)) {
391      return JDWP::JT_CLASS_LOADER;
392    }
393  }
394  return JDWP::JT_OBJECT;
395}
396
397/*
398 * Objects declared to hold Object might actually hold a more specific
399 * type.  The debugger may take a special interest in these (e.g. it
400 * wants to display the contents of Strings), so we want to return an
401 * appropriate tag.
402 *
403 * Null objects are tagged JT_OBJECT.
404 */
405static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o)
406    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
407  return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass());
408}
409
410static bool IsPrimitiveTag(JDWP::JdwpTag tag) {
411  switch (tag) {
412  case JDWP::JT_BOOLEAN:
413  case JDWP::JT_BYTE:
414  case JDWP::JT_CHAR:
415  case JDWP::JT_FLOAT:
416  case JDWP::JT_DOUBLE:
417  case JDWP::JT_INT:
418  case JDWP::JT_LONG:
419  case JDWP::JT_SHORT:
420  case JDWP::JT_VOID:
421    return true;
422  default:
423    return false;
424  }
425}
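
// Worked example: JDWP reuses descriptor characters as tag values, so for a java.lang.String
// instance the helpers above compose as follows:
//   BasicTagFromDescriptor("Ljava/lang/String;")  -> 'L' == JDWP::JT_OBJECT
//   TagFromClass(soa, string_object->GetClass())  -> JDWP::JT_STRING (refined object tag)
//   IsPrimitiveTag(JDWP::JT_STRING)               -> false
//   IsPrimitiveTag(JDWP::JT_INT)                  -> true
// OutputArray and SetArrayElements below rely on this primitive/object split when deciding
// whether to copy raw element data or per-element object IDs.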
426
427/*
428 * Handle one of the JDWP name/value pairs.
429 *
430 * JDWP options are:
431 *  help: if specified, show help message and bail
432 *  transport: may be dt_socket or dt_shmem
433 *  address: for dt_socket, "host:port", or just "port" when listening
434 *  server: if "y", wait for debugger to attach; if "n", attach to debugger
435 *  timeout: how long to wait for debugger to connect / listen
436 *
437 * Useful with server=n (these aren't supported yet):
438 *  onthrow=<exception-name>: connect to debugger when exception thrown
439 *  onuncaught=y|n: connect to debugger when uncaught exception thrown
440 *  launch=<command-line>: launch the debugger itself
441 *
442 * The "transport" option is required, as is "address" if server=n.
443 */
444static bool ParseJdwpOption(const std::string& name, const std::string& value) {
445  if (name == "transport") {
446    if (value == "dt_socket") {
447      gJdwpOptions.transport = JDWP::kJdwpTransportSocket;
448    } else if (value == "dt_android_adb") {
449      gJdwpOptions.transport = JDWP::kJdwpTransportAndroidAdb;
450    } else {
451      LOG(ERROR) << "JDWP transport not supported: " << value;
452      return false;
453    }
454  } else if (name == "server") {
455    if (value == "n") {
456      gJdwpOptions.server = false;
457    } else if (value == "y") {
458      gJdwpOptions.server = true;
459    } else {
460      LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'";
461      return false;
462    }
463  } else if (name == "suspend") {
464    if (value == "n") {
465      gJdwpOptions.suspend = false;
466    } else if (value == "y") {
467      gJdwpOptions.suspend = true;
468    } else {
469      LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'";
470      return false;
471    }
472  } else if (name == "address") {
473    /* this is either <port> or <host>:<port> */
474    std::string port_string;
475    gJdwpOptions.host.clear();
476    std::string::size_type colon = value.find(':');
477    if (colon != std::string::npos) {
478      gJdwpOptions.host = value.substr(0, colon);
479      port_string = value.substr(colon + 1);
480    } else {
481      port_string = value;
482    }
483    if (port_string.empty()) {
484      LOG(ERROR) << "JDWP address missing port: " << value;
485      return false;
486    }
487    char* end;
488    uint64_t port = strtoul(port_string.c_str(), &end, 10);
489    if (*end != '\0' || port > 0xffff) {
490      LOG(ERROR) << "JDWP address has junk in port field: " << value;
491      return false;
492    }
493    gJdwpOptions.port = port;
494  } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") {
495    /* valid but unsupported */
496    LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'";
497  } else {
498    LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'";
499  }
500
501  return true;
502}
503
504/*
505 * Parse the latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.:
506 * "transport=dt_socket,address=8000,server=y,suspend=n"
507 */
508bool Dbg::ParseJdwpOptions(const std::string& options) {
509  VLOG(jdwp) << "ParseJdwpOptions: " << options;
510
511  std::vector<std::string> pairs;
512  Split(options, ',', pairs);
513
514  for (size_t i = 0; i < pairs.size(); ++i) {
515    std::string::size_type equals = pairs[i].find('=');
516    if (equals == std::string::npos) {
517      LOG(ERROR) << "Can't parse JDWP option '" << pairs[i] << "' in '" << options << "'";
518      return false;
519    }
520    ParseJdwpOption(pairs[i].substr(0, equals), pairs[i].substr(equals + 1));
521  }
522
523  if (gJdwpOptions.transport == JDWP::kJdwpTransportUnknown) {
524    LOG(ERROR) << "Must specify JDWP transport: " << options;
525  }
526  if (!gJdwpOptions.server && (gJdwpOptions.host.empty() || gJdwpOptions.port == 0)) {
527    LOG(ERROR) << "Must specify JDWP host and port when server=n: " << options;
528    return false;
529  }
530
531  gJdwpConfigured = true;
532  return true;
533}
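
// Worked example: a typical "-agentlib:jdwp=transport=dt_socket,address=8000,server=y,suspend=n"
// argument reaches ParseJdwpOptions as "transport=dt_socket,address=8000,server=y,suspend=n".
// The string is split on ',' and each pair on '=', so ParseJdwpOption runs four times, leaving
// gJdwpOptions with transport == kJdwpTransportSocket, host == "" (no host given), port == 8000,
// server == true and suspend == false.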
534
535void Dbg::StartJdwp() {
536  if (!gJdwpAllowed || !IsJdwpConfigured()) {
537    // No JDWP for you!
538    return;
539  }
540
541  CHECK(gRegistry == nullptr);
542  gRegistry = new ObjectRegistry;
543
544  alloc_tracker_lock_ = new Mutex("AllocTracker lock");
545  deoptimization_lock_ = new Mutex("deoptimization lock", kDeoptimizationLock);
546  // Init JDWP if the debugger is enabled. This may connect out to a
547  // debugger, passively listen for a debugger, or block waiting for a
548  // debugger.
549  gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions);
550  if (gJdwpState == NULL) {
551    // We probably failed because some other process has the port already, which means that
552    // if we don't abort the user is likely to think they're talking to us when they're actually
553    // talking to that other process.
554    LOG(FATAL) << "Debugger thread failed to initialize";
555  }
556
557  // If a debugger has already attached, send the "welcome" message.
558  // This may cause us to suspend all threads.
559  if (gJdwpState->IsActive()) {
560    ScopedObjectAccess soa(Thread::Current());
561    if (!gJdwpState->PostVMStart()) {
562      LOG(WARNING) << "Failed to post 'start' message to debugger";
563    }
564  }
565}
566
567void Dbg::VisitRoots(RootCallback* callback, void* arg) {
568  {
569    MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
570    for (Breakpoint& bp : gBreakpoints) {
571      bp.VisitRoots(callback, arg);
572    }
573  }
574  if (deoptimization_lock_ != nullptr) {  // only true if the debugger is started.
575    MutexLock mu(Thread::Current(), *deoptimization_lock_);
576    for (DeoptimizationRequest& req : deoptimization_requests_) {
577      req.VisitRoots(callback, arg);
578    }
579  }
580}
581
582void Dbg::StopJdwp() {
583  // Prevent the JDWP thread from processing incoming JDWP packets after we close the connection.
584  Disposed();
585  delete gJdwpState;
586  gJdwpState = nullptr;
587  delete gRegistry;
588  gRegistry = nullptr;
589  delete alloc_tracker_lock_;
590  alloc_tracker_lock_ = nullptr;
591  delete deoptimization_lock_;
592  deoptimization_lock_ = nullptr;
593}
594
595void Dbg::GcDidFinish() {
596  if (gDdmHpifWhen != HPIF_WHEN_NEVER) {
597    ScopedObjectAccess soa(Thread::Current());
598    VLOG(jdwp) << "Sending heap info to DDM";
599    DdmSendHeapInfo(gDdmHpifWhen);
600  }
601  if (gDdmHpsgWhen != HPSG_WHEN_NEVER) {
602    ScopedObjectAccess soa(Thread::Current());
603    VLOG(jdwp) << "Dumping heap to DDM";
604    DdmSendHeapSegments(false);
605  }
606  if (gDdmNhsgWhen != HPSG_WHEN_NEVER) {
607    ScopedObjectAccess soa(Thread::Current());
608    VLOG(jdwp) << "Dumping native heap to DDM";
609    DdmSendHeapSegments(true);
610  }
611}
612
613void Dbg::SetJdwpAllowed(bool allowed) {
614  gJdwpAllowed = allowed;
615}
616
617DebugInvokeReq* Dbg::GetInvokeReq() {
618  return Thread::Current()->GetInvokeReq();
619}
620
621Thread* Dbg::GetDebugThread() {
622  return (gJdwpState != NULL) ? gJdwpState->GetDebugThread() : NULL;
623}
624
625void Dbg::ClearWaitForEventThread() {
626  gJdwpState->ClearWaitForEventThread();
627}
628
629void Dbg::Connected() {
630  CHECK(!gDebuggerConnected);
631  VLOG(jdwp) << "JDWP has attached";
632  gDebuggerConnected = true;
633  gDisposed = false;
634}
635
636void Dbg::Disposed() {
637  gDisposed = true;
638}
639
640bool Dbg::IsDisposed() {
641  return gDisposed;
642}
643
644// All the instrumentation events the debugger is registered for.
645static constexpr uint32_t kListenerEvents = instrumentation::Instrumentation::kMethodEntered |
646                                            instrumentation::Instrumentation::kMethodExited |
647                                            instrumentation::Instrumentation::kDexPcMoved |
648                                            instrumentation::Instrumentation::kFieldRead |
649                                            instrumentation::Instrumentation::kFieldWritten |
650                                            instrumentation::Instrumentation::kExceptionCaught;
651
652void Dbg::GoActive() {
653  // Enable all debugging features, including scans for breakpoints.
654  // This is a no-op if we're already active.
655  // Only called from the JDWP handler thread.
656  if (gDebuggerActive) {
657    return;
658  }
659
660  {
661    // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected?
662    MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
663    CHECK_EQ(gBreakpoints.size(), 0U);
664  }
665
666  {
667    MutexLock mu(Thread::Current(), *deoptimization_lock_);
668    CHECK_EQ(deoptimization_requests_.size(), 0U);
669    CHECK_EQ(full_deoptimization_event_count_, 0U);
670    CHECK_EQ(delayed_full_undeoptimization_count_, 0U);
671  }
672
673  Runtime* runtime = Runtime::Current();
674  runtime->GetThreadList()->SuspendAll();
675  Thread* self = Thread::Current();
676  ThreadState old_state = self->SetStateUnsafe(kRunnable);
677  CHECK_NE(old_state, kRunnable);
678  runtime->GetInstrumentation()->EnableDeoptimization();
679  runtime->GetInstrumentation()->AddListener(&gDebugInstrumentationListener, kListenerEvents);
680  gDebuggerActive = true;
681  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
682  runtime->GetThreadList()->ResumeAll();
683
684  LOG(INFO) << "Debugger is active";
685}
686
687void Dbg::Disconnected() {
688  CHECK(gDebuggerConnected);
689
690  LOG(INFO) << "Debugger is no longer active";
691
692  // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread
693  // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener
694  // and clear the object registry.
695  Runtime* runtime = Runtime::Current();
696  runtime->GetThreadList()->SuspendAll();
697  Thread* self = Thread::Current();
698  ThreadState old_state = self->SetStateUnsafe(kRunnable);
699
700  // Debugger may not be active at this point.
701  if (gDebuggerActive) {
702    {
703      // Since we're going to disable deoptimization, we clear the deoptimization requests queue.
704      // This prevents us from having any pending deoptimization request when the debugger attaches
705      // to us again while no event has been requested yet.
706      MutexLock mu(Thread::Current(), *deoptimization_lock_);
707      deoptimization_requests_.clear();
708      full_deoptimization_event_count_ = 0U;
709      delayed_full_undeoptimization_count_ = 0U;
710    }
711    runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener, kListenerEvents);
712    runtime->GetInstrumentation()->DisableDeoptimization();
713    gDebuggerActive = false;
714  }
715  gRegistry->Clear();
716  gDebuggerConnected = false;
717  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
718  runtime->GetThreadList()->ResumeAll();
719}
720
721bool Dbg::IsDebuggerActive() {
722  return gDebuggerActive;
723}
724
725bool Dbg::IsJdwpConfigured() {
726  return gJdwpConfigured;
727}
728
729int64_t Dbg::LastDebuggerActivity() {
730  return gJdwpState->LastDebuggerActivity();
731}
732
733void Dbg::UndoDebuggerSuspensions() {
734  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
735}
736
737std::string Dbg::GetClassName(JDWP::RefTypeId class_id) {
738  mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id);
739  if (o == NULL) {
740    return "NULL";
741  }
742  if (o == ObjectRegistry::kInvalidObject) {
743    return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id));
744  }
745  if (!o->IsClass()) {
746    return StringPrintf("non-class %p", o);  // This is only used for debugging output anyway.
747  }
748  return DescriptorToName(ClassHelper(o->AsClass()).GetDescriptor());
749}
750
751JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_object_id) {
752  JDWP::JdwpError status;
753  mirror::Class* c = DecodeClass(id, status);
754  if (c == NULL) {
755    return status;
756  }
757  class_object_id = gRegistry->Add(c);
758  return JDWP::ERR_NONE;
759}
760
761JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclass_id) {
762  JDWP::JdwpError status;
763  mirror::Class* c = DecodeClass(id, status);
764  if (c == NULL) {
765    return status;
766  }
767  if (c->IsInterface()) {
768    // http://code.google.com/p/android/issues/detail?id=20856
769    superclass_id = 0;
770  } else {
771    superclass_id = gRegistry->Add(c->GetSuperClass());
772  }
773  return JDWP::ERR_NONE;
774}
775
776JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
777  mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
778  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
779    return JDWP::ERR_INVALID_OBJECT;
780  }
781  expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader()));
782  return JDWP::ERR_NONE;
783}
784
785JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) {
786  JDWP::JdwpError status;
787  mirror::Class* c = DecodeClass(id, status);
788  if (c == NULL) {
789    return status;
790  }
791
792  uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask;
793
794  // Set ACC_SUPER. Dex files don't contain this flag, and only classes (not interfaces) are
795  // supposed to have it set.
796  // Class.getModifiers doesn't return it, but JDWP does, so we set it here.
797  if ((access_flags & kAccInterface) == 0) {
798    access_flags |= kAccSuper;
799  }
800
801  expandBufAdd4BE(pReply, access_flags);
802
803  return JDWP::ERR_NONE;
804}
805
806JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
807    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
808  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
809  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
810    return JDWP::ERR_INVALID_OBJECT;
811  }
812
813  // Ensure all threads are suspended while we read objects' lock words.
814  Thread* self = Thread::Current();
815  CHECK_EQ(self->GetState(), kRunnable);
816  self->TransitionFromRunnableToSuspended(kSuspended);
817  Runtime::Current()->GetThreadList()->SuspendAll();
818
819  MonitorInfo monitor_info(o);
820
821  Runtime::Current()->GetThreadList()->ResumeAll();
822  self->TransitionFromSuspendedToRunnable();
823
824  if (monitor_info.owner_ != NULL) {
825    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
826  } else {
827    expandBufAddObjectId(reply, gRegistry->Add(NULL));
828  }
829  expandBufAdd4BE(reply, monitor_info.entry_count_);
830  expandBufAdd4BE(reply, monitor_info.waiters_.size());
831  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
832    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
833  }
834  return JDWP::ERR_NONE;
835}
836
837JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
838                                      std::vector<JDWP::ObjectId>& monitors,
839                                      std::vector<uint32_t>& stack_depths) {
840  ScopedObjectAccessUnchecked soa(Thread::Current());
841  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
842  Thread* thread;
843  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
844  if (error != JDWP::ERR_NONE) {
845    return error;
846  }
847  if (!IsSuspendedForDebugger(soa, thread)) {
848    return JDWP::ERR_THREAD_NOT_SUSPENDED;
849  }
850
851  struct OwnedMonitorVisitor : public StackVisitor {
852    OwnedMonitorVisitor(Thread* thread, Context* context)
853        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
854        : StackVisitor(thread, context), current_stack_depth(0) {}
855
856    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
857    // annotalysis.
858    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
859      if (!GetMethod()->IsRuntimeMethod()) {
860        Monitor::VisitLocks(this, AppendOwnedMonitors, this);
861        ++current_stack_depth;
862      }
863      return true;
864    }
865
866    static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg) {
867      OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg);
868      visitor->monitors.push_back(owned_monitor);
869      visitor->stack_depths.push_back(visitor->current_stack_depth);
870    }
871
872    size_t current_stack_depth;
873    std::vector<mirror::Object*> monitors;
874    std::vector<uint32_t> stack_depths;
875  };
876  UniquePtr<Context> context(Context::Create());
877  OwnedMonitorVisitor visitor(thread, context.get());
878  visitor.WalkStack();
879
880  for (size_t i = 0; i < visitor.monitors.size(); ++i) {
881    monitors.push_back(gRegistry->Add(visitor.monitors[i]));
882    stack_depths.push_back(visitor.stack_depths[i]);
883  }
884
885  return JDWP::ERR_NONE;
886}
887
888JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id,
889                                         JDWP::ObjectId& contended_monitor) {
890  ScopedObjectAccessUnchecked soa(Thread::Current());
891  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
892  Thread* thread;
893  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
894  if (error != JDWP::ERR_NONE) {
895    return error;
896  }
897  if (!IsSuspendedForDebugger(soa, thread)) {
898    return JDWP::ERR_THREAD_NOT_SUSPENDED;
899  }
900
901  contended_monitor = gRegistry->Add(Monitor::GetContendedMonitor(thread));
902
903  return JDWP::ERR_NONE;
904}
905
906JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids,
907                                       std::vector<uint64_t>& counts)
908    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
909  gc::Heap* heap = Runtime::Current()->GetHeap();
910  heap->CollectGarbage(false);
911  std::vector<mirror::Class*> classes;
912  counts.clear();
913  for (size_t i = 0; i < class_ids.size(); ++i) {
914    JDWP::JdwpError status;
915    mirror::Class* c = DecodeClass(class_ids[i], status);
916    if (c == NULL) {
917      return status;
918    }
919    classes.push_back(c);
920    counts.push_back(0);
921  }
922  heap->CountInstances(classes, false, &counts[0]);
923  return JDWP::ERR_NONE;
924}
925
926JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>& instances)
927    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
928  gc::Heap* heap = Runtime::Current()->GetHeap();
929  // We only want reachable instances, so do a GC.
930  heap->CollectGarbage(false);
931  JDWP::JdwpError status;
932  mirror::Class* c = DecodeClass(class_id, status);
933  if (c == nullptr) {
934    return status;
935  }
936  std::vector<mirror::Object*> raw_instances;
937  Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
938  for (size_t i = 0; i < raw_instances.size(); ++i) {
939    instances.push_back(gRegistry->Add(raw_instances[i]));
940  }
941  return JDWP::ERR_NONE;
942}
943
944JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
945                                         std::vector<JDWP::ObjectId>& referring_objects)
946    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
947  gc::Heap* heap = Runtime::Current()->GetHeap();
948  heap->CollectGarbage(false);
949  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
950  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
951    return JDWP::ERR_INVALID_OBJECT;
952  }
953  std::vector<mirror::Object*> raw_instances;
954  heap->GetReferringObjects(o, max_count, raw_instances);
955  for (size_t i = 0; i < raw_instances.size(); ++i) {
956    referring_objects.push_back(gRegistry->Add(raw_instances[i]));
957  }
958  return JDWP::ERR_NONE;
959}
960
961JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id)
962    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
963  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
964  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
965    return JDWP::ERR_INVALID_OBJECT;
966  }
967  gRegistry->DisableCollection(object_id);
968  return JDWP::ERR_NONE;
969}
970
971JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id)
972    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
973  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
974  // Unlike DisableCollection, the JDWP spec does not state that an invalid object causes an
975  // error. The RI also ignores these cases and never returns an error. However, it's not obvious
976  // why this command should behave differently from the DisableCollection and IsCollected
977  // commands, so let's be more strict and return an error if this happens.
978  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
979    return JDWP::ERR_INVALID_OBJECT;
980  }
981  gRegistry->EnableCollection(object_id);
982  return JDWP::ERR_NONE;
983}
984
985JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool& is_collected)
986    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
987  if (object_id == 0) {
988    // Null object id is invalid.
989    return JDWP::ERR_INVALID_OBJECT;
990  }
991  // The JDWP spec states that an INVALID_OBJECT error is returned if the object ID is not
992  // valid. However, the RI seems to ignore this and assumes the object has been collected.
993  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
994  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
995    is_collected = true;
996  } else {
997    is_collected = gRegistry->IsCollected(object_id);
998  }
999  return JDWP::ERR_NONE;
1000}
1001
1002void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count)
1003    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1004  gRegistry->DisposeObject(object_id, reference_count);
1005}
1006
1007static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass)
1008    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1009  DCHECK(klass != nullptr);
1010  if (klass->IsArrayClass()) {
1011    return JDWP::TT_ARRAY;
1012  } else if (klass->IsInterface()) {
1013    return JDWP::TT_INTERFACE;
1014  } else {
1015    return JDWP::TT_CLASS;
1016  }
1017}
1018
1019JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1020  JDWP::JdwpError status;
1021  mirror::Class* c = DecodeClass(class_id, status);
1022  if (c == NULL) {
1023    return status;
1024  }
1025
1026  JDWP::JdwpTypeTag type_tag = GetTypeTag(c);
1027  expandBufAdd1(pReply, type_tag);
1028  expandBufAddRefTypeId(pReply, class_id);
1029  return JDWP::ERR_NONE;
1030}
1031
1032void Dbg::GetClassList(std::vector<JDWP::RefTypeId>& classes) {
1033  // Get the complete list of reference classes (i.e. all classes except
1034  // the primitive types).
1035  // The RefTypeId for each class is appended to the given vector.
1036  struct ClassListCreator {
1037    explicit ClassListCreator(std::vector<JDWP::RefTypeId>& classes) : classes(classes) {
1038    }
1039
1040    static bool Visit(mirror::Class* c, void* arg) {
1041      return reinterpret_cast<ClassListCreator*>(arg)->Visit(c);
1042    }
1043
1044    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
1045    // annotalysis.
1046    bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS {
1047      if (!c->IsPrimitive()) {
1048        classes.push_back(gRegistry->AddRefType(c));
1049      }
1050      return true;
1051    }
1052
1053    std::vector<JDWP::RefTypeId>& classes;
1054  };
1055
1056  ClassListCreator clc(classes);
1057  Runtime::Current()->GetClassLinker()->VisitClasses(ClassListCreator::Visit, &clc);
1058}
1059
1060JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag, uint32_t* pStatus, std::string* pDescriptor) {
1061  JDWP::JdwpError status;
1062  mirror::Class* c = DecodeClass(class_id, status);
1063  if (c == NULL) {
1064    return status;
1065  }
1066
1067  if (c->IsArrayClass()) {
1068    *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
1069    *pTypeTag = JDWP::TT_ARRAY;
1070  } else {
1071    if (c->IsErroneous()) {
1072      *pStatus = JDWP::CS_ERROR;
1073    } else {
1074      *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED;
1075    }
1076    *pTypeTag = c->IsInterface() ? JDWP::TT_INTERFACE : JDWP::TT_CLASS;
1077  }
1078
1079  if (pDescriptor != NULL) {
1080    *pDescriptor = ClassHelper(c).GetDescriptor();
1081  }
1082  return JDWP::ERR_NONE;
1083}
1084
1085void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>& ids) {
1086  std::vector<mirror::Class*> classes;
1087  Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes);
1088  ids.clear();
1089  for (size_t i = 0; i < classes.size(); ++i) {
1090    ids.push_back(gRegistry->Add(classes[i]));
1091  }
1092}
1093
1094JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply)
1095    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1096  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1097  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
1098    return JDWP::ERR_INVALID_OBJECT;
1099  }
1100
1101  JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass());
1102  JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass());
1103
1104  expandBufAdd1(pReply, type_tag);
1105  expandBufAddRefTypeId(pReply, type_id);
1106
1107  return JDWP::ERR_NONE;
1108}
1109
1110JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) {
1111  JDWP::JdwpError status;
1112  mirror::Class* c = DecodeClass(class_id, status);
1113  if (c == NULL) {
1114    return status;
1115  }
1116  *signature = ClassHelper(c).GetDescriptor();
1117  return JDWP::ERR_NONE;
1118}
1119
1120JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result) {
1121  JDWP::JdwpError status;
1122  mirror::Class* c = DecodeClass(class_id, status);
1123  if (c == NULL) {
1124    return status;
1125  }
1126  if (c->IsProxyClass()) {
1127    return JDWP::ERR_ABSENT_INFORMATION;
1128  }
1129  result = ClassHelper(c).GetSourceFile();
1130  return JDWP::ERR_NONE;
1131}
1132
1133JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t& tag) {
1134  ScopedObjectAccessUnchecked soa(Thread::Current());
1135  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1136  if (o == ObjectRegistry::kInvalidObject) {
1137    return JDWP::ERR_INVALID_OBJECT;
1138  }
1139  tag = TagFromObject(soa, o);
1140  return JDWP::ERR_NONE;
1141}
1142
1143size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) {
1144  switch (tag) {
1145  case JDWP::JT_VOID:
1146    return 0;
1147  case JDWP::JT_BYTE:
1148  case JDWP::JT_BOOLEAN:
1149    return 1;
1150  case JDWP::JT_CHAR:
1151  case JDWP::JT_SHORT:
1152    return 2;
1153  case JDWP::JT_FLOAT:
1154  case JDWP::JT_INT:
1155    return 4;
1156  case JDWP::JT_ARRAY:
1157  case JDWP::JT_OBJECT:
1158  case JDWP::JT_STRING:
1159  case JDWP::JT_THREAD:
1160  case JDWP::JT_THREAD_GROUP:
1161  case JDWP::JT_CLASS_LOADER:
1162  case JDWP::JT_CLASS_OBJECT:
1163    return sizeof(JDWP::ObjectId);
1164  case JDWP::JT_DOUBLE:
1165  case JDWP::JT_LONG:
1166    return 8;
1167  default:
1168    LOG(FATAL) << "Unknown tag " << tag;
1169    return -1;
1170  }
1171}
1172
1173JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int& length) {
1174  JDWP::JdwpError status;
1175  mirror::Array* a = DecodeArray(array_id, status);
1176  if (a == NULL) {
1177    return status;
1178  }
1179  length = a->GetLength();
1180  return JDWP::ERR_NONE;
1181}
1182
1183JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) {
1184  JDWP::JdwpError status;
1185  mirror::Array* a = DecodeArray(array_id, status);
1186  if (a == nullptr) {
1187    return status;
1188  }
1189
1190  if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) {
1191    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
1192    return JDWP::ERR_INVALID_LENGTH;
1193  }
1194  std::string descriptor(ClassHelper(a->GetClass()).GetDescriptor());
1195  JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor.c_str() + 1);
1196
1197  expandBufAdd1(pReply, tag);
1198  expandBufAdd4BE(pReply, count);
1199
1200  if (IsPrimitiveTag(tag)) {
1201    size_t width = GetTagWidth(tag);
1202    uint8_t* dst = expandBufAddSpace(pReply, count * width);
1203    if (width == 8) {
1204      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
1205      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
1206    } else if (width == 4) {
1207      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
1208      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
1209    } else if (width == 2) {
1210      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
1211      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
1212    } else {
1213      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
1214      memcpy(dst, &src[offset * width], count * width);
1215    }
1216  } else {
1217    ScopedObjectAccessUnchecked soa(Thread::Current());
1218    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
1219    for (int i = 0; i < count; ++i) {
1220      mirror::Object* element = oa->Get(offset + i);
1221      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element)
1222                                                        : tag;
1223      expandBufAdd1(pReply, specific_tag);
1224      expandBufAddObjectId(pReply, gRegistry->Add(element));
1225    }
1226  }
1227
1228  return JDWP::ERR_NONE;
1229}
1230
1231template <typename T>
1232static void CopyArrayData(mirror::Array* a, JDWP::Request& src, int offset, int count)
1233    NO_THREAD_SAFETY_ANALYSIS {
1234  // TODO: fix when annotalysis correctly handles non-member functions.
1235  DCHECK(a->GetClass()->IsPrimitiveArray());
1236
1237  T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset));
1238  for (int i = 0; i < count; ++i) {
1239    *dst++ = src.ReadValue(sizeof(T));
1240  }
1241}
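
// Worked example: writing three elements of an int[] ("[I") through SetArrayElements below
// yields tag JT_INT and width 4, so the request payload is copied with
//   CopyArrayData<uint32_t>(dst, request, offset, 3);
// reading one 4-byte value from the JDWP request per element and storing it into the array's
// raw data starting at 'offset'.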
1242
1243JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count,
1244                                      JDWP::Request& request)
1245    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1246  JDWP::JdwpError status;
1247  mirror::Array* dst = DecodeArray(array_id, status);
1248  if (dst == NULL) {
1249    return status;
1250  }
1251
1252  if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) {
1253    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
1254    return JDWP::ERR_INVALID_LENGTH;
1255  }
1256  ClassHelper ch(dst->GetClass());
1257  const char* descriptor = ch.GetDescriptor();
1258  JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor + 1);
1259
1260  if (IsPrimitiveTag(tag)) {
1261    size_t width = GetTagWidth(tag);
1262    if (width == 8) {
1263      CopyArrayData<uint64_t>(dst, request, offset, count);
1264    } else if (width == 4) {
1265      CopyArrayData<uint32_t>(dst, request, offset, count);
1266    } else if (width == 2) {
1267      CopyArrayData<uint16_t>(dst, request, offset, count);
1268    } else {
1269      CopyArrayData<uint8_t>(dst, request, offset, count);
1270    }
1271  } else {
1272    mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>();
1273    for (int i = 0; i < count; ++i) {
1274      JDWP::ObjectId id = request.ReadObjectId();
1275      mirror::Object* o = gRegistry->Get<mirror::Object*>(id);
1276      if (o == ObjectRegistry::kInvalidObject) {
1277        return JDWP::ERR_INVALID_OBJECT;
1278      }
1279      oa->Set<false>(offset + i, o);
1280    }
1281  }
1282
1283  return JDWP::ERR_NONE;
1284}
1285
1286JDWP::ObjectId Dbg::CreateString(const std::string& str) {
1287  return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str()));
1288}
1289
1290JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId& new_object) {
1291  JDWP::JdwpError status;
1292  mirror::Class* c = DecodeClass(class_id, status);
1293  if (c == NULL) {
1294    return status;
1295  }
1296  new_object = gRegistry->Add(c->AllocObject(Thread::Current()));
1297  return JDWP::ERR_NONE;
1298}
1299
1300/*
1301 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]".
1302 */
1303JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length,
1304                                       JDWP::ObjectId& new_array) {
1305  JDWP::JdwpError status;
1306  mirror::Class* c = DecodeClass(array_class_id, status);
1307  if (c == NULL) {
1308    return status;
1309  }
1310  new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length,
1311                                                        c->GetComponentSize(),
1312                                                        Runtime::Current()->GetHeap()->GetCurrentAllocator()));
1313  return JDWP::ERR_NONE;
1314}
1315
1316bool Dbg::MatchType(JDWP::RefTypeId instance_class_id, JDWP::RefTypeId class_id) {
1317  JDWP::JdwpError status;
1318  mirror::Class* c1 = DecodeClass(instance_class_id, status);
1319  CHECK(c1 != NULL);
1320  mirror::Class* c2 = DecodeClass(class_id, status);
1321  CHECK(c2 != NULL);
1322  return c2->IsAssignableFrom(c1);
1323}
1324
1325static JDWP::FieldId ToFieldId(const mirror::ArtField* f)
1326    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1327  CHECK(!kMovingFields);
1328  return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f));
1329}
1330
1331static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m)
1332    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1333  CHECK(!kMovingMethods);
1334  return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m));
1335}
1336
1337static mirror::ArtField* FromFieldId(JDWP::FieldId fid)
1338    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1339  CHECK(!kMovingFields);
1340  return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid));
1341}
1342
1343static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid)
1344    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1345  CHECK(!kMovingMethods);
1346  return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid));
1347}
1348
1349static void SetLocation(JDWP::JdwpLocation& location, mirror::ArtMethod* m, uint32_t dex_pc)
1350    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1351  if (m == NULL) {
1352    memset(&location, 0, sizeof(location));
1353  } else {
1354    mirror::Class* c = m->GetDeclaringClass();
1355    location.type_tag = GetTypeTag(c);
1356    location.class_id = gRegistry->AddRefType(c);
1357    location.method_id = ToMethodId(m);
1358    location.dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc;
1359  }
1360}
1361
1362std::string Dbg::GetMethodName(JDWP::MethodId method_id)
1363    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1364  mirror::ArtMethod* m = FromMethodId(method_id);
1365  return MethodHelper(m).GetName();
1366}
1367
1368std::string Dbg::GetFieldName(JDWP::FieldId field_id)
1369    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1370  mirror::ArtField* f = FromFieldId(field_id);
1371  return FieldHelper(f).GetName();
1372}
1373
1374/*
1375 * Augment the access flags for synthetic methods and fields by setting
1376 * the (as described by the spec) "0xf0000000 bit".  Also, strip out any
1377 * flags not specified by the Java programming language.
1378 */
1379static uint32_t MangleAccessFlags(uint32_t accessFlags) {
1380  accessFlags &= kAccJavaFlagsMask;
1381  if ((accessFlags & kAccSynthetic) != 0) {
1382    accessFlags |= 0xf0000000;
1383  }
1384  return accessFlags;
1385}
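
// Worked example: a member that is public and synthetic (kAccPublic | kAccSynthetic == 0x1001)
// is reported to the debugger as
//   MangleAccessFlags(0x1001) == 0xf0001001
// i.e. the Java-level flags with the JDWP synthetic marker 0xf0000000 ORed in.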
1386
1387/*
1388 * Circularly shifts registers so that arguments come first. Debuggers
1389 * expect slots to begin with arguments, but dex code places them at
1390 * the end.
1391 */
1392static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m)
1393    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1394  const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
1395  if (code_item == nullptr) {
1396    // We should not get here for a method without code (native, proxy or abstract). Log it and
1397    // return the slot as is since all registers are arguments.
1398    LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m);
1399    return slot;
1400  }
1401  uint16_t ins_size = code_item->ins_size_;
1402  uint16_t locals_size = code_item->registers_size_ - ins_size;
1403  if (slot >= locals_size) {
1404    return slot - locals_size;
1405  } else {
1406    return slot + ins_size;
1407  }
1408}
1409
1410/*
1411 * Circularly shifts registers so that arguments come last. Reverts
1412 * slots to dex style argument placement.
1413 */
1414static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m)
1415    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1416  const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem();
1417  if (code_item == nullptr) {
1418    // We should not get here for a method without code (native, proxy or abstract). Log it and
1419    // return the slot as is since all registers are arguments.
1420    LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m);
1421    return slot;
1422  }
1423  uint16_t ins_size = code_item->ins_size_;
1424  uint16_t locals_size = code_item->registers_size_ - ins_size;
1425  if (slot < ins_size) {
1426    return slot + locals_size;
1427  } else {
1428    return slot - ins_size;
1429  }
1430}
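
// Worked example: for a hypothetical method with registers_size_ == 5 and ins_size_ == 2
// (so locals_size == 3), MangleSlot maps dex registers to debugger slots as
//   v0, v1, v2 (locals)    -> 2, 3, 4   (slot + ins_size)
//   v3, v4     (arguments) -> 0, 1      (slot - locals_size)
// and DemangleSlot undoes the shift: slots 0..1 -> v3..v4, slots 2..4 -> v0..v2.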
1431
1432JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) {
1433  JDWP::JdwpError status;
1434  mirror::Class* c = DecodeClass(class_id, status);
1435  if (c == NULL) {
1436    return status;
1437  }
1438
1439  size_t instance_field_count = c->NumInstanceFields();
1440  size_t static_field_count = c->NumStaticFields();
1441
1442  expandBufAdd4BE(pReply, instance_field_count + static_field_count);
1443
1444  for (size_t i = 0; i < instance_field_count + static_field_count; ++i) {
1445    mirror::ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count);
1446    FieldHelper fh(f);
1447    expandBufAddFieldId(pReply, ToFieldId(f));
1448    expandBufAddUtf8String(pReply, fh.GetName());
1449    expandBufAddUtf8String(pReply, fh.GetTypeDescriptor());
1450    if (with_generic) {
1451      static const char genericSignature[1] = "";
1452      expandBufAddUtf8String(pReply, genericSignature);
1453    }
1454    expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags()));
1455  }
1456  return JDWP::ERR_NONE;
1457}
1458
1459JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic,
1460                                           JDWP::ExpandBuf* pReply) {
1461  JDWP::JdwpError status;
1462  mirror::Class* c = DecodeClass(class_id, status);
1463  if (c == NULL) {
1464    return status;
1465  }
1466
1467  size_t direct_method_count = c->NumDirectMethods();
1468  size_t virtual_method_count = c->NumVirtualMethods();
1469
1470  expandBufAdd4BE(pReply, direct_method_count + virtual_method_count);
1471
1472  for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) {
1473    mirror::ArtMethod* m = (i < direct_method_count) ? c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count);
1474    MethodHelper mh(m);
1475    expandBufAddMethodId(pReply, ToMethodId(m));
1476    expandBufAddUtf8String(pReply, mh.GetName());
1477    expandBufAddUtf8String(pReply, mh.GetSignature().ToString());
1478    if (with_generic) {
1479      static const char genericSignature[1] = "";
1480      expandBufAddUtf8String(pReply, genericSignature);
1481    }
1482    expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags()));
1483  }
1484  return JDWP::ERR_NONE;
1485}
1486
1487JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) {
1488  JDWP::JdwpError status;
1489  mirror::Class* c = DecodeClass(class_id, status);
1490  if (c == NULL) {
1491    return status;
1492  }
1493
1494  ClassHelper kh(c);
1495  size_t interface_count = kh.NumDirectInterfaces();
1496  expandBufAdd4BE(pReply, interface_count);
1497  for (size_t i = 0; i < interface_count; ++i) {
1498    expandBufAddRefTypeId(pReply, gRegistry->AddRefType(kh.GetDirectInterface(i)));
1499  }
1500  return JDWP::ERR_NONE;
1501}
1502
1503void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply)
1504    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1505  struct DebugCallbackContext {
1506    int numItems;
1507    JDWP::ExpandBuf* pReply;
1508
1509    static bool Callback(void* context, uint32_t address, uint32_t line_number) {
1510      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1511      expandBufAdd8BE(pContext->pReply, address);
1512      expandBufAdd4BE(pContext->pReply, line_number);
1513      pContext->numItems++;
1514      return false;
1515    }
1516  };
1517  mirror::ArtMethod* m = FromMethodId(method_id);
1518  MethodHelper mh(m);
1519  const DexFile::CodeItem* code_item = mh.GetCodeItem();
1520  uint64_t start, end;
1521  if (code_item == nullptr) {
1522    DCHECK(m->IsNative() || m->IsProxyMethod());
1523    start = -1;
1524    end = -1;
1525  } else {
1526    start = 0;
1527    // Return the index of the last instruction
1528    end = code_item->insns_size_in_code_units_ - 1;
1529  }
1530
1531  expandBufAdd8BE(pReply, start);
1532  expandBufAdd8BE(pReply, end);
1533

1534  // Reserve space for numLines; it is back-patched below once the debug info has been decoded.
1535  size_t numLinesOffset = expandBufGetLength(pReply);
1536  expandBufAdd4BE(pReply, 0);
1537
1538  DebugCallbackContext context;
1539  context.numItems = 0;
1540  context.pReply = pReply;
1541
1542  if (code_item != nullptr) {
1543    mh.GetDexFile().DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
1544                                    DebugCallbackContext::Callback, NULL, &context);
1545  }
1546
1547  JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems);
1548}
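// The Method.LineTable reply built above is, in order: an 8-byte start code index, an 8-byte
// end code index, a 4-byte numLines count, then numLines pairs of (8-byte code index, 4-byte
// line number). numLines is written as 0 first and back-patched at numLinesOffset once
// DecodeDebugInfo has invoked the callback for every entry. (Sketch based on the writes above.)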
1549
1550void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic, JDWP::ExpandBuf* pReply) {
1551  struct DebugCallbackContext {
1552    mirror::ArtMethod* method;
1553    JDWP::ExpandBuf* pReply;
1554    size_t variable_count;
1555    bool with_generic;
1556
1557    static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress, const char* name, const char* descriptor, const char* signature)
1558        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1559      DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context);
1560
1561      VLOG(jdwp) << StringPrintf("    %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d", pContext->variable_count, startAddress, endAddress - startAddress, name, descriptor, signature, slot, MangleSlot(slot, pContext->method));
1562
1563      slot = MangleSlot(slot, pContext->method);
1564
1565      expandBufAdd8BE(pContext->pReply, startAddress);
1566      expandBufAddUtf8String(pContext->pReply, name);
1567      expandBufAddUtf8String(pContext->pReply, descriptor);
1568      if (pContext->with_generic) {
1569        expandBufAddUtf8String(pContext->pReply, signature);
1570      }
1571      expandBufAdd4BE(pContext->pReply, endAddress - startAddress);
1572      expandBufAdd4BE(pContext->pReply, slot);
1573
1574      ++pContext->variable_count;
1575    }
1576  };
1577  mirror::ArtMethod* m = FromMethodId(method_id);
1578  MethodHelper mh(m);
1579
1580  // arg_count considers doubles and longs to take 2 units.
1581  // variable_count considers everything to take 1 unit.
1582  std::string shorty(mh.GetShorty());
1583  expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty));
1584
1585  // We don't know the total number of variables yet, so leave a blank and update it later.
1586  size_t variable_count_offset = expandBufGetLength(pReply);
1587  expandBufAdd4BE(pReply, 0);
1588
1589  DebugCallbackContext context;
1590  context.method = m;
1591  context.pReply = pReply;
1592  context.variable_count = 0;
1593  context.with_generic = with_generic;
1594
1595  const DexFile::CodeItem* code_item = mh.GetCodeItem();
1596  if (code_item != nullptr) {
1597    mh.GetDexFile().DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(), NULL,
1598                                    DebugCallbackContext::Callback, &context);
1599  }
1600
1601  JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count);
1602}
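// Likewise, the Method.VariableTable reply written above is: a 4-byte argument register count
// (from the shorty), a 4-byte variable count back-patched at variable_count_offset, then one
// entry per variable: 8-byte start code index, name, descriptor, the generic signature when
// with_generic is set, a 4-byte length in code units, and the 4-byte mangled slot. (Sketch
// derived from the buffer writes above.)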
1603
1604void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value,
1605                                  JDWP::ExpandBuf* pReply) {
1606  mirror::ArtMethod* m = FromMethodId(method_id);
1607  JDWP::JdwpTag tag = BasicTagFromDescriptor(MethodHelper(m).GetShorty());
1608  OutputJValue(tag, return_value, pReply);
1609}
1610
1611void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value,
1612                           JDWP::ExpandBuf* pReply) {
1613  mirror::ArtField* f = FromFieldId(field_id);
1614  JDWP::JdwpTag tag = BasicTagFromDescriptor(FieldHelper(f).GetTypeDescriptor());
1615  OutputJValue(tag, field_value, pReply);
1616}
1617
1618JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id,
1619                                  std::vector<uint8_t>& bytecodes)
1620    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1621  mirror::ArtMethod* m = FromMethodId(method_id);
1622  if (m == NULL) {
1623    return JDWP::ERR_INVALID_METHODID;
1624  }
1625  MethodHelper mh(m);
1626  const DexFile::CodeItem* code_item = mh.GetCodeItem();
1627  size_t byte_count = code_item->insns_size_in_code_units_ * 2;
1628  const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_);
1629  const uint8_t* end = begin + byte_count;
1630  for (const uint8_t* p = begin; p != end; ++p) {
1631    bytecodes.push_back(*p);
1632  }
1633  return JDWP::ERR_NONE;
1634}
1635
1636JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) {
1637  return BasicTagFromDescriptor(FieldHelper(FromFieldId(field_id)).GetTypeDescriptor());
1638}
1639
1640JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) {
1641  return BasicTagFromDescriptor(FieldHelper(FromFieldId(field_id)).GetTypeDescriptor());
1642}
1643
1644static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id,
1645                                         JDWP::FieldId field_id, JDWP::ExpandBuf* pReply,
1646                                         bool is_static)
1647    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1648  JDWP::JdwpError status;
1649  mirror::Class* c = DecodeClass(ref_type_id, status);
1650  if (ref_type_id != 0 && c == NULL) {
1651    return status;
1652  }
1653
1654  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1655  if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
1656    return JDWP::ERR_INVALID_OBJECT;
1657  }
1658  mirror::ArtField* f = FromFieldId(field_id);
1659
1660  mirror::Class* receiver_class = c;
1661  if (receiver_class == NULL && o != NULL) {
1662    receiver_class = o->GetClass();
1663  }
1664  // TODO: should we give up now if receiver_class is NULL?
1665  if (receiver_class != NULL && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
1666    LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
1667    return JDWP::ERR_INVALID_FIELDID;
1668  }
1669
1670  // The RI only enforces the static/non-static mismatch in one direction.
1671  // TODO: should we change the tests and check both?
1672  if (is_static) {
1673    if (!f->IsStatic()) {
1674      return JDWP::ERR_INVALID_FIELDID;
1675    }
1676  } else {
1677    if (f->IsStatic()) {
1678      LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.GetValues on static field " << PrettyField(f);
1679    }
1680  }
1681  if (f->IsStatic()) {
1682    o = f->GetDeclaringClass();
1683  }
1684
1685  JDWP::JdwpTag tag = BasicTagFromDescriptor(FieldHelper(f).GetTypeDescriptor());
1686  JValue field_value;
1687  if (tag == JDWP::JT_VOID) {
1688    LOG(FATAL) << "Unknown tag: " << tag;
1689  } else if (!IsPrimitiveTag(tag)) {
1690    field_value.SetL(f->GetObject(o));
1691  } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1692    field_value.SetJ(f->Get64(o));
1693  } else {
1694    field_value.SetI(f->Get32(o));
1695  }
1696  Dbg::OutputJValue(tag, &field_value, pReply);
1697
1698  return JDWP::ERR_NONE;
1699}
1700
1701JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1702                                   JDWP::ExpandBuf* pReply) {
1703  return GetFieldValueImpl(0, object_id, field_id, pReply, false);
1704}
1705
1706JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) {
1707  return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
1708}
1709
1710static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
1711                                         uint64_t value, int width, bool is_static)
1712    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1713  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
1714  if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
1715    return JDWP::ERR_INVALID_OBJECT;
1716  }
1717  mirror::ArtField* f = FromFieldId(field_id);
1718
1719  // The RI only enforces the static/non-static mismatch in one direction.
1720  // TODO: should we change the tests and check both?
1721  if (is_static) {
1722    if (!f->IsStatic()) {
1723      return JDWP::ERR_INVALID_FIELDID;
1724    }
1725  } else {
1726    if (f->IsStatic()) {
1727      LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.SetValues on static field " << PrettyField(f);
1728    }
1729  }
1730  if (f->IsStatic()) {
1731    o = f->GetDeclaringClass();
1732  }
1733
1734  JDWP::JdwpTag tag = BasicTagFromDescriptor(FieldHelper(f).GetTypeDescriptor());
1735
1736  if (IsPrimitiveTag(tag)) {
1737    if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1738      CHECK_EQ(width, 8);
1739      // Debugging can't use transactional mode (runtime only).
1740      f->Set64<false>(o, value);
1741    } else {
1742      CHECK_LE(width, 4);
1743      // Debugging can't use transactional mode (runtime only).
1744      f->Set32<false>(o, value);
1745    }
1746  } else {
1747    mirror::Object* v = gRegistry->Get<mirror::Object*>(value);
1748    if (v == ObjectRegistry::kInvalidObject) {
1749      return JDWP::ERR_INVALID_OBJECT;
1750    }
1751    if (v != NULL) {
1752      mirror::Class* field_type = FieldHelper(f).GetType();
1753      if (!field_type->IsAssignableFrom(v->GetClass())) {
1754        return JDWP::ERR_INVALID_OBJECT;
1755      }
1756    }
1757    // Debugging can't use transactional mode (runtime only).
1758    f->SetObject<false>(o, v);
1759  }
1760
1761  return JDWP::ERR_NONE;
1762}
1763
1764JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value,
1765                                   int width) {
1766  return SetFieldValueImpl(object_id, field_id, value, width, false);
1767}
1768
1769JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) {
1770  return SetFieldValueImpl(0, field_id, value, width, true);
1771}
1772
1773std::string Dbg::StringToUtf8(JDWP::ObjectId string_id) {
1774  mirror::String* s = gRegistry->Get<mirror::String*>(string_id);
1775  return s->ToModifiedUtf8();
1776}
1777
1778void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) {
1779  if (IsPrimitiveTag(tag)) {
1780    expandBufAdd1(pReply, tag);
1781    if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) {
1782      expandBufAdd1(pReply, return_value->GetI());
1783    } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) {
1784      expandBufAdd2BE(pReply, return_value->GetI());
1785    } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) {
1786      expandBufAdd4BE(pReply, return_value->GetI());
1787    } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
1788      expandBufAdd8BE(pReply, return_value->GetJ());
1789    } else {
1790      CHECK_EQ(tag, JDWP::JT_VOID);
1791    }
1792  } else {
1793    ScopedObjectAccessUnchecked soa(Thread::Current());
1794    mirror::Object* value = return_value->GetL();
1795    expandBufAdd1(pReply, TagFromObject(soa, value));
1796    expandBufAddObjectId(pReply, gRegistry->Add(value));
1797  }
1798}
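// Example of the encoding produced above (a sketch): an int return value is written as the
// 1-byte JT_INT tag followed by its 4-byte big-endian value, while an object value is written
// as a possibly refined tag from TagFromObject (e.g. JT_STRING for a string) followed by an
// ObjectId newly registered with gRegistry.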
1799
1800JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string& name) {
1801  ScopedObjectAccessUnchecked soa(Thread::Current());
1802  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1803  Thread* thread;
1804  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1805  if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) {
1806    return error;
1807  }
1808
1809  // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName.
1810  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
1811  mirror::ArtField* java_lang_Thread_name_field =
1812      soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
1813  mirror::String* s =
1814      reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object));
1815  if (s != NULL) {
1816    name = s->ToModifiedUtf8();
1817  }
1818  return JDWP::ERR_NONE;
1819}
1820
1821JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
1822  ScopedObjectAccess soa(Thread::Current());
1823  mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id);
1824  if (thread_object == ObjectRegistry::kInvalidObject) {
1825    return JDWP::ERR_INVALID_OBJECT;
1826  }
1827  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroup");
1828  // Okay, so it's an object, but is it actually a thread?
1829  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1830  Thread* thread;
1831  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1832  if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
1833    // Zombie threads are in the null group.
1834    expandBufAddObjectId(pReply, JDWP::ObjectId(0));
1835    error = JDWP::ERR_NONE;
1836  } else if (error == JDWP::ERR_NONE) {
1837    mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread);
1838    CHECK(c != nullptr);
1839    mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;");
1840    CHECK(f != NULL);
1841    mirror::Object* group = f->GetObject(thread_object);
1842    CHECK(group != NULL);
1843    JDWP::ObjectId thread_group_id = gRegistry->Add(group);
1844    expandBufAddObjectId(pReply, thread_group_id);
1845  }
1846  soa.Self()->EndAssertNoThreadSuspension(old_cause);
1847  return error;
1848}
1849
1850std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) {
1851  ScopedObjectAccess soa(Thread::Current());
1852  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
1853  CHECK(thread_group != nullptr);
1854  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName");
1855  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
1856  CHECK(c != nullptr);
1857  mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;");
1858  CHECK(f != NULL);
1859  mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group));
1860  soa.Self()->EndAssertNoThreadSuspension(old_cause);
1861  return s->ToModifiedUtf8();
1862}
1863
1864JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) {
1865  ScopedObjectAccessUnchecked soa(Thread::Current());
1866  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
1867  CHECK(thread_group != nullptr);
1868  const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent");
1869  mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup);
1870  CHECK(c != nullptr);
1871  mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;");
1872  CHECK(f != NULL);
1873  mirror::Object* parent = f->GetObject(thread_group);
1874  soa.Self()->EndAssertNoThreadSuspension(old_cause);
1875  return gRegistry->Add(parent);
1876}
1877
1878JDWP::ObjectId Dbg::GetSystemThreadGroupId() {
1879  ScopedObjectAccessUnchecked soa(Thread::Current());
1880  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup);
1881  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
1882  return gRegistry->Add(group);
1883}
1884
1885JDWP::ObjectId Dbg::GetMainThreadGroupId() {
1886  ScopedObjectAccess soa(Thread::Current());
1887  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup);
1888  mirror::Object* group = f->GetObject(f->GetDeclaringClass());
1889  return gRegistry->Add(group);
1890}
1891
1892JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) {
1893  switch (state) {
1894    case kBlocked:
1895      return JDWP::TS_MONITOR;
1896    case kNative:
1897    case kRunnable:
1898    case kSuspended:
1899      return JDWP::TS_RUNNING;
1900    case kSleeping:
1901      return JDWP::TS_SLEEPING;
1902    case kStarting:
1903    case kTerminated:
1904      return JDWP::TS_ZOMBIE;
1905    case kTimedWaiting:
1906    case kWaitingForDebuggerSend:
1907    case kWaitingForDebuggerSuspension:
1908    case kWaitingForDebuggerToAttach:
1909    case kWaitingForDeoptimization:
1910    case kWaitingForGcToComplete:
1911    case kWaitingForCheckPointsToRun:
1912    case kWaitingForJniOnLoad:
1913    case kWaitingForSignalCatcherOutput:
1914    case kWaitingInMainDebuggerLoop:
1915    case kWaitingInMainSignalCatcherLoop:
1916    case kWaitingPerformingGc:
1917    case kWaiting:
1918      return JDWP::TS_WAIT;
1919      // Don't add a 'default' here so the compiler can spot incompatible enum changes.
1920  }
1921  LOG(FATAL) << "Unknown thread state: " << state;
1922  return JDWP::TS_ZOMBIE;
1923}
1924
1925JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus,
1926                                     JDWP::JdwpSuspendStatus* pSuspendStatus) {
1927  ScopedObjectAccess soa(Thread::Current());
1928
1929  *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED;
1930
1931  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1932  Thread* thread;
1933  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1934  if (error != JDWP::ERR_NONE) {
1935    if (error == JDWP::ERR_THREAD_NOT_ALIVE) {
1936      *pThreadStatus = JDWP::TS_ZOMBIE;
1937      return JDWP::ERR_NONE;
1938    }
1939    return error;
1940  }
1941
1942  if (IsSuspendedForDebugger(soa, thread)) {
1943    *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED;
1944  }
1945
1946  *pThreadStatus = ToJdwpThreadStatus(thread->GetState());
1947  return JDWP::ERR_NONE;
1948}
1949
1950JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) {
1951  ScopedObjectAccess soa(Thread::Current());
1952  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1953  Thread* thread;
1954  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1955  if (error != JDWP::ERR_NONE) {
1956    return error;
1957  }
1958  MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
1959  expandBufAdd4BE(pReply, thread->GetDebugSuspendCount());
1960  return JDWP::ERR_NONE;
1961}
1962
1963JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) {
1964  ScopedObjectAccess soa(Thread::Current());
1965  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
1966  Thread* thread;
1967  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
1968  if (error != JDWP::ERR_NONE) {
1969    return error;
1970  }
1971  thread->Interrupt(soa.Self());
1972  return JDWP::ERR_NONE;
1973}
1974
1975void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& thread_ids) {
1976  class ThreadListVisitor {
1977   public:
1978    ThreadListVisitor(const ScopedObjectAccessUnchecked& soa, mirror::Object* desired_thread_group,
1979                      std::vector<JDWP::ObjectId>& thread_ids)
1980        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1981        : soa_(soa), desired_thread_group_(desired_thread_group), thread_ids_(thread_ids) {}
1982
1983    static void Visit(Thread* t, void* arg) {
1984      reinterpret_cast<ThreadListVisitor*>(arg)->Visit(t);
1985    }
1986
1987    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
1988    // annotalysis.
1989    void Visit(Thread* t) NO_THREAD_SAFETY_ANALYSIS {
1990      if (t == Dbg::GetDebugThread()) {
1991        // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and
1992        // query all threads, so it's easier if we just don't tell them about this thread.
1993        return;
1994      }
1995      mirror::Object* peer = t->GetPeer();
1996      if (IsInDesiredThreadGroup(peer)) {
1997        thread_ids_.push_back(gRegistry->Add(peer));
1998      }
1999    }
2000
2001   private:
2002    bool IsInDesiredThreadGroup(mirror::Object* peer)
2003        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2004      // peer might be NULL if the thread is still starting up.
2005      if (peer == NULL) {
2006        // We can't tell the debugger about this thread yet.
2007        // TODO: if we identified threads to the debugger by their Thread*
2008        // rather than their peer's mirror::Object*, we could fix this.
2009        // Doing so might help us report ZOMBIE threads too.
2010        return false;
2011      }
2012      // Do we want threads from all thread groups?
2013      if (desired_thread_group_ == NULL) {
2014        return true;
2015      }
2016      mirror::Object* group = soa_.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(peer);
2017      return (group == desired_thread_group_);
2018    }
2019
2020    const ScopedObjectAccessUnchecked& soa_;
2021    mirror::Object* const desired_thread_group_;
2022    std::vector<JDWP::ObjectId>& thread_ids_;
2023  };
2024
2025  ScopedObjectAccessUnchecked soa(Thread::Current());
2026  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
2027  ThreadListVisitor tlv(soa, thread_group, thread_ids);
2028  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2029  Runtime::Current()->GetThreadList()->ForEach(ThreadListVisitor::Visit, &tlv);
2030}
2031
2032void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& child_thread_group_ids) {
2033  ScopedObjectAccess soa(Thread::Current());
2034  mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id);
2035
2036  // Get the ArrayList<ThreadGroup> "groups" out of this thread group...
2037  mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;");
2038  mirror::Object* groups_array_list = groups_field->GetObject(thread_group);
2039
2040  // Get the array and size out of the ArrayList<ThreadGroup>...
2041  mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;");
2042  mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I");
2043  mirror::ObjectArray<mirror::Object>* groups_array =
2044      array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>();
2045  const int32_t size = size_field->GetInt(groups_array_list);
2046
2047  // Copy the first 'size' elements out of the array into the result.
2048  for (int32_t i = 0; i < size; ++i) {
2049    child_thread_group_ids.push_back(gRegistry->Add(groups_array->Get(i)));
2050  }
2051}
2052
2053static int GetStackDepth(Thread* thread)
2054    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2055  struct CountStackDepthVisitor : public StackVisitor {
2056    explicit CountStackDepthVisitor(Thread* thread)
2057        : StackVisitor(thread, NULL), depth(0) {}
2058
2059    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2060    // annotalysis.
2061    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2062      if (!GetMethod()->IsRuntimeMethod()) {
2063        ++depth;
2064      }
2065      return true;
2066    }
2067    size_t depth;
2068  };
2069
2070  CountStackDepthVisitor visitor(thread);
2071  visitor.WalkStack();
2072  return visitor.depth;
2073}
2074
2075JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t& result) {
2076  ScopedObjectAccess soa(Thread::Current());
2077  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2078  Thread* thread;
2079  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2080  if (error != JDWP::ERR_NONE) {
2081    return error;
2082  }
2083  if (!IsSuspendedForDebugger(soa, thread)) {
2084    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2085  }
2086  result = GetStackDepth(thread);
2087  return JDWP::ERR_NONE;
2088}
2089
2090JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame,
2091                                     size_t frame_count, JDWP::ExpandBuf* buf) {
2092  class GetFrameVisitor : public StackVisitor {
2093   public:
2094    GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf)
2095        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2096        : StackVisitor(thread, NULL), depth_(0),
2097          start_frame_(start_frame), frame_count_(frame_count), buf_(buf) {
2098      expandBufAdd4BE(buf_, frame_count_);
2099    }
2100
2101    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2102    // annotalysis.
2103    virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2104      if (GetMethod()->IsRuntimeMethod()) {
2105        return true;  // The debugger can't do anything useful with a frame that has no Method*.
2106      }
2107      if (depth_ >= start_frame_ + frame_count_) {
2108        return false;
2109      }
2110      if (depth_ >= start_frame_) {
2111        JDWP::FrameId frame_id(GetFrameId());
2112        JDWP::JdwpLocation location;
2113        SetLocation(location, GetMethod(), GetDexPc());
2114        VLOG(jdwp) << StringPrintf("    Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location;
2115        expandBufAdd8BE(buf_, frame_id);
2116        expandBufAddLocation(buf_, location);
2117      }
2118      ++depth_;
2119      return true;
2120    }
2121
2122   private:
2123    size_t depth_;
2124    const size_t start_frame_;
2125    const size_t frame_count_;
2126    JDWP::ExpandBuf* buf_;
2127  };
2128
2129  ScopedObjectAccessUnchecked soa(Thread::Current());
2130  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2131  Thread* thread;
2132  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2133  if (error != JDWP::ERR_NONE) {
2134    return error;
2135  }
2136  if (!IsSuspendedForDebugger(soa, thread)) {
2137    return JDWP::ERR_THREAD_NOT_SUSPENDED;
2138  }
2139  GetFrameVisitor visitor(thread, start_frame, frame_count, buf);
2140  visitor.WalkStack();
2141  return JDWP::ERR_NONE;
2142}
2143
2144JDWP::ObjectId Dbg::GetThreadSelfId() {
2145  ScopedObjectAccessUnchecked soa(Thread::Current());
2146  return gRegistry->Add(soa.Self()->GetPeer());
2147}
2148
2149void Dbg::SuspendVM() {
2150  Runtime::Current()->GetThreadList()->SuspendAllForDebugger();
2151}
2152
2153void Dbg::ResumeVM() {
2154  Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions();
2155}
2156
2157JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) {
2158  ScopedLocalRef<jobject> peer(Thread::Current()->GetJniEnv(), NULL);
2159  {
2160    ScopedObjectAccess soa(Thread::Current());
2161    peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id)));
2162  }
2163  if (peer.get() == NULL) {
2164    return JDWP::ERR_THREAD_NOT_ALIVE;
2165  }
2166  // Suspend thread to build stack trace.
2167  bool timed_out;
2168  Thread* thread = ThreadList::SuspendThreadByPeer(peer.get(), request_suspension, true,
2169                                                   &timed_out);
2170  if (thread != NULL) {
2171    return JDWP::ERR_NONE;
2172  } else if (timed_out) {
2173    return JDWP::ERR_INTERNAL;
2174  } else {
2175    return JDWP::ERR_THREAD_NOT_ALIVE;
2176  }
2177}
2178
2179void Dbg::ResumeThread(JDWP::ObjectId thread_id) {
2180  ScopedObjectAccessUnchecked soa(Thread::Current());
2181  mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id);
2182  Thread* thread;
2183  {
2184    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2185    thread = Thread::FromManagedThread(soa, peer);
2186  }
2187  if (thread == NULL) {
2188    LOG(WARNING) << "No such thread for resume: " << peer;
2189    return;
2190  }
2191  bool needs_resume;
2192  {
2193    MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
2194    needs_resume = thread->GetSuspendCount() > 0;
2195  }
2196  if (needs_resume) {
2197    Runtime::Current()->GetThreadList()->Resume(thread, true);
2198  }
2199}
2200
2201void Dbg::SuspendSelf() {
2202  Runtime::Current()->GetThreadList()->SuspendSelfForDebugger();
2203}
2204
2205struct GetThisVisitor : public StackVisitor {
2206  GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id)
2207      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2208      : StackVisitor(thread, context), this_object(NULL), frame_id(frame_id) {}
2209
2210  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2211  // annotalysis.
2212  virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2213    if (frame_id != GetFrameId()) {
2214      return true;  // continue
2215    } else {
2216      this_object = GetThisObject();
2217      return false;
2218    }
2219  }
2220
2221  mirror::Object* this_object;
2222  JDWP::FrameId frame_id;
2223};
2224
2225JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id,
2226                                   JDWP::ObjectId* result) {
2227  ScopedObjectAccessUnchecked soa(Thread::Current());
2228  Thread* thread;
2229  {
2230    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2231    JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2232    if (error != JDWP::ERR_NONE) {
2233      return error;
2234    }
2235    if (!IsSuspendedForDebugger(soa, thread)) {
2236      return JDWP::ERR_THREAD_NOT_SUSPENDED;
2237    }
2238  }
2239  UniquePtr<Context> context(Context::Create());
2240  GetThisVisitor visitor(thread, context.get(), frame_id);
2241  visitor.WalkStack();
2242  *result = gRegistry->Add(visitor.this_object);
2243  return JDWP::ERR_NONE;
2244}
2245
2246JDWP::JdwpError Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
2247                                   JDWP::JdwpTag tag, uint8_t* buf, size_t width) {
2248  struct GetLocalVisitor : public StackVisitor {
2249    GetLocalVisitor(const ScopedObjectAccessUnchecked& soa, Thread* thread, Context* context,
2250                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width)
2251        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2252        : StackVisitor(thread, context), soa_(soa), frame_id_(frame_id), slot_(slot), tag_(tag),
2253          buf_(buf), width_(width), error_(JDWP::ERR_NONE) {}
2254
2255    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2256    // annotalysis.
2257    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2258      if (GetFrameId() != frame_id_) {
2259        return true;  // Not our frame, carry on.
2260      }
2261      // TODO: check that the tag is compatible with the actual type of the slot!
2262      // TODO: check slot is valid for this method or return INVALID_SLOT error.
2263      mirror::ArtMethod* m = GetMethod();
2264      if (m->IsNative()) {
2265        // We can't read a local value out of a native method's frame.
2266        error_ = JDWP::ERR_OPAQUE_FRAME;
2267        return false;
2268      }
2269      uint16_t reg = DemangleSlot(slot_, m);
2270
2271      switch (tag_) {
2272      case JDWP::JT_BOOLEAN:
2273        {
2274          CHECK_EQ(width_, 1U);
2275          uint32_t intVal = GetVReg(m, reg, kIntVReg);
2276          VLOG(jdwp) << "get boolean local " << reg << " = " << intVal;
2277          JDWP::Set1(buf_+1, intVal != 0);
2278        }
2279        break;
2280      case JDWP::JT_BYTE:
2281        {
2282          CHECK_EQ(width_, 1U);
2283          uint32_t intVal = GetVReg(m, reg, kIntVReg);
2284          VLOG(jdwp) << "get byte local " << reg << " = " << intVal;
2285          JDWP::Set1(buf_+1, intVal);
2286        }
2287        break;
2288      case JDWP::JT_SHORT:
2289      case JDWP::JT_CHAR:
2290        {
2291          CHECK_EQ(width_, 2U);
2292          uint32_t intVal = GetVReg(m, reg, kIntVReg);
2293          VLOG(jdwp) << "get short/char local " << reg << " = " << intVal;
2294          JDWP::Set2BE(buf_+1, intVal);
2295        }
2296        break;
2297      case JDWP::JT_INT:
2298        {
2299          CHECK_EQ(width_, 4U);
2300          uint32_t intVal = GetVReg(m, reg, kIntVReg);
2301          VLOG(jdwp) << "get int local " << reg << " = " << intVal;
2302          JDWP::Set4BE(buf_+1, intVal);
2303        }
2304        break;
2305      case JDWP::JT_FLOAT:
2306        {
2307          CHECK_EQ(width_, 4U);
2308          uint32_t intVal = GetVReg(m, reg, kFloatVReg);
2309          VLOG(jdwp) << "get int/float local " << reg << " = " << intVal;
2310          JDWP::Set4BE(buf_+1, intVal);
2311        }
2312        break;
2313      case JDWP::JT_ARRAY:
2314        {
2315          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
2316          mirror::Object* o = reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kReferenceVReg));
2317          VLOG(jdwp) << "get array local " << reg << " = " << o;
2318          if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
2319            LOG(FATAL) << "Register " << reg << " expected to hold array: " << o;
2320          }
2321          JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
2322        }
2323        break;
2324      case JDWP::JT_CLASS_LOADER:
2325      case JDWP::JT_CLASS_OBJECT:
2326      case JDWP::JT_OBJECT:
2327      case JDWP::JT_STRING:
2328      case JDWP::JT_THREAD:
2329      case JDWP::JT_THREAD_GROUP:
2330        {
2331          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
2332          mirror::Object* o = reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kReferenceVReg));
2333          VLOG(jdwp) << "get object local " << reg << " = " << o;
2334          if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
2335            LOG(FATAL) << "Register " << reg << " expected to hold object: " << o;
2336          }
2337          tag_ = TagFromObject(soa_, o);
2338          JDWP::SetObjectId(buf_+1, gRegistry->Add(o));
2339        }
2340        break;
2341      case JDWP::JT_DOUBLE:
2342        {
2343          CHECK_EQ(width_, 8U);
2344          uint32_t lo = GetVReg(m, reg, kDoubleLoVReg);
2345          uint64_t hi = GetVReg(m, reg + 1, kDoubleHiVReg);
2346          uint64_t longVal = (hi << 32) | lo;
2347          VLOG(jdwp) << "get double/long local " << hi << ":" << lo << " = " << longVal;
2348          JDWP::Set8BE(buf_+1, longVal);
2349        }
2350        break;
2351      case JDWP::JT_LONG:
2352        {
2353          CHECK_EQ(width_, 8U);
2354          uint32_t lo = GetVReg(m, reg, kLongLoVReg);
2355          uint64_t hi = GetVReg(m, reg + 1, kLongHiVReg);
2356          uint64_t longVal = (hi << 32) | lo;
2357          VLOG(jdwp) << "get double/long local " << hi << ":" << lo << " = " << longVal;
2358          JDWP::Set8BE(buf_+1, longVal);
2359        }
2360        break;
2361      default:
2362        LOG(FATAL) << "Unknown tag " << tag_;
2363        break;
2364      }
2365
2366      // Prepend tag, which may have been updated.
2367      JDWP::Set1(buf_, tag_);
2368      return false;
2369    }
2370    const ScopedObjectAccessUnchecked& soa_;
2371    const JDWP::FrameId frame_id_;
2372    const int slot_;
2373    JDWP::JdwpTag tag_;
2374    uint8_t* const buf_;
2375    const size_t width_;
2376    JDWP::JdwpError error_;
2377  };
2378
2379  ScopedObjectAccessUnchecked soa(Thread::Current());
2380  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2381  Thread* thread;
2382  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2383  if (error != JDWP::ERR_NONE) {
2384    return error;
2385  }
2386  // TODO: check that the thread is suspended by the debugger?
2387  UniquePtr<Context> context(Context::Create());
2388  GetLocalVisitor visitor(soa, thread, context.get(), frame_id, slot, tag, buf, width);
2389  visitor.WalkStack();
2390  return visitor.error_;
2391}
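// Note on the 8-byte cases above: doubles and longs occupy a register pair, so the visitor
// reads the low word from 'reg' and the high word from 'reg + 1' and reassembles the value as
// (hi << 32) | lo before writing it big-endian into the reply buffer.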
2392
2393JDWP::JdwpError Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
2394                                   JDWP::JdwpTag tag, uint64_t value, size_t width) {
2395  struct SetLocalVisitor : public StackVisitor {
2396    SetLocalVisitor(Thread* thread, Context* context,
2397                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value,
2398                    size_t width)
2399        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2400        : StackVisitor(thread, context),
2401          frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width),
2402          error_(JDWP::ERR_NONE) {}
2403
2404    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
2405    // annotalysis.
2406    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
2407      if (GetFrameId() != frame_id_) {
2408        return true;  // Not our frame, carry on.
2409      }
2410      // TODO: check that the tag is compatible with the actual type of the slot!
2411      // TODO: check slot is valid for this method or return INVALID_SLOT error.
2412      mirror::ArtMethod* m = GetMethod();
2413      if (m->IsNative()) {
2414        // We can't write a local value into a native method's frame.
2415        error_ = JDWP::ERR_OPAQUE_FRAME;
2416        return false;
2417      }
2418      uint16_t reg = DemangleSlot(slot_, m);
2419
2420      switch (tag_) {
2421        case JDWP::JT_BOOLEAN:
2422        case JDWP::JT_BYTE:
2423          CHECK_EQ(width_, 1U);
2424          SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg);
2425          break;
2426        case JDWP::JT_SHORT:
2427        case JDWP::JT_CHAR:
2428          CHECK_EQ(width_, 2U);
2429          SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg);
2430          break;
2431        case JDWP::JT_INT:
2432          CHECK_EQ(width_, 4U);
2433          SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg);
2434          break;
2435        case JDWP::JT_FLOAT:
2436          CHECK_EQ(width_, 4U);
2437          SetVReg(m, reg, static_cast<uint32_t>(value_), kFloatVReg);
2438          break;
2439        case JDWP::JT_ARRAY:
2440        case JDWP::JT_OBJECT:
2441        case JDWP::JT_STRING:
2442        {
2443          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
2444          mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value_));
2445          if (o == ObjectRegistry::kInvalidObject) {
2446            UNIMPLEMENTED(FATAL) << "return an error code when given an invalid object to store";
2447          }
2448          SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)), kReferenceVReg);
2449        }
2450        break;
2451        case JDWP::JT_DOUBLE:
2452          CHECK_EQ(width_, 8U);
2453          SetVReg(m, reg, static_cast<uint32_t>(value_), kDoubleLoVReg);
2454          SetVReg(m, reg + 1, static_cast<uint32_t>(value_ >> 32), kDoubleHiVReg);
2455          break;
2456        case JDWP::JT_LONG:
2457          CHECK_EQ(width_, 8U);
2458          SetVReg(m, reg, static_cast<uint32_t>(value_), kLongLoVReg);
2459          SetVReg(m, reg + 1, static_cast<uint32_t>(value_ >> 32), kLongHiVReg);
2460          break;
2461        default:
2462          LOG(FATAL) << "Unknown tag " << tag_;
2463          break;
2464      }
2465      return false;
2466    }
2467
2468    const JDWP::FrameId frame_id_;
2469    const int slot_;
2470    const JDWP::JdwpTag tag_;
2471    const uint64_t value_;
2472    const size_t width_;
2473    JDWP::JdwpError error_;
2474  };
2475
2476  ScopedObjectAccessUnchecked soa(Thread::Current());
2477  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2478  Thread* thread;
2479  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
2480  if (error != JDWP::ERR_NONE) {
2481    return error;
2482  }
2483  // TODO: check that the thread is suspended by the debugger?
2484  UniquePtr<Context> context(Context::Create());
2485  SetLocalVisitor visitor(thread, context.get(), frame_id, slot, tag, value, width);
2486  visitor.WalkStack();
2487  return visitor.error_;
2488}
2489
2490JDWP::ObjectId Dbg::GetThisObjectIdForEvent(mirror::Object* this_object) {
2491  // If 'this_object' isn't already in the registry, we know that we're not looking for it, so
2492  // there's no point adding it to the registry and burning through ids.
2493  // When registering an event request with an instance filter, we've been given an existing object
2494  // id so it must already be present in the registry when the event fires.
2495  JDWP::ObjectId this_id = 0;
2496  if (this_object != nullptr && gRegistry->Contains(this_object)) {
2497    this_id = gRegistry->Add(this_object);
2498  }
2499  return this_id;
2500}
2501
2502void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object,
2503                            int event_flags, const JValue* return_value) {
2504  if (!IsDebuggerActive()) {
2505    return;
2506  }
2507  DCHECK(m != nullptr);
2508  DCHECK_EQ(m->IsStatic(), this_object == nullptr);
2509  JDWP::JdwpLocation location;
2510  SetLocation(location, m, dex_pc);
2511
2512  // We need 'this' for InstanceOnly filters only.
2513  JDWP::ObjectId this_id = GetThisObjectIdForEvent(this_object);
2514  gJdwpState->PostLocationEvent(&location, this_id, event_flags, return_value);
2515}
2516
2517void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc,
2518                               mirror::Object* this_object, mirror::ArtField* f) {
2519  if (!IsDebuggerActive()) {
2520    return;
2521  }
2522  DCHECK(m != nullptr);
2523  DCHECK(f != nullptr);
2524  JDWP::JdwpLocation location;
2525  SetLocation(location, m, dex_pc);
2526
2527  JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass());
2528  JDWP::FieldId field_id = ToFieldId(f);
2529  JDWP::ObjectId this_id = gRegistry->Add(this_object);
2530
2531  gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, nullptr, false);
2532}
2533
2534void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc,
2535                                     mirror::Object* this_object, mirror::ArtField* f,
2536                                     const JValue* field_value) {
2537  if (!IsDebuggerActive()) {
2538    return;
2539  }
2540  DCHECK(m != nullptr);
2541  DCHECK(f != nullptr);
2542  DCHECK(field_value != nullptr);
2543  JDWP::JdwpLocation location;
2544  SetLocation(location, m, dex_pc);
2545
2546  JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass());
2547  JDWP::FieldId field_id = ToFieldId(f);
2548  JDWP::ObjectId this_id = gRegistry->Add(this_object);
2549
2550  gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, field_value, true);
2551}
2552
2553void Dbg::PostException(const ThrowLocation& throw_location,
2554                        mirror::ArtMethod* catch_method,
2555                        uint32_t catch_dex_pc, mirror::Throwable* exception_object) {
2556  if (!IsDebuggerActive()) {
2557    return;
2558  }
2559
2560  JDWP::JdwpLocation jdwp_throw_location;
2561  SetLocation(jdwp_throw_location, throw_location.GetMethod(), throw_location.GetDexPc());
2562  JDWP::JdwpLocation catch_location;
2563  SetLocation(catch_location, catch_method, catch_dex_pc);
2564
2565  // We need 'this' for InstanceOnly filters only.
2566  JDWP::ObjectId this_id = GetThisObjectIdForEvent(throw_location.GetThis());
2567  JDWP::ObjectId exception_id = gRegistry->Add(exception_object);
2568  JDWP::RefTypeId exception_class_id = gRegistry->AddRefType(exception_object->GetClass());
2569
2570  gJdwpState->PostException(&jdwp_throw_location, exception_id, exception_class_id, &catch_location,
2571                            this_id);
2572}
2573
2574void Dbg::PostClassPrepare(mirror::Class* c) {
2575  if (!IsDebuggerActive()) {
2576    return;
2577  }
2578
2579  // OLD-TODO - we currently always send both "verified" and "prepared" since
2580  // debuggers seem to like that.  There might be some advantage to honesty,
2581  // since the class may not yet be verified.
2582  int state = JDWP::CS_VERIFIED | JDWP::CS_PREPARED;
2583  JDWP::JdwpTypeTag tag = GetTypeTag(c);
2584  gJdwpState->PostClassPrepare(tag, gRegistry->Add(c),
2585                               ClassHelper(c).GetDescriptor(), state);
2586}
2587
2588void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object,
2589                         mirror::ArtMethod* m, uint32_t dex_pc) {
2590  if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) {
2591    return;
2592  }
2593
2594  int event_flags = 0;
2595
2596  if (IsBreakpoint(m, dex_pc)) {
2597    event_flags |= kBreakpoint;
2598  }
2599
2600  // If the debugger is single-stepping one of our threads, check to
2601  // see if we're that thread and we've reached a step point.
2602  const SingleStepControl* single_step_control = thread->GetSingleStepControl();
2603  DCHECK(single_step_control != nullptr);
2604  if (single_step_control->is_active) {
2605    CHECK(!m->IsNative());
2606    if (single_step_control->step_depth == JDWP::SD_INTO) {
2607      // Step into method calls.  We break when the line number
2608      // or method pointer changes.  If we're in SS_MIN mode, we
2609      // always stop.
2610      if (single_step_control->method != m) {
2611        event_flags |= kSingleStep;
2612        VLOG(jdwp) << "SS new method";
2613      } else if (single_step_control->step_size == JDWP::SS_MIN) {
2614        event_flags |= kSingleStep;
2615        VLOG(jdwp) << "SS new instruction";
2616      } else if (single_step_control->ContainsDexPc(dex_pc)) {
2617        event_flags |= kSingleStep;
2618        VLOG(jdwp) << "SS new line";
2619      }
2620    } else if (single_step_control->step_depth == JDWP::SD_OVER) {
2621      // Step over method calls.  We break when the line number is
2622      // different and the frame depth is <= the original frame
2623      // depth.  (We can't just compare on the method, because we
2624      // might get unrolled past it by an exception, and it's tricky
2625      // to identify recursion.)
2626
2627      int stack_depth = GetStackDepth(thread);
2628
2629      if (stack_depth < single_step_control->stack_depth) {
2630        // Popped up one or more frames, always trigger.
2631        event_flags |= kSingleStep;
2632        VLOG(jdwp) << "SS method pop";
2633      } else if (stack_depth == single_step_control->stack_depth) {
2634        // Same depth, see if we moved.
2635        if (single_step_control->step_size == JDWP::SS_MIN) {
2636          event_flags |= kSingleStep;
2637          VLOG(jdwp) << "SS new instruction";
2638        } else if (single_step_control->ContainsDexPc(dex_pc)) {
2639          event_flags |= kSingleStep;
2640          VLOG(jdwp) << "SS new line";
2641        }
2642      }
2643    } else {
2644      CHECK_EQ(single_step_control->step_depth, JDWP::SD_OUT);
2645      // Return from the current method.  We break when the frame
2646      // depth pops up.
2647
2648      // This differs from the "method exit" break in that it stops
2649      // with the PC at the next instruction in the returned-to
2650      // function, rather than the end of the returning function.
2651
2652      int stack_depth = GetStackDepth(thread);
2653      if (stack_depth < single_step_control->stack_depth) {
2654        event_flags |= kSingleStep;
2655        VLOG(jdwp) << "SS method pop";
2656      }
2657    }
2658  }
2659
2660  // If there's something interesting going on, see if it matches one
2661  // of the debugger filters.
2662  if (event_flags != 0) {
2663    Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, nullptr);
2664  }
2665}
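// Summary of the single-step decision above (a sketch): SD_INTO stops on any method change, and
// within the same method on every instruction (SS_MIN) or when the ContainsDexPc test reports a
// new source line; SD_OVER applies the same tests only at the original stack depth and always
// stops once frames have been popped; SD_OUT stops only when the stack depth has decreased.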
2666
2667// Process request while all mutator threads are suspended.
2668void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) {
2669  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
2670  switch (request.kind) {
2671    case DeoptimizationRequest::kNothing:
2672      LOG(WARNING) << "Ignoring empty deoptimization request.";
2673      break;
2674    case DeoptimizationRequest::kFullDeoptimization:
2675      VLOG(jdwp) << "Deoptimize the world ...";
2676      instrumentation->DeoptimizeEverything();
2677      VLOG(jdwp) << "Deoptimize the world DONE";
2678      break;
2679    case DeoptimizationRequest::kFullUndeoptimization:
2680      VLOG(jdwp) << "Undeoptimize the world ...";
2681      instrumentation->UndeoptimizeEverything();
2682      VLOG(jdwp) << "Undeoptimize the world DONE";
2683      break;
2684    case DeoptimizationRequest::kSelectiveDeoptimization:
2685      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.method) << " ...";
2686      instrumentation->Deoptimize(request.method);
2687      VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.method) << " DONE";
2688      break;
2689    case DeoptimizationRequest::kSelectiveUndeoptimization:
2690      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.method) << " ...";
2691      instrumentation->Undeoptimize(request.method);
2692      VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.method) << " DONE";
2693      break;
2694    default:
2695      LOG(FATAL) << "Unsupported deoptimization request kind " << request.kind;
2696      break;
2697  }
2698}
2699
2700void Dbg::DelayFullUndeoptimization() {
2701  MutexLock mu(Thread::Current(), *deoptimization_lock_);
2702  ++delayed_full_undeoptimization_count_;
2703  DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_);
2704}
2705
2706void Dbg::ProcessDelayedFullUndeoptimizations() {
2707  // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization).
2708  {
2709    MutexLock mu(Thread::Current(), *deoptimization_lock_);
2710    while (delayed_full_undeoptimization_count_ > 0) {
2711      DeoptimizationRequest req;
2712      req.kind = DeoptimizationRequest::kFullUndeoptimization;
2713      req.method = nullptr;
2714      RequestDeoptimizationLocked(req);
2715      --delayed_full_undeoptimization_count_;
2716    }
2717  }
2718  ManageDeoptimization();
2719}
2720
2721void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) {
2722  if (req.kind == DeoptimizationRequest::kNothing) {
2723    // Nothing to do.
2724    return;
2725  }
2726  MutexLock mu(Thread::Current(), *deoptimization_lock_);
2727  RequestDeoptimizationLocked(req);
2728}
2729
2730void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) {
2731  switch (req.kind) {
2732    case DeoptimizationRequest::kFullDeoptimization: {
2733      DCHECK(req.method == nullptr);
2734      if (full_deoptimization_event_count_ == 0) {
2735        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2736                   << " for full deoptimization";
2737        deoptimization_requests_.push_back(req);
2738      }
2739      ++full_deoptimization_event_count_;
2740      break;
2741    }
2742    case DeoptimizationRequest::kFullUndeoptimization: {
2743      DCHECK(req.method == nullptr);
2744      DCHECK_GT(full_deoptimization_event_count_, 0U);
2745      --full_deoptimization_event_count_;
2746      if (full_deoptimization_event_count_ == 0) {
2747        VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2748                   << " for full undeoptimization";
2749        deoptimization_requests_.push_back(req);
2750      }
2751      break;
2752    }
2753    case DeoptimizationRequest::kSelectiveDeoptimization: {
2754      DCHECK(req.method != nullptr);
2755      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2756                 << " for deoptimization of " << PrettyMethod(req.method);
2757      deoptimization_requests_.push_back(req);
2758      break;
2759    }
2760    case DeoptimizationRequest::kSelectiveUndeoptimization: {
2761      DCHECK(req.method != nullptr);
2762      VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size()
2763                 << " for undeoptimization of " << PrettyMethod(req.method);
2764      deoptimization_requests_.push_back(req);
2765      break;
2766    }
2767    default: {
2768      LOG(FATAL) << "Unknown deoptimization request kind " << req.kind;
2769      break;
2770    }
2771  }
2772}
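// Illustration of the counting above (a sketch): if two events each request full
// deoptimization, only the first enqueues an actual kFullDeoptimization request and the counter
// reaches 2; the matching kFullUndeoptimization requests decrement it, and only the one that
// brings the counter back to 0 enqueues the undeoptimization, so the world stays deoptimized
// while any full-deoptimization event is still live.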
2773
2774void Dbg::ManageDeoptimization() {
2775  Thread* const self = Thread::Current();
2776  {
2777    // Avoid suspend/resume if there is no pending request.
2778    MutexLock mu(self, *deoptimization_lock_);
2779    if (deoptimization_requests_.empty()) {
2780      return;
2781    }
2782  }
2783  CHECK_EQ(self->GetState(), kRunnable);
2784  self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization);
2785  // We need to suspend mutator threads first.
2786  Runtime* const runtime = Runtime::Current();
2787  runtime->GetThreadList()->SuspendAll();
2788  const ThreadState old_state = self->SetStateUnsafe(kRunnable);
2789  {
2790    MutexLock mu(self, *deoptimization_lock_);
2791    size_t req_index = 0;
2792    for (const DeoptimizationRequest& request : deoptimization_requests_) {
2793      VLOG(jdwp) << "Process deoptimization request #" << req_index++;
2794      ProcessDeoptimizationRequest(request);
2795    }
2796    deoptimization_requests_.clear();
2797  }
2798  CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable);
2799  runtime->GetThreadList()->ResumeAll();
2800  self->TransitionFromSuspendedToRunnable();
2801}
2802
2803static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
2804    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2805  MethodHelper mh(m);
2806  const DexFile::CodeItem* code_item = mh.GetCodeItem();
2807  if (code_item == nullptr) {
2808    // TODO: We should not be asked to watch a location in a native or abstract method, so
2809    // the code item should never be null. We could just check that we never encounter this case.
2810    return false;
2811  }
2812  SirtRef<mirror::DexCache> dex_cache(self, mh.GetDexCache());
2813  SirtRef<mirror::ClassLoader> class_loader(self, mh.GetClassLoader());
2814  verifier::MethodVerifier verifier(&mh.GetDexFile(), &dex_cache, &class_loader,
2815                                    &mh.GetClassDef(), code_item, m->GetDexMethodIndex(), m,
2816                                    m->GetAccessFlags(), false, true);
2817  // Note: we don't need to verify the method.
2818  return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
2819}
2820
2821static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m)
2822    EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) {
2823  for (const Breakpoint& breakpoint : gBreakpoints) {
2824    if (breakpoint.method == m) {
2825      return &breakpoint;
2826    }
2827  }
2828  return nullptr;
2829}
2830
2831// Sanity checks all existing breakpoints on the same method.
2832static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization)
2833    EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_)  {
2834  if (kIsDebugBuild) {
2835    for (const Breakpoint& breakpoint : gBreakpoints) {
2836      CHECK_EQ(need_full_deoptimization, breakpoint.need_full_deoptimization);
2837    }
2838    if (need_full_deoptimization) {
2839      // We should have deoptimized everything but not "selectively" deoptimized this method.
2840      CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized());
2841      CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
2842    } else {
2843      // We should have "selectively" deoptimized this method.
2844      // Note: while we have not deoptimized everything for this method, we may have done it for
2845      // another event.
2846      CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
2847    }
2848  }
2849}
2850
2851// Installs a breakpoint at the specified location. Also indicates through the deoptimization
2852// request if we need to deoptimize.
2853void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
2854  Thread* const self = Thread::Current();
2855  mirror::ArtMethod* m = FromMethodId(location->method_id);
2856  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
2857
2858  MutexLock mu(self, *Locks::breakpoint_lock_);
2859  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
2860  bool need_full_deoptimization;
2861  if (existing_breakpoint == nullptr) {
2862    // There is no breakpoint on this method yet: we need to deoptimize. If this method may be
2863    // inlined, we deoptimize everything; otherwise we deoptimize only this method.
2864    need_full_deoptimization = IsMethodPossiblyInlined(self, m);
2865    if (need_full_deoptimization) {
2866      req->kind = DeoptimizationRequest::kFullDeoptimization;
2867      req->method = nullptr;
2868    } else {
2869      req->kind = DeoptimizationRequest::kSelectiveDeoptimization;
2870      req->method = m;
2871    }
2872  } else {
2873    // There is at least one breakpoint for this method: we don't need to deoptimize.
2874    req->kind = DeoptimizationRequest::kNothing;
2875    req->method = nullptr;
2876
2877    need_full_deoptimization = existing_breakpoint->need_full_deoptimization;
2878    SanityCheckExistingBreakpoints(m, need_full_deoptimization);
2879  }
2880
2881  gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization));
2882  VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": "
2883             << gBreakpoints[gBreakpoints.size() - 1];
2884}
2885
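// An illustrative sketch of the decision WatchLocation makes above. The helper is hypothetical
// and not used by the runtime; it only restates how the deoptimization request fields are filled
// in from "is this the first breakpoint in the method?" and "may the method be inlined?".
static void FillDeoptimizationRequestForSketch(bool first_breakpoint_in_method,
                                               bool method_may_be_inlined,
                                               mirror::ArtMethod* m,
                                               DeoptimizationRequest* req) {
  if (!first_breakpoint_in_method) {
    // Some breakpoint already forced (full or selective) deoptimization: nothing more to do.
    req->kind = DeoptimizationRequest::kNothing;
    req->method = nullptr;
  } else if (method_may_be_inlined) {
    // The method may have been inlined into callers, so everything must be deoptimized.
    req->kind = DeoptimizationRequest::kFullDeoptimization;
    req->method = nullptr;
  } else {
    // Only this method needs to run under the interpreter.
    req->kind = DeoptimizationRequest::kSelectiveDeoptimization;
    req->method = m;
  }
}
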
2886// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization
2887// request if we need to undeoptimize.
2888void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) {
2889  mirror::ArtMethod* m = FromMethodId(location->method_id);
2890  DCHECK(m != nullptr) << "No method for method id " << location->method_id;
2891
2892  MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_);
2893  bool need_full_deoptimization = false;
2894  for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) {
2895    if (gBreakpoints[i].method == m && gBreakpoints[i].dex_pc == location->dex_pc) {
2896      VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i];
2897      need_full_deoptimization = gBreakpoints[i].need_full_deoptimization;
2898      DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m));
2899      gBreakpoints.erase(gBreakpoints.begin() + i);
2900      break;
2901    }
2902  }
2903  const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m);
2904  if (existing_breakpoint == nullptr) {
2905    // There is no more breakpoint on this method: we need to undeoptimize.
2906    if (need_full_deoptimization) {
2907      // This method required full deoptimization: we need to undeoptimize everything.
2908      req->kind = DeoptimizationRequest::kFullUndeoptimization;
2909      req->method = nullptr;
2910    } else {
2911      // This method required selective deoptimization: we need to undeoptimize only that method.
2912      req->kind = DeoptimizationRequest::kSelectiveUndeoptimization;
2913      req->method = m;
2914    }
2915  } else {
2916    // There is at least one breakpoint for this method: we don't need to undeoptimize.
2917    req->kind = DeoptimizationRequest::kNothing;
2918    req->method = nullptr;
2919    SanityCheckExistingBreakpoints(m, need_full_deoptimization);
2920  }
2921}
2922
2923// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't
2924// cause suspension if the thread is the current thread.
2925class ScopedThreadSuspension {
2926 public:
2927  ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id)
2928      LOCKS_EXCLUDED(Locks::thread_list_lock_)
2929      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) :
2930      thread_(NULL),
2931      error_(JDWP::ERR_NONE),
2932      self_suspend_(false),
2933      other_suspend_(false) {
2934    ScopedObjectAccessUnchecked soa(self);
2935    {
2936      MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
2937      error_ = DecodeThread(soa, thread_id, thread_);
2938    }
2939    if (error_ == JDWP::ERR_NONE) {
2940      if (thread_ == soa.Self()) {
2941        self_suspend_ = true;
2942      } else {
2943        soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
2944        jobject thread_peer = gRegistry->GetJObject(thread_id);
2945        bool timed_out;
2946        Thread* suspended_thread = ThreadList::SuspendThreadByPeer(thread_peer, true, true,
2947                                                                   &timed_out);
2948        CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension);
2949        if (suspended_thread == NULL) {
2950          // Thread terminated from under us while suspending.
2951          error_ = JDWP::ERR_INVALID_THREAD;
2952        } else {
2953          CHECK_EQ(suspended_thread, thread_);
2954          other_suspend_ = true;
2955        }
2956      }
2957    }
2958  }
2959
2960  Thread* GetThread() const {
2961    return thread_;
2962  }
2963
2964  JDWP::JdwpError GetError() const {
2965    return error_;
2966  }
2967
2968  ~ScopedThreadSuspension() {
2969    if (other_suspend_) {
2970      Runtime::Current()->GetThreadList()->Resume(thread_, true);
2971    }
2972  }
2973
2974 private:
2975  Thread* thread_;
2976  JDWP::JdwpError error_;
2977  bool self_suspend_;
2978  bool other_suspend_;
2979};
2980
2981JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size,
2982                                   JDWP::JdwpStepDepth step_depth) {
2983  Thread* self = Thread::Current();
2984  ScopedThreadSuspension sts(self, thread_id);
2985  if (sts.GetError() != JDWP::ERR_NONE) {
2986    return sts.GetError();
2987  }
2988
2989  //
2990  // Work out what Method* we're in, the current line number, and how deep the stack currently
2991  // is for step-out.
2992  //
2993
2994  struct SingleStepStackVisitor : public StackVisitor {
2995    explicit SingleStepStackVisitor(Thread* thread, SingleStepControl* single_step_control,
2996                                    int32_t* line_number)
2997        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2998        : StackVisitor(thread, NULL), single_step_control_(single_step_control),
2999          line_number_(line_number) {
3000      DCHECK_EQ(single_step_control_, thread->GetSingleStepControl());
3001      single_step_control_->method = NULL;
3002      single_step_control_->stack_depth = 0;
3003    }
3004
3005    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
3006    // annotalysis.
3007    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
3008      mirror::ArtMethod* m = GetMethod();
3009      if (!m->IsRuntimeMethod()) {
3010        ++single_step_control_->stack_depth;
3011        if (single_step_control_->method == NULL) {
3012          mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache();
3013          single_step_control_->method = m;
3014          *line_number_ = -1;
3015          if (dex_cache != NULL) {
3016            const DexFile& dex_file = *dex_cache->GetDexFile();
3017            *line_number_ = dex_file.GetLineNumFromPC(m, GetDexPc());
3018          }
3019        }
3020      }
3021      return true;
3022    }
3023
3024    SingleStepControl* const single_step_control_;
3025    int32_t* const line_number_;
3026  };
3027
3028  Thread* const thread = sts.GetThread();
3029  SingleStepControl* const single_step_control = thread->GetSingleStepControl();
3030  DCHECK(single_step_control != nullptr);
3031  int32_t line_number = -1;
3032  SingleStepStackVisitor visitor(thread, single_step_control, &line_number);
3033  visitor.WalkStack();
3034
3035  //
3036  // Find the dex_pc values that correspond to the current line, for line-based single-stepping.
3037  //
3038
3039  struct DebugCallbackContext {
3040    explicit DebugCallbackContext(SingleStepControl* single_step_control, int32_t line_number,
3041                                  const DexFile::CodeItem* code_item)
3042      : single_step_control_(single_step_control), line_number_(line_number), code_item_(code_item),
3043        last_pc_valid(false), last_pc(0) {
3044    }
3045
3046    static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) {
3047      DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context);
3048      if (static_cast<int32_t>(line_number) == context->line_number_) {
3049        if (!context->last_pc_valid) {
3050          // Everything from this address until the next line change is ours.
3051          context->last_pc = address;
3052          context->last_pc_valid = true;
3053        }
3054        // Otherwise, if we're already in a valid range for this line,
3055        // just keep going (shouldn't really happen)...
3056      } else if (context->last_pc_valid) {  // and the line number is new
3057        // Add everything from the last entry up until here to the set
3058        for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) {
3059          context->single_step_control_->dex_pcs.insert(dex_pc);
3060        }
3061        context->last_pc_valid = false;
3062      }
3063      return false;  // There may be multiple entries for any given line.
3064    }
3065
3066    ~DebugCallbackContext() {
3067      // If the line number was the last in the position table...
3068      if (last_pc_valid) {
3069        size_t end = code_item_->insns_size_in_code_units_;
3070        for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) {
3071          single_step_control_->dex_pcs.insert(dex_pc);
3072        }
3073      }
3074    }
3075
3076    SingleStepControl* const single_step_control_;
3077    const int32_t line_number_;
3078    const DexFile::CodeItem* const code_item_;
3079    bool last_pc_valid;
3080    uint32_t last_pc;
3081  };
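  // For example: if the debug info says line 42 starts at dex pc 0x05 and the next line entry
  // starts at dex pc 0x0b, then for line_number_ == 42 Callback() records last_pc = 0x05 on the
  // first entry and, on seeing the 0x0b entry, inserts dex pcs 0x05..0x0a into dex_pcs. If line 42
  // were the last entry in the table, the destructor would instead fill in pcs up to
  // insns_size_in_code_units_.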
3082  single_step_control->dex_pcs.clear();
3083  mirror::ArtMethod* m = single_step_control->method;
3084  if (!m->IsNative()) {
3085    MethodHelper mh(m);
3086    const DexFile::CodeItem* const code_item = mh.GetCodeItem();
3087    DebugCallbackContext context(single_step_control, line_number, code_item);
3088    mh.GetDexFile().DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(),
3089                                    DebugCallbackContext::Callback, NULL, &context);
3090  }
3091
3092  //
3093  // Everything else...
3094  //
3095
3096  single_step_control->step_size = step_size;
3097  single_step_control->step_depth = step_depth;
3098  single_step_control->is_active = true;
3099
3100  if (VLOG_IS_ON(jdwp)) {
3101    VLOG(jdwp) << "Single-step thread: " << *thread;
3102    VLOG(jdwp) << "Single-step step size: " << single_step_control->step_size;
3103    VLOG(jdwp) << "Single-step step depth: " << single_step_control->step_depth;
3104    VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->method);
3105    VLOG(jdwp) << "Single-step current line: " << line_number;
3106    VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->stack_depth;
3107    VLOG(jdwp) << "Single-step dex_pc values:";
3108    for (uint32_t dex_pc : single_step_control->dex_pcs) {
3109      VLOG(jdwp) << StringPrintf(" %#x", dex_pc);
3110    }
3111  }
3112
3113  return JDWP::ERR_NONE;
3114}
3115
3116void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) {
3117  ScopedObjectAccessUnchecked soa(Thread::Current());
3118  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3119  Thread* thread;
3120  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
3121  if (error == JDWP::ERR_NONE) {
3122    SingleStepControl* single_step_control = thread->GetSingleStepControl();
3123    DCHECK(single_step_control != nullptr);
3124    single_step_control->Clear();
3125  }
3126}
3127
3128static char JdwpTagToShortyChar(JDWP::JdwpTag tag) {
3129  switch (tag) {
3130    default:
3131      LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag);
3132
3133    // Primitives.
3134    case JDWP::JT_BYTE:    return 'B';
3135    case JDWP::JT_CHAR:    return 'C';
3136    case JDWP::JT_FLOAT:   return 'F';
3137    case JDWP::JT_DOUBLE:  return 'D';
3138    case JDWP::JT_INT:     return 'I';
3139    case JDWP::JT_LONG:    return 'J';
3140    case JDWP::JT_SHORT:   return 'S';
3141    case JDWP::JT_VOID:    return 'V';
3142    case JDWP::JT_BOOLEAN: return 'Z';
3143
3144    // Reference types.
3145    case JDWP::JT_ARRAY:
3146    case JDWP::JT_OBJECT:
3147    case JDWP::JT_STRING:
3148    case JDWP::JT_THREAD:
3149    case JDWP::JT_THREAD_GROUP:
3150    case JDWP::JT_CLASS_LOADER:
3151    case JDWP::JT_CLASS_OBJECT:
3152      return 'L';
3153  }
3154}
3155
3156JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id,
3157                                  JDWP::RefTypeId class_id, JDWP::MethodId method_id,
3158                                  uint32_t arg_count, uint64_t* arg_values,
3159                                  JDWP::JdwpTag* arg_types, uint32_t options,
3160                                  JDWP::JdwpTag* pResultTag, uint64_t* pResultValue,
3161                                  JDWP::ObjectId* pExceptionId) {
3162  ThreadList* thread_list = Runtime::Current()->GetThreadList();
3163
3164  Thread* targetThread = NULL;
3165  DebugInvokeReq* req = NULL;
3166  Thread* self = Thread::Current();
3167  {
3168    ScopedObjectAccessUnchecked soa(self);
3169    MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
3170    JDWP::JdwpError error = DecodeThread(soa, thread_id, targetThread);
3171    if (error != JDWP::ERR_NONE) {
3172      LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id;
3173      return error;
3174    }
3175    req = targetThread->GetInvokeReq();
3176    if (!req->ready) {
3177      LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread;
3178      return JDWP::ERR_INVALID_THREAD;
3179    }
3180
3181    /*
3182     * We currently have a bug where we don't successfully resume the
3183     * target thread if the suspend count is too deep.  We're expected to
3184     * require one "resume" for each "suspend", but when asked to execute
3185     * a method we have to resume fully and then re-suspend it back to the
3186     * same level.  (The easiest way to cause this is to type "suspend"
3187     * multiple times in jdb.)
3188     *
3189     * It's unclear what this means when the event specifies "resume all"
3190     * and some threads are suspended more deeply than others.  This is
3191     * a rare problem, so for now we just prevent it from hanging forever
3192     * by rejecting the method invocation request.  Without this, we will
3193     * be stuck waiting on a suspended thread.
3194     */
3195    int suspend_count;
3196    {
3197      MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_);
3198      suspend_count = targetThread->GetSuspendCount();
3199    }
3200    if (suspend_count > 1) {
3201      LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count;
3202      return JDWP::ERR_THREAD_SUSPENDED;  // Probably not expected here.
3203    }
3204
3205    JDWP::JdwpError status;
3206    mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id);
3207    if (receiver == ObjectRegistry::kInvalidObject) {
3208      return JDWP::ERR_INVALID_OBJECT;
3209    }
3210
3211    mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id);
3212    if (thread == ObjectRegistry::kInvalidObject) {
3213      return JDWP::ERR_INVALID_OBJECT;
3214    }
3215    // TODO: check that 'thread' is actually a java.lang.Thread!
3216
3217    mirror::Class* c = DecodeClass(class_id, status);
3218    if (c == NULL) {
3219      return status;
3220    }
3221
3222    mirror::ArtMethod* m = FromMethodId(method_id);
3223    if (m->IsStatic() != (receiver == NULL)) {
3224      return JDWP::ERR_INVALID_METHODID;
3225    }
3226    if (m->IsStatic()) {
3227      if (m->GetDeclaringClass() != c) {
3228        return JDWP::ERR_INVALID_METHODID;
3229      }
3230    } else {
3231      if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
3232        return JDWP::ERR_INVALID_METHODID;
3233      }
3234    }
3235
3236    // Check the argument list matches the method.
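    // For example: a method "int foo(Object o, boolean b)" has shorty "ILZ". The first character
    // encodes the return type, which is why GetShortyLength() - 1 is the parameter count and why
    // shorty[i + 1] is compared against the tag of argument i below.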
3237    MethodHelper mh(m);
3238    if (mh.GetShortyLength() - 1 != arg_count) {
3239      return JDWP::ERR_ILLEGAL_ARGUMENT;
3240    }
3241    const char* shorty = mh.GetShorty();
3242    const DexFile::TypeList* types = mh.GetParameterTypeList();
3243    for (size_t i = 0; i < arg_count; ++i) {
3244      if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) {
3245        return JDWP::ERR_ILLEGAL_ARGUMENT;
3246      }
3247
3248      if (shorty[i + 1] == 'L') {
3249        // Did we really get an argument of an appropriate reference type?
3250        mirror::Class* parameter_type = mh.GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_);
3251        mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i]);
3252        if (argument == ObjectRegistry::kInvalidObject) {
3253          return JDWP::ERR_INVALID_OBJECT;
3254        }
3255        if (argument != NULL && !argument->InstanceOf(parameter_type)) {
3256          return JDWP::ERR_ILLEGAL_ARGUMENT;
3257        }
3258
3259        // Turn the on-the-wire ObjectId into a jobject.
3260        jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]);
3261        v.l = gRegistry->GetJObject(arg_values[i]);
3262      }
3263    }
3264
3265    req->receiver = receiver;
3266    req->thread = thread;
3267    req->klass = c;
3268    req->method = m;
3269    req->arg_count = arg_count;
3270    req->arg_values = arg_values;
3271    req->options = options;
3272    req->invoke_needed = true;
3273  }
3274
3275  // Releasing the thread list lock here is a bit risky: if the thread goes away we are left
3276  // holding a stale Thread pointer. But we must release the lock before the ResumeAllThreads
3277  // call, and it's unwise to hold it during WaitForSuspend.
3278
3279  {
3280    /*
3281     * We change our (JDWP thread) status, which should be THREAD_RUNNING,
3282     * so we can suspend for a GC if the invoke request causes us to
3283     * run out of memory.  It's also a good idea to change it before locking
3284     * the invokeReq mutex, although that should never be held for long.
3285     */
3286    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend);
3287
3288    VLOG(jdwp) << "    Transferring control to event thread";
3289    {
3290      MutexLock mu(self, req->lock);
3291
3292      if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3293        VLOG(jdwp) << "      Resuming all threads";
3294        thread_list->UndoDebuggerSuspensions();
3295      } else {
3296        VLOG(jdwp) << "      Resuming event thread only";
3297        thread_list->Resume(targetThread, true);
3298      }
3299
3300      // Wait for the request to finish executing.
3301      while (req->invoke_needed) {
3302        req->cond.Wait(self);
3303      }
3304    }
3305    VLOG(jdwp) << "    Control has returned from event thread";
3306
3307    /* wait for thread to re-suspend itself */
3308    SuspendThread(thread_id, false /* request_suspension */);
3309    self->TransitionFromSuspendedToRunnable();
3310  }
3311
3312  /*
3313   * Suspend the threads.  We waited for the target thread to suspend
3314   * itself, so all we need to do is suspend the others.
3315   *
3316   * The suspendAllThreads() call will double-suspend the event thread,
3317   * so we want to resume the target thread once to keep the books straight.
3318   */
3319  if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) {
3320    self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension);
3321    VLOG(jdwp) << "      Suspending all threads";
3322    thread_list->SuspendAllForDebugger();
3323    self->TransitionFromSuspendedToRunnable();
3324    VLOG(jdwp) << "      Resuming event thread to balance the count";
3325    thread_list->Resume(targetThread, true);
3326  }
3327
3328  // Copy the result.
3329  *pResultTag = req->result_tag;
3330  if (IsPrimitiveTag(req->result_tag)) {
3331    *pResultValue = req->result_value.GetJ();
3332  } else {
3333    *pResultValue = gRegistry->Add(req->result_value.GetL());
3334  }
3335  *pExceptionId = req->exception;
3336  return req->error;
3337}
3338
3339void Dbg::ExecuteMethod(DebugInvokeReq* pReq) {
3340  ScopedObjectAccess soa(Thread::Current());
3341
3342  // We can be called while an exception is pending. We need
3343  // to preserve that across the method invocation.
3344  SirtRef<mirror::Object> old_throw_this_object(soa.Self(), NULL);
3345  SirtRef<mirror::ArtMethod> old_throw_method(soa.Self(), NULL);
3346  SirtRef<mirror::Throwable> old_exception(soa.Self(), NULL);
3347  uint32_t old_throw_dex_pc;
3348  {
3349    ThrowLocation old_throw_location;
3350    mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location);
3351    old_throw_this_object.reset(old_throw_location.GetThis());
3352    old_throw_method.reset(old_throw_location.GetMethod());
3353    old_exception.reset(old_exception_obj);
3354    old_throw_dex_pc = old_throw_location.GetDexPc();
3355    soa.Self()->ClearException();
3356  }
3357
3358  // Translate the method through the vtable, unless the debugger wants to suppress it.
3359  SirtRef<mirror::ArtMethod> m(soa.Self(), pReq->method);
3360  if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != NULL) {
3361    mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.get());
3362    if (actual_method != m.get()) {
3363      VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.get()) << " to " << PrettyMethod(actual_method);
3364      m.reset(actual_method);
3365    }
3366  }
3367  VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.get())
3368             << " receiver=" << pReq->receiver
3369             << " arg_count=" << pReq->arg_count;
3370  CHECK(m.get() != nullptr);
3371
3372  CHECK_EQ(sizeof(jvalue), sizeof(uint64_t));
3373
3374  pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.get()),
3375                                         reinterpret_cast<jvalue*>(pReq->arg_values));
3376
3377  mirror::Throwable* exception = soa.Self()->GetException(NULL);
3378  soa.Self()->ClearException();
3379  pReq->exception = gRegistry->Add(exception);
3380  pReq->result_tag = BasicTagFromDescriptor(MethodHelper(m.get()).GetShorty());
3381  if (pReq->exception != 0) {
3382    VLOG(jdwp) << "  JDWP invocation returning with exception=" << exception
3383        << " " << exception->Dump();
3384    pReq->result_value.SetJ(0);
3385  } else if (pReq->result_tag == JDWP::JT_OBJECT) {
3386    /* if no exception thrown, examine object result more closely */
3387    JDWP::JdwpTag new_tag = TagFromObject(soa, pReq->result_value.GetL());
3388    if (new_tag != pReq->result_tag) {
3389      VLOG(jdwp) << "  JDWP promoted result from " << pReq->result_tag << " to " << new_tag;
3390      pReq->result_tag = new_tag;
3391    }
3392
3393    /*
3394     * Register the object.  We don't actually need an ObjectId yet,
3395     * but we do need to be sure that the GC won't move or discard the
3396     * object when we switch out of RUNNING.  The ObjectId conversion
3397     * will add the object to the "do not touch" list.
3398     *
3399     * We can't use the "tracked allocation" mechanism here because
3400     * the object is going to be handed off to a different thread.
3401     */
3402    gRegistry->Add(pReq->result_value.GetL());
3403  }
3404
3405  if (old_exception.get() != NULL) {
3406    ThrowLocation gc_safe_throw_location(old_throw_this_object.get(), old_throw_method.get(),
3407                                         old_throw_dex_pc);
3408    soa.Self()->SetException(gc_safe_throw_location, old_exception.get());
3409  }
3410}
3411
3412/*
3413 * "request" contains a full JDWP packet, possibly with multiple chunks.  We
3414 * need to process each, accumulate the replies, and ship the whole thing
3415 * back.
3416 *
3417 * Returns "true" if we have a reply.  The reply buffer is newly allocated,
3418 * and includes the chunk type/length, followed by the data.
3419 *
3420 * OLD-TODO: we currently assume that the request and reply include a single
3421 * chunk.  If this becomes inconvenient we will need to adapt.
3422 */
3423bool Dbg::DdmHandlePacket(JDWP::Request& request, uint8_t** pReplyBuf, int* pReplyLen) {
3424  Thread* self = Thread::Current();
3425  JNIEnv* env = self->GetJniEnv();
3426
3427  uint32_t type = request.ReadUnsigned32("type");
3428  uint32_t length = request.ReadUnsigned32("length");
3429
3430  // Create a byte[] corresponding to 'request'.
3431  size_t request_length = request.size();
3432  ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length));
3433  if (dataArray.get() == NULL) {
3434    LOG(WARNING) << "byte[] allocation failed: " << request_length;
3435    env->ExceptionClear();
3436    return false;
3437  }
3438  env->SetByteArrayRegion(dataArray.get(), 0, request_length, reinterpret_cast<const jbyte*>(request.data()));
3439  request.Skip(request_length);
3440
3441  // Run through and find all chunks.  [Currently just find the first.]
3442  ScopedByteArrayRO contents(env, dataArray.get());
3443  if (length != request_length) {
3444    LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zu)", length, request_length);
3445    return false;
3446  }
3447
3448  // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)".
3449  ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3450                                                                 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch,
3451                                                                 type, dataArray.get(), 0, length));
3452  if (env->ExceptionCheck()) {
3453    LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type);
3454    env->ExceptionDescribe();
3455    env->ExceptionClear();
3456    return false;
3457  }
3458
3459  if (chunk.get() == NULL) {
3460    return false;
3461  }
3462
3463  /*
3464   * Pull the pieces out of the chunk.  We copy the results into a
3465   * newly-allocated buffer that the caller can free.  We don't want to
3466   * continue using the Chunk object because nothing has a reference to it.
3467   *
3468   * We could avoid this by returning type/data/offset/length and having
3469   * the caller be aware of the object lifetime issues, but that
3470   * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work
3471   * if we have responses for multiple chunks.
3472   *
3473   * So we're pretty much stuck with copying data around multiple times.
3474   */
3475  ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data)));
3476  jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset);
3477  length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length);
3478  type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type);
3479
3480  VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length);
3481  if (length == 0 || replyData.get() == NULL) {
3482    return false;
3483  }
3484
3485  const int kChunkHdrLen = 8;
3486  uint8_t* reply = new uint8_t[length + kChunkHdrLen];
3487  if (reply == NULL) {
3488    LOG(WARNING) << "malloc failed: " << (length + kChunkHdrLen);
3489    return false;
3490  }
3491  JDWP::Set4BE(reply + 0, type);
3492  JDWP::Set4BE(reply + 4, length);
3493  env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen));
3494
3495  *pReplyBuf = reply;
3496  *pReplyLen = length + kChunkHdrLen;
3497
3498  VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length);
3499  return true;
3500}
3501
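// An illustrative, self-contained sketch of the chunk framing used by DdmHandlePacket above:
// a 4-byte big-endian type, then a 4-byte big-endian length, then the payload. The helper is
// hypothetical and not used by the runtime.
static void WriteDdmChunkHeaderForSketch(uint8_t* out, uint32_t type, uint32_t length) {
  // Equivalent to JDWP::Set4BE(out, type) followed by JDWP::Set4BE(out + 4, length).
  for (int i = 0; i < 4; ++i) {
    out[i] = static_cast<uint8_t>(type >> (24 - 8 * i));
    out[4 + i] = static_cast<uint8_t>(length >> (24 - 8 * i));
  }
}
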
3502void Dbg::DdmBroadcast(bool connect) {
3503  VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "...";
3504
3505  Thread* self = Thread::Current();
3506  if (self->GetState() != kRunnable) {
3507    LOG(ERROR) << "DDM broadcast in thread state " << self->GetState();
3508    /* try anyway? */
3509  }
3510
3511  JNIEnv* env = self->GetJniEnv();
3512  jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/;
3513  env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer,
3514                            WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast,
3515                            event);
3516  if (env->ExceptionCheck()) {
3517    LOG(ERROR) << "DdmServer.broadcast " << event << " failed";
3518    env->ExceptionDescribe();
3519    env->ExceptionClear();
3520  }
3521}
3522
3523void Dbg::DdmConnected() {
3524  Dbg::DdmBroadcast(true);
3525}
3526
3527void Dbg::DdmDisconnected() {
3528  Dbg::DdmBroadcast(false);
3529  gDdmThreadNotification = false;
3530}
3531
3532/*
3533 * Send a notification when a thread starts, stops, or changes its name.
3534 *
3535 * Because we broadcast the full set of threads when the notifications are
3536 * first enabled, it's possible for "thread" to be actively executing.
3537 */
3538void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) {
3539  if (!gDdmThreadNotification) {
3540    return;
3541  }
3542
3543  if (type == CHUNK_TYPE("THDE")) {
3544    uint8_t buf[4];
3545    JDWP::Set4BE(&buf[0], t->GetThreadId());
3546    Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf);
3547  } else {
3548    CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type;
3549    ScopedObjectAccessUnchecked soa(Thread::Current());
3550    SirtRef<mirror::String> name(soa.Self(), t->GetThreadName(soa));
3551    size_t char_count = (name.get() != NULL) ? name->GetLength() : 0;
3552    const jchar* chars = (name.get() != NULL) ? name->GetCharArray()->GetData() : NULL;
3553
3554    std::vector<uint8_t> bytes;
3555    JDWP::Append4BE(bytes, t->GetThreadId());
3556    JDWP::AppendUtf16BE(bytes, chars, char_count);
3557    CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2);
3558    Dbg::DdmSendChunk(type, bytes);
3559  }
3560}
3561
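// For example: a THDE chunk carries only the 4-byte thread id. For THCR/THNM the payload is the
// 4-byte thread id, a 4-byte character count, and the UTF-16 name, so a thread named "main"
// produces 4 + 4 + 4 * 2 = 16 bytes, which is exactly what the CHECK_EQ above asserts
// (char_count * 2 + sizeof(uint32_t) * 2).
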
3562void Dbg::DdmSetThreadNotification(bool enable) {
3563  // Enable/disable thread notifications.
3564  gDdmThreadNotification = enable;
3565  if (enable) {
3566    // Suspend the VM then post thread start notifications for all threads. Threads attaching will
3567    // see a suspension in progress and block until that ends. They then post their own start
3568    // notification.
3569    SuspendVM();
3570    std::list<Thread*> threads;
3571    Thread* self = Thread::Current();
3572    {
3573      MutexLock mu(self, *Locks::thread_list_lock_);
3574      threads = Runtime::Current()->GetThreadList()->GetList();
3575    }
3576    {
3577      ScopedObjectAccess soa(self);
3578      for (Thread* thread : threads) {
3579        Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR"));
3580      }
3581    }
3582    ResumeVM();
3583  }
3584}
3585
3586void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) {
3587  if (IsDebuggerActive()) {
3588    ScopedObjectAccessUnchecked soa(Thread::Current());
3589    JDWP::ObjectId id = gRegistry->Add(t->GetPeer());
3590    gJdwpState->PostThreadChange(id, type == CHUNK_TYPE("THCR"));
3591  }
3592  Dbg::DdmSendThreadNotification(t, type);
3593}
3594
3595void Dbg::PostThreadStart(Thread* t) {
3596  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR"));
3597}
3598
3599void Dbg::PostThreadDeath(Thread* t) {
3600  Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE"));
3601}
3602
3603void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) {
3604  CHECK(buf != NULL);
3605  iovec vec[1];
3606  vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf));
3607  vec[0].iov_len = byte_count;
3608  Dbg::DdmSendChunkV(type, vec, 1);
3609}
3610
3611void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) {
3612  DdmSendChunk(type, bytes.size(), &bytes[0]);
3613}
3614
3615void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) {
3616  if (gJdwpState == NULL) {
3617    VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type;
3618  } else {
3619    gJdwpState->DdmSendChunkV(type, iov, iov_count);
3620  }
3621}
3622
3623int Dbg::DdmHandleHpifChunk(HpifWhen when) {
3624  if (when == HPIF_WHEN_NOW) {
3625    DdmSendHeapInfo(when);
3626    return true;
3627  }
3628
3629  if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) {
3630    LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when);
3631    return false;
3632  }
3633
3634  gDdmHpifWhen = when;
3635  return true;
3636}
3637
3638bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) {
3639  if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) {
3640    LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when);
3641    return false;
3642  }
3643
3644  if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) {
3645    LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what);
3646    return false;
3647  }
3648
3649  if (native) {
3650    gDdmNhsgWhen = when;
3651    gDdmNhsgWhat = what;
3652  } else {
3653    gDdmHpsgWhen = when;
3654    gDdmHpsgWhat = what;
3655  }
3656  return true;
3657}
3658
3659void Dbg::DdmSendHeapInfo(HpifWhen reason) {
3660  // If there's a one-shot 'when', reset it.
3661  if (reason == gDdmHpifWhen) {
3662    if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) {
3663      gDdmHpifWhen = HPIF_WHEN_NEVER;
3664    }
3665  }
3666
3667  /*
3668   * Chunk HPIF (client --> server)
3669   *
3670   * Heap Info. General information about the heap,
3671   * suitable for a summary display.
3672   *
3673   *   [u4]: number of heaps
3674   *
3675   *   For each heap:
3676   *     [u4]: heap ID
3677   *     [u8]: timestamp in ms since Unix epoch
3678   *     [u1]: capture reason (same as 'when' value from server)
3679   *     [u4]: max heap size in bytes (-Xmx)
3680   *     [u4]: current heap size in bytes
3681   *     [u4]: current number of bytes allocated
3682   *     [u4]: current number of objects allocated
3683   */
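  // For the single heap reported below, the payload is 4 (heap count) + 4 + 8 + 1 + 4 + 4 + 4 + 4
  // = 33 bytes, which is exactly what the CHECK_EQ at the end of this function asserts.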
3684  uint8_t heap_count = 1;
3685  gc::Heap* heap = Runtime::Current()->GetHeap();
3686  std::vector<uint8_t> bytes;
3687  JDWP::Append4BE(bytes, heap_count);
3688  JDWP::Append4BE(bytes, 1);  // Heap id (bogus; we only have one heap).
3689  JDWP::Append8BE(bytes, MilliTime());
3690  JDWP::Append1BE(bytes, reason);
3691  JDWP::Append4BE(bytes, heap->GetMaxMemory());  // Max allowed heap size in bytes.
3692  JDWP::Append4BE(bytes, heap->GetTotalMemory());  // Current heap size in bytes.
3693  JDWP::Append4BE(bytes, heap->GetBytesAllocated());
3694  JDWP::Append4BE(bytes, heap->GetObjectsAllocated());
3695  CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4)));
3696  Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes);
3697}
3698
3699enum HpsgSolidity {
3700  SOLIDITY_FREE = 0,
3701  SOLIDITY_HARD = 1,
3702  SOLIDITY_SOFT = 2,
3703  SOLIDITY_WEAK = 3,
3704  SOLIDITY_PHANTOM = 4,
3705  SOLIDITY_FINALIZABLE = 5,
3706  SOLIDITY_SWEEP = 6,
3707};
3708
3709enum HpsgKind {
3710  KIND_OBJECT = 0,
3711  KIND_CLASS_OBJECT = 1,
3712  KIND_ARRAY_1 = 2,
3713  KIND_ARRAY_2 = 3,
3714  KIND_ARRAY_4 = 4,
3715  KIND_ARRAY_8 = 5,
3716  KIND_UNKNOWN = 6,
3717  KIND_NATIVE = 7,
3718};
3719
3720#define HPSG_PARTIAL (1<<7)
3721#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7)))
3722
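// For example: HPSG_STATE packs the kind into the upper bits and the solidity into the low three
// bits, so HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4) is ((4 & 0x7) << 3) | (1 & 0x7) == 0x21, and
// HPSG_STATE(SOLIDITY_FREE, 0) is 0.
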
3723class HeapChunkContext {
3724 public:
3725  // Maximum chunk size.  Obtain this from the formula:
3726  // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2
3727  HeapChunkContext(bool merge, bool native)
3728      : buf_(16384 - 16),
3729        type_(0),
3730        merge_(merge) {
3731    Reset();
3732    if (native) {
3733      type_ = CHUNK_TYPE("NHSG");
3734    } else {
3735      type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO");
3736    }
3737  }
3738
3739  ~HeapChunkContext() {
3740    if (p_ > &buf_[0]) {
3741      Flush();
3742    }
3743  }
3744
3745  void EnsureHeader(const void* chunk_ptr) {
3746    if (!needHeader_) {
3747      return;
3748    }
3749
3750    // Start a new HPSx chunk.
3751    JDWP::Write4BE(&p_, 1);  // Heap id (bogus; we only have one heap).
3752    JDWP::Write1BE(&p_, 8);  // Size of allocation unit, in bytes.
3753
3754    JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr));  // virtual address of segment start.
3755    JDWP::Write4BE(&p_, 0);  // offset of this piece (relative to the virtual address).
3756    // [u4]: length of piece, in allocation units
3757    // We won't know this until we're done, so save the offset and stuff in a dummy value.
3758    pieceLenField_ = p_;
3759    JDWP::Write4BE(&p_, 0x55555555);
3760    needHeader_ = false;
3761  }
3762
3763  void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3764    if (pieceLenField_ == NULL) {
3765      // Flush immediately post Reset (maybe back-to-back Flush). Ignore.
3766      CHECK(needHeader_);
3767      return;
3768    }
3769    // Patch the "length of piece" field.
3770    CHECK_LE(&buf_[0], pieceLenField_);
3771    CHECK_LE(pieceLenField_, p_);
3772    JDWP::Set4BE(pieceLenField_, totalAllocationUnits_);
3773
3774    Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]);
3775    Reset();
3776  }
3777
3778  static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg)
3779      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
3780                            Locks::mutator_lock_) {
3781    reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(start, end, used_bytes);
3782  }
3783
3784 private:
3785  enum { ALLOCATION_UNIT_SIZE = 8 };
3786
3787  void Reset() {
3788    p_ = &buf_[0];
3789    startOfNextMemoryChunk_ = NULL;
3790    totalAllocationUnits_ = 0;
3791    needHeader_ = true;
3792    pieceLenField_ = NULL;
3793  }
3794
3795  void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes)
3796      SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_,
3797                            Locks::mutator_lock_) {
3798    // Note: heap callbacks cannot manipulate the heap they are crawling over, so care is taken
3799    // in the following code not to allocate memory, by ensuring buf_ is of the correct size.
3800    if (used_bytes == 0) {
3801      if (start == NULL) {
3802        // Reset for start of new heap.
3803        startOfNextMemoryChunk_ = NULL;
3804        Flush();
3805      }
3806      // Only process in-use memory so that free region information
3807      // also includes dlmalloc bookkeeping.
3808      return;
3809    }
3810
3811    /* If we're looking at the native heap, we'll just return
3812     * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks
3813     */
3814    bool native = type_ == CHUNK_TYPE("NHSG");
3815
3816    if (startOfNextMemoryChunk_ != NULL) {
3817      // Transmit any pending free memory. Native free memory of
3818      // over kMaxFreeLen could be because of the use of mmaps, so
3819      // don't report. If not free memory then start a new segment.
3820      bool flush = true;
3821      if (start > startOfNextMemoryChunk_) {
3822        const size_t kMaxFreeLen = 2 * kPageSize;
3823        void* freeStart = startOfNextMemoryChunk_;
3824        void* freeEnd = start;
3825        size_t freeLen = reinterpret_cast<char*>(freeEnd) - reinterpret_cast<char*>(freeStart);
3826        if (!native || freeLen < kMaxFreeLen) {
3827          AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen);
3828          flush = false;
3829        }
3830      }
3831      if (flush) {
3832        startOfNextMemoryChunk_ = NULL;
3833        Flush();
3834      }
3835    }
3836    mirror::Object* obj = reinterpret_cast<mirror::Object*>(start);
3837
3838    // Determine the type of this chunk.
3839    // OLD-TODO: if context.merge, see if this chunk is different from the last chunk.
3840    // If it's the same, we should combine them.
3841    uint8_t state = ExamineObject(obj, native);
3842    // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an
3843    // allocation then the first sizeof(size_t) may belong to it.
3844    const size_t dlMallocOverhead = sizeof(size_t);
3845    AppendChunk(state, start, used_bytes + dlMallocOverhead);
3846    startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + dlMallocOverhead;
3847  }
3848
3849  void AppendChunk(uint8_t state, void* ptr, size_t length)
3850      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
3851    // Make sure there's enough room left in the buffer.
3852    // We need to use two bytes for every fractional 256 allocation units used by the chunk plus
3853    // 17 bytes for any header.
3854    size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17;
3855    size_t bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
3856    if (bytesLeft < needed) {
3857      Flush();
3858    }
3859
3860    bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]);
3861    if (bytesLeft < needed) {
3862      LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", needed="
3863          << needed << " bytes)";
3864      return;
3865    }
3866    EnsureHeader(ptr);
3867    // Write out the chunk description.
3868    length /= ALLOCATION_UNIT_SIZE;   // Convert to allocation units.
3869    totalAllocationUnits_ += length;
3870    while (length > 256) {
3871      *p_++ = state | HPSG_PARTIAL;
3872      *p_++ = 255;     // length - 1
3873      length -= 256;
3874    }
3875    *p_++ = state;
3876    *p_++ = length - 1;
3877  }
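
  // For example: a 4800-byte chunk is 4800 / ALLOCATION_UNIT_SIZE = 600 allocation units.
  // AppendChunk() emits it as the byte pairs (state | HPSG_PARTIAL, 255),
  // (state | HPSG_PARTIAL, 255), (state, 87), i.e. 256 + 256 + 88 units, because each length byte
  // encodes "length - 1" for a run of at most 256 units.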
3878
3879  uint8_t ExamineObject(mirror::Object* o, bool is_native_heap)
3880      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) {
3881    if (o == NULL) {
3882      return HPSG_STATE(SOLIDITY_FREE, 0);
3883    }
3884
3885    // It's an allocated chunk. Figure out what it is.
3886
3887    // If we're looking at the native heap, we'll just return
3888    // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks.
3889    if (is_native_heap) {
3890      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
3891    }
3892
3893    if (!Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) {
3894      return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE);
3895    }
3896
3897    mirror::Class* c = o->GetClass();
3898    if (c == NULL) {
3899      // The object was probably just created but hasn't been initialized yet.
3900      return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
3901    }
3902
3903    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
3904      LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c;
3905      return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN);
3906    }
3907
3908    if (c->IsClassClass()) {
3909      return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT);
3910    }
3911
3912    if (c->IsArrayClass()) {
3913      if (o->IsObjectArray()) {
3914        return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
3915      }
3916      switch (c->GetComponentSize()) {
3917      case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1);
3918      case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2);
3919      case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4);
3920      case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8);
3921      }
3922    }
3923
3924    return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT);
3925  }
3926
3927  std::vector<uint8_t> buf_;
3928  uint8_t* p_;
3929  uint8_t* pieceLenField_;
3930  void* startOfNextMemoryChunk_;
3931  size_t totalAllocationUnits_;
3932  uint32_t type_;
3933  bool merge_;
3934  bool needHeader_;
3935
3936  DISALLOW_COPY_AND_ASSIGN(HeapChunkContext);
3937};
3938
3939void Dbg::DdmSendHeapSegments(bool native) {
3940  Dbg::HpsgWhen when;
3941  Dbg::HpsgWhat what;
3942  if (!native) {
3943    when = gDdmHpsgWhen;
3944    what = gDdmHpsgWhat;
3945  } else {
3946    when = gDdmNhsgWhen;
3947    what = gDdmNhsgWhat;
3948  }
3949  if (when == HPSG_WHEN_NEVER) {
3950    return;
3951  }
3952
3953  // Figure out what kind of chunks we'll be sending.
3954  CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS) << static_cast<int>(what);
3955
3956  // First, send a heap start chunk.
3957  uint8_t heap_id[4];
3958  JDWP::Set4BE(&heap_id[0], 1);  // Heap id (bogus; we only have one heap).
3959  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id);
3960
3961  Thread* self = Thread::Current();
3962
3963  // To allow the Walk/InspectAll() below to exclusively-lock the
3964  // mutator lock, temporarily release the shared access to the
3965  // mutator lock here by transitioning to the suspended state.
3966  Locks::mutator_lock_->AssertSharedHeld(self);
3967  self->TransitionFromRunnableToSuspended(kSuspended);
3968
3969  // Send a series of heap segment chunks.
3970  HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native);
3971  if (native) {
3972    dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context);
3973  } else {
3974    gc::Heap* heap = Runtime::Current()->GetHeap();
3975    const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces();
3976    typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It;
3977    for (It cur = spaces.begin(), end = spaces.end(); cur != end; ++cur) {
3978      if ((*cur)->IsMallocSpace()) {
3979        (*cur)->AsMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
3980      }
3981    }
3982    // Walk the large objects, these are not in the AllocSpace.
3983    heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context);
3984  }
3985
3986  // Shared-lock the mutator lock back.
3987  self->TransitionFromSuspendedToRunnable();
3988  Locks::mutator_lock_->AssertSharedHeld(self);
3989
3990  // Finally, send a heap end chunk.
3991  Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id);
3992}
3993
3994static size_t GetAllocTrackerMax() {
3995#ifdef HAVE_ANDROID_OS
3996  // Check whether there's a system property overriding the number of records.
3997  const char* propertyName = "dalvik.vm.allocTrackerMax";
3998  char allocRecordMaxString[PROPERTY_VALUE_MAX];
3999  if (property_get(propertyName, allocRecordMaxString, "") > 0) {
4000    char* end;
4001    size_t value = strtoul(allocRecordMaxString, &end, 10);
4002    if (*end != '\0') {
4003      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
4004                 << "' --- invalid";
4005      return kDefaultNumAllocRecords;
4006    }
4007    if (!IsPowerOfTwo(value)) {
4008      LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString
4009                 << "' --- not a power of two";
4010      return kDefaultNumAllocRecords;
4011    }
4012    return value;
4013  }
4014#endif
4015  return kDefaultNumAllocRecords;
4016}
4017
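// On a device the override read above can be set before enabling allocation tracking, for example
// with "adb shell setprop dalvik.vm.allocTrackerMax 16384"; per the checks above, the value must
// parse cleanly and be a power of two, otherwise kDefaultNumAllocRecords is used.
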
4018void Dbg::SetAllocTrackingEnabled(bool enabled) {
4019  if (enabled) {
4020    {
4021      MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
4022      if (recent_allocation_records_ == NULL) {
4023        alloc_record_max_ = GetAllocTrackerMax();
4024        LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of "
4025            << kMaxAllocRecordStackDepth << " frames, taking "
4026            << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")";
4027        alloc_record_head_ = alloc_record_count_ = 0;
4028        recent_allocation_records_ = new AllocRecord[alloc_record_max_];
4029        CHECK(recent_allocation_records_ != NULL);
4030      }
4031    }
4032    Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints();
4033  } else {
4034    Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints();
4035    {
4036      MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
4037      delete[] recent_allocation_records_;
4038      recent_allocation_records_ = NULL;
4039    }
4040  }
4041}
4042
4043struct AllocRecordStackVisitor : public StackVisitor {
4044  AllocRecordStackVisitor(Thread* thread, AllocRecord* record)
4045      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
4046      : StackVisitor(thread, NULL), record(record), depth(0) {}
4047
4048  // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
4049  // annotalysis.
4050  bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
4051    if (depth >= kMaxAllocRecordStackDepth) {
4052      return false;
4053    }
4054    mirror::ArtMethod* m = GetMethod();
4055    if (!m->IsRuntimeMethod()) {
4056      record->stack[depth].method = m;
4057      record->stack[depth].dex_pc = GetDexPc();
4058      ++depth;
4059    }
4060    return true;
4061  }
4062
4063  ~AllocRecordStackVisitor() {
4064    // Clear out any unused stack trace elements.
4065    for (; depth < kMaxAllocRecordStackDepth; ++depth) {
4066      record->stack[depth].method = NULL;
4067      record->stack[depth].dex_pc = 0;
4068    }
4069  }
4070
4071  AllocRecord* record;
4072  size_t depth;
4073};
4074
4075void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) {
4076  Thread* self = Thread::Current();
4077  CHECK(self != NULL);
4078
4079  MutexLock mu(self, *alloc_tracker_lock_);
4080  if (recent_allocation_records_ == NULL) {
4081    return;
4082  }
4083
4084  // Advance and clip.
4085  if (++alloc_record_head_ == alloc_record_max_) {
4086    alloc_record_head_ = 0;
4087  }
4088
4089  // Fill in the basics.
4090  AllocRecord* record = &recent_allocation_records_[alloc_record_head_];
4091  record->type = type;
4092  record->byte_count = byte_count;
4093  record->thin_lock_id = self->GetThreadId();
4094
4095  // Fill in the stack trace.
4096  AllocRecordStackVisitor visitor(self, record);
4097  visitor.WalkStack();
4098
4099  if (alloc_record_count_ < alloc_record_max_) {
4100    ++alloc_record_count_;
4101  }
4102}
4103
4104// Returns the index of the head element.
4105//
4106// We point at the most-recently-written record, so if alloc_record_count_ is 1
4107// we want to use the current element.  Take "head+1" and subtract count
4108// from it.
4109//
4110// We need to handle underflow in our circular buffer, so we add
4111// alloc_record_max_ and then mask it back down.
4112size_t Dbg::HeadIndex() {
4113  return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) &
4114      (Dbg::alloc_record_max_ - 1);
4115}
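
// For example: with alloc_record_max_ == 8, alloc_record_head_ == 2 and alloc_record_count_ == 3,
// the live records sit at indexes 0, 1 and 2 (newest at 2). HeadIndex() returns
// (2 + 1 + 8 - 3) & 7 == 0, the index of the oldest record, and the readers below walk forward
// from there.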
4116
4117void Dbg::DumpRecentAllocations() {
4118  ScopedObjectAccess soa(Thread::Current());
4119  MutexLock mu(soa.Self(), *alloc_tracker_lock_);
4120  if (recent_allocation_records_ == NULL) {
4121    LOG(INFO) << "Not recording tracked allocations";
4122    return;
4123  }
4124
4125  // "i" starts at the oldest record in the circular buffer; we walk forward
4126  // from there to the most recently written record.
4127  size_t i = HeadIndex();
4128  size_t count = alloc_record_count_;
4129
4130  LOG(INFO) << "Tracked allocations, (head=" << alloc_record_head_ << " count=" << count << ")";
4131  while (count--) {
4132    AllocRecord* record = &recent_allocation_records_[i];
4133
4134    LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->thin_lock_id, record->byte_count)
4135              << PrettyClass(record->type);
4136
4137    for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) {
4138      mirror::ArtMethod* m = record->stack[stack_frame].method;
4139      if (m == NULL) {
4140        break;
4141      }
4142      LOG(INFO) << "    " << PrettyMethod(m) << " line " << record->stack[stack_frame].LineNumber();
4143    }
4144
4145    // Pause periodically to help logcat catch up.
4146    if ((count % 5) == 0) {
4147      usleep(40000);
4148    }
4149
4150    i = (i + 1) & (alloc_record_max_ - 1);
4151  }
4152}
4153
4154void Dbg::UpdateObjectPointers(IsMarkedCallback* callback, void* arg) {
4155  if (recent_allocation_records_ != nullptr) {
4156    MutexLock mu(Thread::Current(), *alloc_tracker_lock_);
4157    size_t i = HeadIndex();
4158    size_t count = alloc_record_count_;
4159    while (count--) {
4160      AllocRecord* record = &recent_allocation_records_[i];
4161      DCHECK(record != nullptr);
4162      record->UpdateObjectPointers(callback, arg);
4163      i = (i + 1) & (alloc_record_max_ - 1);
4164    }
4165  }
4166  if (gRegistry != nullptr) {
4167    gRegistry->UpdateObjectPointers(callback, arg);
4168  }
4169}
4170
4171void Dbg::AllowNewObjectRegistryObjects() {
4172  if (gRegistry != nullptr) {
4173    gRegistry->AllowNewObjects();
4174  }
4175}
4176
4177void Dbg::DisallowNewObjectRegistryObjects() {
4178  if (gRegistry != nullptr) {
4179    gRegistry->DisallowNewObjects();
4180  }
4181}
4182
4183class StringTable {
4184 public:
4185  StringTable() {
4186  }
4187
4188  void Add(const char* s) {
4189    table_.insert(s);
4190  }
4191
4192  size_t IndexOf(const char* s) const {
4193    auto it = table_.find(s);
4194    if (it == table_.end()) {
4195      LOG(FATAL) << "IndexOf(\"" << s << "\") failed";
4196    }
4197    return std::distance(table_.begin(), it);
4198  }
4199
4200  size_t Size() const {
4201    return table_.size();
4202  }
4203
4204  void WriteTo(std::vector<uint8_t>& bytes) const {
4205    for (const std::string& str : table_) {
4206      const char* s = str.c_str();
4207      size_t s_len = CountModifiedUtf8Chars(s);
4208      UniquePtr<uint16_t> s_utf16(new uint16_t[s_len]);
4209      ConvertModifiedUtf8ToUtf16(s_utf16.get(), s);
4210      JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len);
4211    }
4212  }
4213
4214 private:
4215  std::set<std::string> table_;
4216  DISALLOW_COPY_AND_ASSIGN(StringTable);
4217};
4218
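// Because table_ is a std::set<std::string>, IndexOf() and WriteTo() observe the same sorted
// order, so the indexes embedded in each entry of the DDMS message below line up with the string
// table appended at its end. The function below is a hypothetical illustration of that ordering
// and is not used by the runtime.
static size_t StringTableOrderingSketch() {
  StringTable t;
  t.Add("Ljava/lang/String;");
  t.Add("Ljava/lang/Object;");
  // "Ljava/lang/Object;" sorts before "Ljava/lang/String;", so its index is 0 and String's is 1.
  return t.IndexOf("Ljava/lang/Object;");
}
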
4219static const char* GetMethodSourceFile(MethodHelper* mh)
4220    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
4221  DCHECK(mh != nullptr);
4222  const char* source_file = mh->GetDeclaringClassSourceFile();
4223  return (source_file != nullptr) ? source_file : "";
4224}
4225
4226/*
4227 * The data we send to DDMS contains everything we have recorded.
4228 *
4229 * Message header (all values big-endian):
4230 * (1b) message header len (to allow future expansion); includes itself
4231 * (1b) entry header len
4232 * (1b) stack frame len
4233 * (2b) number of entries
4234 * (4b) offset to string table from start of message
4235 * (2b) number of class name strings
4236 * (2b) number of method name strings
4237 * (2b) number of source file name strings
4238 * For each entry:
4239 *   (4b) total allocation size
4240 *   (2b) thread id
4241 *   (2b) allocated object's class name index
4242 *   (1b) stack depth
4243 *   For each stack frame:
4244 *     (2b) method's class name
4245 *     (2b) method name
4246 *     (2b) method source file
4247 *     (2b) line number, clipped to 32767; -2 if native; -1 if no source
4248 * (xb) class name strings
4249 * (xb) method name strings
4250 * (xb) source file strings
4251 *
4252 * As with other DDM traffic, strings are sent as a 4-byte length
4253 * followed by UTF-16 data.
4254 *
4255 * We send up 16-bit unsigned indexes into string tables.  In theory there
4256 * can be (kMaxAllocRecordStackDepth * alloc_record_max_) unique strings in
4257 * each table, but in practice there should be far fewer.
4258 *
4259 * The chief reason for using a string table here is to keep the size of
4260 * the DDMS message to a minimum.  This is partly to make the protocol
4261 * efficient, but also because we have to form the whole thing up all at
4262 * once in a memory buffer.
4263 *
4264 * We use separate string tables for class names, method names, and source
4265 * files to keep the indexes small.  There will generally be no overlap
4266 * between the contents of these tables.
4267 */
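// The message header described above is 1 + 1 + 1 + 2 + 4 + 2 + 2 + 2 = 15 bytes, each entry
// header is 4 + 2 + 2 + 1 = 9 bytes, and each stack frame is 2 + 2 + 2 + 2 = 8 bytes, matching
// kMessageHeaderLen, kEntryHeaderLen and kStackFrameLen below.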
4268jbyteArray Dbg::GetRecentAllocations() {
4269  if (false) {
4270    DumpRecentAllocations();
4271  }
4272
4273  Thread* self = Thread::Current();
4274  std::vector<uint8_t> bytes;
4275  {
4276    MutexLock mu(self, *alloc_tracker_lock_);
4277    //
4278    // Part 1: generate string tables.
4279    //
4280    StringTable class_names;
4281    StringTable method_names;
4282    StringTable filenames;
4283
4284    int count = alloc_record_count_;
4285    int idx = HeadIndex();
4286    while (count--) {
4287      AllocRecord* record = &recent_allocation_records_[idx];
4288
4289      class_names.Add(ClassHelper(record->type).GetDescriptor());
4290
4291      MethodHelper mh;
4292      for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) {
4293        mirror::ArtMethod* m = record->stack[i].method;
4294        if (m != NULL) {
4295          mh.ChangeMethod(m);
4296          class_names.Add(mh.GetDeclaringClassDescriptor());
4297          method_names.Add(mh.GetName());
4298          filenames.Add(GetMethodSourceFile(&mh));
4299        }
4300      }
4301
4302      idx = (idx + 1) & (alloc_record_max_ - 1);
4303    }
4304
4305    LOG(INFO) << "allocation records: " << alloc_record_count_;
4306
4307    //
4308    // Part 2: Generate the output and store it in the buffer.
4309    //
4310
4311    // (1b) message header len (to allow future expansion); includes itself
4312    // (1b) entry header len
4313    // (1b) stack frame len
4314    const int kMessageHeaderLen = 15;
4315    const int kEntryHeaderLen = 9;
4316    const int kStackFrameLen = 8;
4317    JDWP::Append1BE(bytes, kMessageHeaderLen);
4318    JDWP::Append1BE(bytes, kEntryHeaderLen);
4319    JDWP::Append1BE(bytes, kStackFrameLen);
4320
4321    // (2b) number of entries
4322    // (4b) offset to string table from start of message
4323    // (2b) number of class name strings
4324    // (2b) number of method name strings
4325    // (2b) number of source file name strings
4326    JDWP::Append2BE(bytes, alloc_record_count_);
4327    size_t string_table_offset = bytes.size();
4328    JDWP::Append4BE(bytes, 0);  // We'll patch this later...
4329    JDWP::Append2BE(bytes, class_names.Size());
4330    JDWP::Append2BE(bytes, method_names.Size());
4331    JDWP::Append2BE(bytes, filenames.Size());
4332
4333    count = alloc_record_count_;
4334    idx = HeadIndex();
4335    while (count--) {
4336      // For each entry:
4337      // (4b) total allocation size
4338      // (2b) thread id
4339      // (2b) allocated object's class name index
4340      // (1b) stack depth
4341      AllocRecord* record = &recent_allocation_records_[idx];
4342      size_t stack_depth = record->GetDepth();
4343      ClassHelper kh(record->type);
4344      size_t allocated_object_class_name_index = class_names.IndexOf(kh.GetDescriptor());
4345      JDWP::Append4BE(bytes, record->byte_count);
4346      JDWP::Append2BE(bytes, record->thin_lock_id);
4347      JDWP::Append2BE(bytes, allocated_object_class_name_index);
4348      JDWP::Append1BE(bytes, stack_depth);
4349
4350      MethodHelper mh;
4351      for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) {
4352        // For each stack frame:
4353        // (2b) method's class name
4354        // (2b) method name
4355        // (2b) method source file
4356        // (2b) line number, clipped to 32767; -2 if native; -1 if no source
4357        mh.ChangeMethod(record->stack[stack_frame].method);
4358        size_t class_name_index = class_names.IndexOf(mh.GetDeclaringClassDescriptor());
4359        size_t method_name_index = method_names.IndexOf(mh.GetName());
4360        size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(&mh));
4361        JDWP::Append2BE(bytes, class_name_index);
4362        JDWP::Append2BE(bytes, method_name_index);
4363        JDWP::Append2BE(bytes, file_name_index);
4364        JDWP::Append2BE(bytes, record->stack[stack_frame].LineNumber());
4365      }
4366
4367      idx = (idx + 1) & (alloc_record_max_ - 1);
4368    }
4369
4370    // (xb) class name strings
4371    // (xb) method name strings
4372    // (xb) source file strings
4373    JDWP::Set4BE(&bytes[string_table_offset], bytes.size());
4374    class_names.WriteTo(bytes);
4375    method_names.WriteTo(bytes);
4376    filenames.WriteTo(bytes);
4377  }
4378  JNIEnv* env = self->GetJniEnv();
4379  jbyteArray result = env->NewByteArray(bytes.size());
4380  if (result != NULL) {
4381    env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0]));
4382  }
4383  return result;
4384}
4385
4386}  // namespace art
4387