debugger.cc revision 5dc158e9b6635b6c5d3916ae4094e8886feb4580
1/* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "debugger.h" 18 19#include <sys/uio.h> 20 21#include <set> 22 23#include "arch/context.h" 24#include "class_linker.h" 25#include "class_linker-inl.h" 26#include "dex_file-inl.h" 27#include "dex_instruction.h" 28#include "gc/accounting/card_table-inl.h" 29#include "gc/space/large_object_space.h" 30#include "gc/space/space-inl.h" 31#include "handle_scope.h" 32#include "jdwp/object_registry.h" 33#include "mirror/art_field-inl.h" 34#include "mirror/art_method-inl.h" 35#include "mirror/class.h" 36#include "mirror/class-inl.h" 37#include "mirror/class_loader.h" 38#include "mirror/object-inl.h" 39#include "mirror/object_array-inl.h" 40#include "mirror/string-inl.h" 41#include "mirror/throwable.h" 42#include "object_utils.h" 43#include "quick/inline_method_analyser.h" 44#include "reflection.h" 45#include "safe_map.h" 46#include "scoped_thread_state_change.h" 47#include "ScopedLocalRef.h" 48#include "ScopedPrimitiveArray.h" 49#include "handle_scope-inl.h" 50#include "thread_list.h" 51#include "throw_location.h" 52#include "utf.h" 53#include "verifier/method_verifier-inl.h" 54#include "well_known_classes.h" 55 56#ifdef HAVE_ANDROID_OS 57#include "cutils/properties.h" 58#endif 59 60namespace art { 61 62static const size_t kMaxAllocRecordStackDepth = 16; // Max 255. 63static const size_t kDefaultNumAllocRecords = 64*1024; // Must be a power of 2. 64 65class AllocRecordStackTraceElement { 66 public: 67 AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) { 68 } 69 70 int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 71 mirror::ArtMethod* method = Method(); 72 DCHECK(method != nullptr); 73 return method->GetLineNumFromDexPC(DexPc()); 74 } 75 76 mirror::ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 77 ScopedObjectAccessUnchecked soa(Thread::Current()); 78 return soa.DecodeMethod(method_); 79 } 80 81 void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 82 ScopedObjectAccessUnchecked soa(Thread::Current()); 83 method_ = soa.EncodeMethod(m); 84 } 85 86 uint32_t DexPc() const { 87 return dex_pc_; 88 } 89 90 void SetDexPc(uint32_t pc) { 91 dex_pc_ = pc; 92 } 93 94 private: 95 jmethodID method_; 96 uint32_t dex_pc_; 97}; 98 99jobject Dbg::TypeCache::Add(mirror::Class* t) { 100 ScopedObjectAccessUnchecked soa(Thread::Current()); 101 int32_t hash_code = t->IdentityHashCode(); 102 auto range = objects_.equal_range(hash_code); 103 for (auto it = range.first; it != range.second; ++it) { 104 if (soa.Decode<mirror::Class*>(it->second) == t) { 105 // Found a matching weak global, return it. 
106 return it->second; 107 } 108 } 109 JNIEnv* env = soa.Env(); 110 const jobject local_ref = soa.AddLocalReference<jobject>(t); 111 const jobject weak_global = env->NewWeakGlobalRef(local_ref); 112 env->DeleteLocalRef(local_ref); 113 objects_.insert(std::make_pair(hash_code, weak_global)); 114 return weak_global; 115} 116 117void Dbg::TypeCache::Clear() { 118 ScopedObjectAccess soa(Thread::Current()); 119 for (const auto& p : objects_) { 120 soa.Vm()->DeleteWeakGlobalRef(soa.Self(), p.second); 121 } 122 objects_.clear(); 123} 124 125class AllocRecord { 126 public: 127 AllocRecord() : type_(nullptr), byte_count_(0), thin_lock_id_(0) {} 128 129 mirror::Class* Type() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 130 return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_)); 131 } 132 133 void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 134 type_ = Dbg::GetTypeCache().Add(t); 135 } 136 137 size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 138 size_t depth = 0; 139 while (depth < kMaxAllocRecordStackDepth && stack_[depth].Method() != NULL) { 140 ++depth; 141 } 142 return depth; 143 } 144 145 size_t ByteCount() const { 146 return byte_count_; 147 } 148 149 void SetByteCount(size_t count) { 150 byte_count_ = count; 151 } 152 153 uint16_t ThinLockId() const { 154 return thin_lock_id_; 155 } 156 157 void SetThinLockId(uint16_t id) { 158 thin_lock_id_ = id; 159 } 160 161 AllocRecordStackTraceElement* StackElement(size_t index) { 162 DCHECK_LT(index, kMaxAllocRecordStackDepth); 163 return &stack_[index]; 164 } 165 166 private: 167 jobject type_; // This is a weak global. 168 size_t byte_count_; 169 uint16_t thin_lock_id_; 170 AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth]; // Unused entries have NULL method. 171}; 172 173class Breakpoint { 174 public: 175 Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization) 176 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 177 : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) { 178 ScopedObjectAccessUnchecked soa(Thread::Current()); 179 method_ = soa.EncodeMethod(method); 180 } 181 182 Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 183 : method_(nullptr), dex_pc_(other.dex_pc_), 184 need_full_deoptimization_(other.need_full_deoptimization_) { 185 ScopedObjectAccessUnchecked soa(Thread::Current()); 186 method_ = soa.EncodeMethod(other.Method()); 187 } 188 189 mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 190 ScopedObjectAccessUnchecked soa(Thread::Current()); 191 return soa.DecodeMethod(method_); 192 } 193 194 uint32_t DexPc() const { 195 return dex_pc_; 196 } 197 198 bool NeedFullDeoptimization() const { 199 return need_full_deoptimization_; 200 } 201 202 private: 203 // The location of this breakpoint. 204 jmethodID method_; 205 uint32_t dex_pc_; 206 207 // Indicates whether breakpoint needs full deoptimization or selective deoptimization. 
208 bool need_full_deoptimization_; 209}; 210 211static std::ostream& operator<<(std::ostream& os, Breakpoint& rhs) 212 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 213 os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc()); 214 return os; 215} 216 217class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener { 218 public: 219 DebugInstrumentationListener() {} 220 virtual ~DebugInstrumentationListener() {} 221 222 void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, 223 uint32_t dex_pc) 224 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 225 if (method->IsNative()) { 226 // TODO: post location events is a suspension point and native method entry stubs aren't. 227 return; 228 } 229 Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr); 230 } 231 232 void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, 233 uint32_t dex_pc, const JValue& return_value) 234 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 235 if (method->IsNative()) { 236 // TODO: post location events is a suspension point and native method entry stubs aren't. 237 return; 238 } 239 Dbg::UpdateDebugger(thread, this_object, method, dex_pc, Dbg::kMethodExit, &return_value); 240 } 241 242 void MethodUnwind(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, 243 uint32_t dex_pc) 244 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 245 // We're not recorded to listen to this kind of event, so complain. 246 LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method) 247 << " " << dex_pc; 248 } 249 250 void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, 251 uint32_t new_dex_pc) 252 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 253 Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, 0, nullptr); 254 } 255 256 void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, 257 uint32_t dex_pc, mirror::ArtField* field) 258 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 259 Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field); 260 } 261 262 void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, 263 uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value) 264 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 265 Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value); 266 } 267 268 void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location, 269 mirror::ArtMethod* catch_method, uint32_t catch_dex_pc, 270 mirror::Throwable* exception_object) 271 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 272 Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object); 273 } 274 275 private: 276 DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener); 277} gDebugInstrumentationListener; 278 279// JDWP is allowed unless the Zygote forbids it. 280static bool gJdwpAllowed = true; 281 282// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line? 283static bool gJdwpConfigured = false; 284 285// Broken-down JDWP options. (Only valid if IsJdwpConfigured() is true.) 286static JDWP::JdwpOptions gJdwpOptions; 287 288// Runtime JDWP state. 289static JDWP::JdwpState* gJdwpState = NULL; 290static bool gDebuggerConnected; // debugger or DDMS is connected. 
291static bool gDebuggerActive; // debugger is making requests. 292static bool gDisposed; // debugger called VirtualMachine.Dispose, so we should drop the connection. 293 294static bool gDdmThreadNotification = false; 295 296// DDMS GC-related settings. 297static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER; 298static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER; 299static Dbg::HpsgWhat gDdmHpsgWhat; 300static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER; 301static Dbg::HpsgWhat gDdmNhsgWhat; 302 303static ObjectRegistry* gRegistry = nullptr; 304 305// Recent allocation tracking. 306Mutex* Dbg::alloc_tracker_lock_ = nullptr; 307AllocRecord* Dbg::recent_allocation_records_ = nullptr; // TODO: CircularBuffer<AllocRecord> 308size_t Dbg::alloc_record_max_ = 0; 309size_t Dbg::alloc_record_head_ = 0; 310size_t Dbg::alloc_record_count_ = 0; 311Dbg::TypeCache Dbg::type_cache_; 312 313// Deoptimization support. 314Mutex* Dbg::deoptimization_lock_ = nullptr; 315std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_; 316size_t Dbg::full_deoptimization_event_count_ = 0; 317size_t Dbg::delayed_full_undeoptimization_count_ = 0; 318 319// Instrumentation event reference counters. 320size_t Dbg::dex_pc_change_event_ref_count_ = 0; 321size_t Dbg::method_enter_event_ref_count_ = 0; 322size_t Dbg::method_exit_event_ref_count_ = 0; 323size_t Dbg::field_read_event_ref_count_ = 0; 324size_t Dbg::field_write_event_ref_count_ = 0; 325size_t Dbg::exception_catch_event_ref_count_ = 0; 326uint32_t Dbg::instrumentation_events_ = 0; 327 328// Breakpoints. 329static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_); 330 331void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, uint32_t tid, 332 RootType root_type) { 333 if (receiver != nullptr) { 334 callback(&receiver, arg, tid, root_type); 335 } 336 if (thread != nullptr) { 337 callback(&thread, arg, tid, root_type); 338 } 339 if (klass != nullptr) { 340 callback(reinterpret_cast<mirror::Object**>(&klass), arg, tid, root_type); 341 } 342 if (method != nullptr) { 343 callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type); 344 } 345} 346 347void DebugInvokeReq::Clear() { 348 invoke_needed = false; 349 receiver = nullptr; 350 thread = nullptr; 351 klass = nullptr; 352 method = nullptr; 353} 354 355void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, uint32_t tid, 356 RootType root_type) { 357 if (method != nullptr) { 358 callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type); 359 } 360} 361 362bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const { 363 return dex_pcs.find(dex_pc) == dex_pcs.end(); 364} 365 366void SingleStepControl::Clear() { 367 is_active = false; 368 method = nullptr; 369 dex_pcs.clear(); 370} 371 372static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc) 373 LOCKS_EXCLUDED(Locks::breakpoint_lock_) 374 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 375 MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); 376 for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) { 377 if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) { 378 VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i]; 379 return true; 380 } 381 } 382 return false; 383} 384 385static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread) 386 LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) { 387 MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_); 388 // A thread may be suspended for GC; 
in this code, we really want to know whether 389 // there's a debugger suspension active. 390 return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0; 391} 392 393static mirror::Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status) 394 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 395 mirror::Object* o = gRegistry->Get<mirror::Object*>(id); 396 if (o == NULL || o == ObjectRegistry::kInvalidObject) { 397 status = JDWP::ERR_INVALID_OBJECT; 398 return NULL; 399 } 400 if (!o->IsArrayInstance()) { 401 status = JDWP::ERR_INVALID_ARRAY; 402 return NULL; 403 } 404 status = JDWP::ERR_NONE; 405 return o->AsArray(); 406} 407 408static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status) 409 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 410 mirror::Object* o = gRegistry->Get<mirror::Object*>(id); 411 if (o == NULL || o == ObjectRegistry::kInvalidObject) { 412 status = JDWP::ERR_INVALID_OBJECT; 413 return NULL; 414 } 415 if (!o->IsClass()) { 416 status = JDWP::ERR_INVALID_CLASS; 417 return NULL; 418 } 419 status = JDWP::ERR_NONE; 420 return o->AsClass(); 421} 422 423static JDWP::JdwpError DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id, Thread*& thread) 424 EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) 425 LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) 426 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 427 mirror::Object* thread_peer = gRegistry->Get<mirror::Object*>(thread_id); 428 if (thread_peer == NULL || thread_peer == ObjectRegistry::kInvalidObject) { 429 // This isn't even an object. 430 return JDWP::ERR_INVALID_OBJECT; 431 } 432 433 mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread); 434 if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) { 435 // This isn't a thread. 436 return JDWP::ERR_INVALID_THREAD; 437 } 438 439 thread = Thread::FromManagedThread(soa, thread_peer); 440 if (thread == NULL) { 441 // This is a java.lang.Thread without a Thread*. Must be a zombie. 442 return JDWP::ERR_THREAD_NOT_ALIVE; 443 } 444 return JDWP::ERR_NONE; 445} 446 447static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) { 448 // JDWP deliberately uses the descriptor characters' ASCII values for its enum. 449 // Note that by "basic" we mean that we don't get more specific than JT_OBJECT. 450 return static_cast<JDWP::JdwpTag>(descriptor[0]); 451} 452 453static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c) 454 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 455 CHECK(c != NULL); 456 if (c->IsArrayClass()) { 457 return JDWP::JT_ARRAY; 458 } 459 if (c->IsStringClass()) { 460 return JDWP::JT_STRING; 461 } 462 if (c->IsClassClass()) { 463 return JDWP::JT_CLASS_OBJECT; 464 } 465 { 466 mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread); 467 if (thread_class->IsAssignableFrom(c)) { 468 return JDWP::JT_THREAD; 469 } 470 } 471 { 472 mirror::Class* thread_group_class = 473 soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup); 474 if (thread_group_class->IsAssignableFrom(c)) { 475 return JDWP::JT_THREAD_GROUP; 476 } 477 } 478 { 479 mirror::Class* class_loader_class = 480 soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader); 481 if (class_loader_class->IsAssignableFrom(c)) { 482 return JDWP::JT_CLASS_LOADER; 483 } 484 } 485 return JDWP::JT_OBJECT; 486} 487 488/* 489 * Objects declared to hold Object might actually hold a more specific 490 * type. 
The debugger may take a special interest in these (e.g. it 491 * wants to display the contents of Strings), so we want to return an 492 * appropriate tag. 493 * 494 * Null objects are tagged JT_OBJECT. 495 */ 496static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) 497 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 498 return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass()); 499} 500 501static bool IsPrimitiveTag(JDWP::JdwpTag tag) { 502 switch (tag) { 503 case JDWP::JT_BOOLEAN: 504 case JDWP::JT_BYTE: 505 case JDWP::JT_CHAR: 506 case JDWP::JT_FLOAT: 507 case JDWP::JT_DOUBLE: 508 case JDWP::JT_INT: 509 case JDWP::JT_LONG: 510 case JDWP::JT_SHORT: 511 case JDWP::JT_VOID: 512 return true; 513 default: 514 return false; 515 } 516} 517 518/* 519 * Handle one of the JDWP name/value pairs. 520 * 521 * JDWP options are: 522 * help: if specified, show help message and bail 523 * transport: may be dt_socket or dt_shmem 524 * address: for dt_socket, "host:port", or just "port" when listening 525 * server: if "y", wait for debugger to attach; if "n", attach to debugger 526 * timeout: how long to wait for debugger to connect / listen 527 * 528 * Useful with server=n (these aren't supported yet): 529 * onthrow=<exception-name>: connect to debugger when exception thrown 530 * onuncaught=y|n: connect to debugger when uncaught exception thrown 531 * launch=<command-line>: launch the debugger itself 532 * 533 * The "transport" option is required, as is "address" if server=n. 534 */ 535static bool ParseJdwpOption(const std::string& name, const std::string& value) { 536 if (name == "transport") { 537 if (value == "dt_socket") { 538 gJdwpOptions.transport = JDWP::kJdwpTransportSocket; 539 } else if (value == "dt_android_adb") { 540 gJdwpOptions.transport = JDWP::kJdwpTransportAndroidAdb; 541 } else { 542 LOG(ERROR) << "JDWP transport not supported: " << value; 543 return false; 544 } 545 } else if (name == "server") { 546 if (value == "n") { 547 gJdwpOptions.server = false; 548 } else if (value == "y") { 549 gJdwpOptions.server = true; 550 } else { 551 LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'"; 552 return false; 553 } 554 } else if (name == "suspend") { 555 if (value == "n") { 556 gJdwpOptions.suspend = false; 557 } else if (value == "y") { 558 gJdwpOptions.suspend = true; 559 } else { 560 LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'"; 561 return false; 562 } 563 } else if (name == "address") { 564 /* this is either <port> or <host>:<port> */ 565 std::string port_string; 566 gJdwpOptions.host.clear(); 567 std::string::size_type colon = value.find(':'); 568 if (colon != std::string::npos) { 569 gJdwpOptions.host = value.substr(0, colon); 570 port_string = value.substr(colon + 1); 571 } else { 572 port_string = value; 573 } 574 if (port_string.empty()) { 575 LOG(ERROR) << "JDWP address missing port: " << value; 576 return false; 577 } 578 char* end; 579 uint64_t port = strtoul(port_string.c_str(), &end, 10); 580 if (*end != '\0' || port > 0xffff) { 581 LOG(ERROR) << "JDWP address has junk in port field: " << value; 582 return false; 583 } 584 gJdwpOptions.port = port; 585 } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") { 586 /* valid but unsupported */ 587 LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'"; 588 } else { 589 LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'"; 590 } 591 592 return true; 593} 594 595/* 596 * Parse the 
latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.: 597 * "transport=dt_socket,address=8000,server=y,suspend=n" 598 */ 599bool Dbg::ParseJdwpOptions(const std::string& options) { 600 VLOG(jdwp) << "ParseJdwpOptions: " << options; 601 602 std::vector<std::string> pairs; 603 Split(options, ',', pairs); 604 605 for (size_t i = 0; i < pairs.size(); ++i) { 606 std::string::size_type equals = pairs[i].find('='); 607 if (equals == std::string::npos) { 608 LOG(ERROR) << "Can't parse JDWP option '" << pairs[i] << "' in '" << options << "'"; 609 return false; 610 } 611 ParseJdwpOption(pairs[i].substr(0, equals), pairs[i].substr(equals + 1)); 612 } 613 614 if (gJdwpOptions.transport == JDWP::kJdwpTransportUnknown) { 615 LOG(ERROR) << "Must specify JDWP transport: " << options; 616 } 617 if (!gJdwpOptions.server && (gJdwpOptions.host.empty() || gJdwpOptions.port == 0)) { 618 LOG(ERROR) << "Must specify JDWP host and port when server=n: " << options; 619 return false; 620 } 621 622 gJdwpConfigured = true; 623 return true; 624} 625 626void Dbg::StartJdwp() { 627 if (!gJdwpAllowed || !IsJdwpConfigured()) { 628 // No JDWP for you! 629 return; 630 } 631 632 CHECK(gRegistry == nullptr); 633 gRegistry = new ObjectRegistry; 634 635 alloc_tracker_lock_ = new Mutex("AllocTracker lock"); 636 deoptimization_lock_ = new Mutex("deoptimization lock", kDeoptimizationLock); 637 // Init JDWP if the debugger is enabled. This may connect out to a 638 // debugger, passively listen for a debugger, or block waiting for a 639 // debugger. 640 gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions); 641 if (gJdwpState == NULL) { 642 // We probably failed because some other process has the port already, which means that 643 // if we don't abort the user is likely to think they're talking to us when they're actually 644 // talking to that other process. 645 LOG(FATAL) << "Debugger thread failed to initialize"; 646 } 647 648 // If a debugger has already attached, send the "welcome" message. 649 // This may cause us to suspend all threads. 650 if (gJdwpState->IsActive()) { 651 ScopedObjectAccess soa(Thread::Current()); 652 if (!gJdwpState->PostVMStart()) { 653 LOG(WARNING) << "Failed to post 'start' message to debugger"; 654 } 655 } 656} 657 658void Dbg::StopJdwp() { 659 // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection. 660 Disposed(); 661 delete gJdwpState; 662 gJdwpState = nullptr; 663 delete gRegistry; 664 gRegistry = nullptr; 665 delete alloc_tracker_lock_; 666 alloc_tracker_lock_ = nullptr; 667 delete deoptimization_lock_; 668 deoptimization_lock_ = nullptr; 669} 670 671void Dbg::GcDidFinish() { 672 if (gDdmHpifWhen != HPIF_WHEN_NEVER) { 673 ScopedObjectAccess soa(Thread::Current()); 674 VLOG(jdwp) << "Sending heap info to DDM"; 675 DdmSendHeapInfo(gDdmHpifWhen); 676 } 677 if (gDdmHpsgWhen != HPSG_WHEN_NEVER) { 678 ScopedObjectAccess soa(Thread::Current()); 679 VLOG(jdwp) << "Dumping heap to DDM"; 680 DdmSendHeapSegments(false); 681 } 682 if (gDdmNhsgWhen != HPSG_WHEN_NEVER) { 683 ScopedObjectAccess soa(Thread::Current()); 684 VLOG(jdwp) << "Dumping native heap to DDM"; 685 DdmSendHeapSegments(true); 686 } 687} 688 689void Dbg::SetJdwpAllowed(bool allowed) { 690 gJdwpAllowed = allowed; 691} 692 693DebugInvokeReq* Dbg::GetInvokeReq() { 694 return Thread::Current()->GetInvokeReq(); 695} 696 697Thread* Dbg::GetDebugThread() { 698 return (gJdwpState != NULL) ? 
gJdwpState->GetDebugThread() : NULL; 699} 700 701void Dbg::ClearWaitForEventThread() { 702 gJdwpState->ClearWaitForEventThread(); 703} 704 705void Dbg::Connected() { 706 CHECK(!gDebuggerConnected); 707 VLOG(jdwp) << "JDWP has attached"; 708 gDebuggerConnected = true; 709 gDisposed = false; 710} 711 712void Dbg::Disposed() { 713 gDisposed = true; 714} 715 716bool Dbg::IsDisposed() { 717 return gDisposed; 718} 719 720void Dbg::GoActive() { 721 // Enable all debugging features, including scans for breakpoints. 722 // This is a no-op if we're already active. 723 // Only called from the JDWP handler thread. 724 if (gDebuggerActive) { 725 return; 726 } 727 728 { 729 // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected? 730 MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); 731 CHECK_EQ(gBreakpoints.size(), 0U); 732 } 733 734 { 735 MutexLock mu(Thread::Current(), *deoptimization_lock_); 736 CHECK_EQ(deoptimization_requests_.size(), 0U); 737 CHECK_EQ(full_deoptimization_event_count_, 0U); 738 CHECK_EQ(delayed_full_undeoptimization_count_, 0U); 739 CHECK_EQ(dex_pc_change_event_ref_count_, 0U); 740 CHECK_EQ(method_enter_event_ref_count_, 0U); 741 CHECK_EQ(method_exit_event_ref_count_, 0U); 742 CHECK_EQ(field_read_event_ref_count_, 0U); 743 CHECK_EQ(field_write_event_ref_count_, 0U); 744 CHECK_EQ(exception_catch_event_ref_count_, 0U); 745 } 746 747 Runtime* runtime = Runtime::Current(); 748 runtime->GetThreadList()->SuspendAll(); 749 Thread* self = Thread::Current(); 750 ThreadState old_state = self->SetStateUnsafe(kRunnable); 751 CHECK_NE(old_state, kRunnable); 752 runtime->GetInstrumentation()->EnableDeoptimization(); 753 instrumentation_events_ = 0; 754 gDebuggerActive = true; 755 CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable); 756 runtime->GetThreadList()->ResumeAll(); 757 758 LOG(INFO) << "Debugger is active"; 759} 760 761void Dbg::Disconnected() { 762 CHECK(gDebuggerConnected); 763 764 LOG(INFO) << "Debugger is no longer active"; 765 766 // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread 767 // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener 768 // and clear the object registry. 769 Runtime* runtime = Runtime::Current(); 770 runtime->GetThreadList()->SuspendAll(); 771 Thread* self = Thread::Current(); 772 ThreadState old_state = self->SetStateUnsafe(kRunnable); 773 774 // Debugger may not be active at this point. 775 if (gDebuggerActive) { 776 { 777 // Since we're going to disable deoptimization, we clear the deoptimization requests queue. 778 // This prevents us from having any pending deoptimization request when the debugger attaches 779 // to us again while no event has been requested yet. 
780 MutexLock mu(Thread::Current(), *deoptimization_lock_); 781 deoptimization_requests_.clear(); 782 full_deoptimization_event_count_ = 0U; 783 delayed_full_undeoptimization_count_ = 0U; 784 } 785 if (instrumentation_events_ != 0) { 786 runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener, 787 instrumentation_events_); 788 instrumentation_events_ = 0; 789 } 790 runtime->GetInstrumentation()->DisableDeoptimization(); 791 gDebuggerActive = false; 792 } 793 gRegistry->Clear(); 794 gDebuggerConnected = false; 795 CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable); 796 runtime->GetThreadList()->ResumeAll(); 797} 798 799bool Dbg::IsDebuggerActive() { 800 return gDebuggerActive; 801} 802 803bool Dbg::IsJdwpConfigured() { 804 return gJdwpConfigured; 805} 806 807int64_t Dbg::LastDebuggerActivity() { 808 return gJdwpState->LastDebuggerActivity(); 809} 810 811void Dbg::UndoDebuggerSuspensions() { 812 Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions(); 813} 814 815std::string Dbg::GetClassName(JDWP::RefTypeId class_id) { 816 mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id); 817 if (o == NULL) { 818 return "NULL"; 819 } 820 if (o == ObjectRegistry::kInvalidObject) { 821 return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id)); 822 } 823 if (!o->IsClass()) { 824 return StringPrintf("non-class %p", o); // This is only used for debugging output anyway. 825 } 826 return DescriptorToName(o->AsClass()->GetDescriptor().c_str()); 827} 828 829JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_object_id) { 830 JDWP::JdwpError status; 831 mirror::Class* c = DecodeClass(id, status); 832 if (c == NULL) { 833 return status; 834 } 835 class_object_id = gRegistry->Add(c); 836 return JDWP::ERR_NONE; 837} 838 839JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclass_id) { 840 JDWP::JdwpError status; 841 mirror::Class* c = DecodeClass(id, status); 842 if (c == NULL) { 843 return status; 844 } 845 if (c->IsInterface()) { 846 // http://code.google.com/p/android/issues/detail?id=20856 847 superclass_id = 0; 848 } else { 849 superclass_id = gRegistry->Add(c->GetSuperClass()); 850 } 851 return JDWP::ERR_NONE; 852} 853 854JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) { 855 mirror::Object* o = gRegistry->Get<mirror::Object*>(id); 856 if (o == NULL || o == ObjectRegistry::kInvalidObject) { 857 return JDWP::ERR_INVALID_OBJECT; 858 } 859 expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader())); 860 return JDWP::ERR_NONE; 861} 862 863JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) { 864 JDWP::JdwpError status; 865 mirror::Class* c = DecodeClass(id, status); 866 if (c == NULL) { 867 return status; 868 } 869 870 uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask; 871 872 // Set ACC_SUPER. Dex files don't contain this flag but only classes are supposed to have it set, 873 // not interfaces. 874 // Class.getModifiers doesn't return it, but JDWP does, so we set it here. 
875 if ((access_flags & kAccInterface) == 0) { 876 access_flags |= kAccSuper; 877 } 878 879 expandBufAdd4BE(pReply, access_flags); 880 881 return JDWP::ERR_NONE; 882} 883 884JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply) 885 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 886 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id); 887 if (o == NULL || o == ObjectRegistry::kInvalidObject) { 888 return JDWP::ERR_INVALID_OBJECT; 889 } 890 891 // Ensure all threads are suspended while we read objects' lock words. 892 Thread* self = Thread::Current(); 893 CHECK_EQ(self->GetState(), kRunnable); 894 self->TransitionFromRunnableToSuspended(kSuspended); 895 Runtime::Current()->GetThreadList()->SuspendAll(); 896 897 MonitorInfo monitor_info(o); 898 899 Runtime::Current()->GetThreadList()->ResumeAll(); 900 self->TransitionFromSuspendedToRunnable(); 901 902 if (monitor_info.owner_ != NULL) { 903 expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer())); 904 } else { 905 expandBufAddObjectId(reply, gRegistry->Add(NULL)); 906 } 907 expandBufAdd4BE(reply, monitor_info.entry_count_); 908 expandBufAdd4BE(reply, monitor_info.waiters_.size()); 909 for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) { 910 expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer())); 911 } 912 return JDWP::ERR_NONE; 913} 914 915JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id, 916 std::vector<JDWP::ObjectId>& monitors, 917 std::vector<uint32_t>& stack_depths) { 918 struct OwnedMonitorVisitor : public StackVisitor { 919 OwnedMonitorVisitor(Thread* thread, Context* context, 920 std::vector<JDWP::ObjectId>* monitor_vector, 921 std::vector<uint32_t>* stack_depth_vector) 922 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 923 : StackVisitor(thread, context), current_stack_depth(0), 924 monitors(monitor_vector), stack_depths(stack_depth_vector) {} 925 926 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 927 // annotalysis. 
928 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 929 if (!GetMethod()->IsRuntimeMethod()) { 930 Monitor::VisitLocks(this, AppendOwnedMonitors, this); 931 ++current_stack_depth; 932 } 933 return true; 934 } 935 936 static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg) 937 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 938 OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg); 939 visitor->monitors->push_back(gRegistry->Add(owned_monitor)); 940 visitor->stack_depths->push_back(visitor->current_stack_depth); 941 } 942 943 size_t current_stack_depth; 944 std::vector<JDWP::ObjectId>* monitors; 945 std::vector<uint32_t>* stack_depths; 946 }; 947 948 ScopedObjectAccessUnchecked soa(Thread::Current()); 949 Thread* thread; 950 { 951 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 952 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 953 if (error != JDWP::ERR_NONE) { 954 return error; 955 } 956 if (!IsSuspendedForDebugger(soa, thread)) { 957 return JDWP::ERR_THREAD_NOT_SUSPENDED; 958 } 959 } 960 std::unique_ptr<Context> context(Context::Create()); 961 OwnedMonitorVisitor visitor(thread, context.get(), &monitors, &stack_depths); 962 visitor.WalkStack(); 963 return JDWP::ERR_NONE; 964} 965 966JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id, 967 JDWP::ObjectId& contended_monitor) { 968 mirror::Object* contended_monitor_obj; 969 ScopedObjectAccessUnchecked soa(Thread::Current()); 970 { 971 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 972 Thread* thread; 973 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 974 if (error != JDWP::ERR_NONE) { 975 return error; 976 } 977 if (!IsSuspendedForDebugger(soa, thread)) { 978 return JDWP::ERR_THREAD_NOT_SUSPENDED; 979 } 980 contended_monitor_obj = Monitor::GetContendedMonitor(thread); 981 } 982 // Add() requires the thread_list_lock_ not held to avoid the lock 983 // level violation. 984 contended_monitor = gRegistry->Add(contended_monitor_obj); 985 return JDWP::ERR_NONE; 986} 987 988JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids, 989 std::vector<uint64_t>& counts) 990 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 991 gc::Heap* heap = Runtime::Current()->GetHeap(); 992 heap->CollectGarbage(false); 993 std::vector<mirror::Class*> classes; 994 counts.clear(); 995 for (size_t i = 0; i < class_ids.size(); ++i) { 996 JDWP::JdwpError status; 997 mirror::Class* c = DecodeClass(class_ids[i], status); 998 if (c == NULL) { 999 return status; 1000 } 1001 classes.push_back(c); 1002 counts.push_back(0); 1003 } 1004 heap->CountInstances(classes, false, &counts[0]); 1005 return JDWP::ERR_NONE; 1006} 1007 1008JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>& instances) 1009 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1010 gc::Heap* heap = Runtime::Current()->GetHeap(); 1011 // We only want reachable instances, so do a GC. 
1012 heap->CollectGarbage(false); 1013 JDWP::JdwpError status; 1014 mirror::Class* c = DecodeClass(class_id, status); 1015 if (c == nullptr) { 1016 return status; 1017 } 1018 std::vector<mirror::Object*> raw_instances; 1019 Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances); 1020 for (size_t i = 0; i < raw_instances.size(); ++i) { 1021 instances.push_back(gRegistry->Add(raw_instances[i])); 1022 } 1023 return JDWP::ERR_NONE; 1024} 1025 1026JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count, 1027 std::vector<JDWP::ObjectId>& referring_objects) 1028 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1029 gc::Heap* heap = Runtime::Current()->GetHeap(); 1030 heap->CollectGarbage(false); 1031 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id); 1032 if (o == NULL || o == ObjectRegistry::kInvalidObject) { 1033 return JDWP::ERR_INVALID_OBJECT; 1034 } 1035 std::vector<mirror::Object*> raw_instances; 1036 heap->GetReferringObjects(o, max_count, raw_instances); 1037 for (size_t i = 0; i < raw_instances.size(); ++i) { 1038 referring_objects.push_back(gRegistry->Add(raw_instances[i])); 1039 } 1040 return JDWP::ERR_NONE; 1041} 1042 1043JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id) 1044 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1045 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id); 1046 if (o == NULL || o == ObjectRegistry::kInvalidObject) { 1047 return JDWP::ERR_INVALID_OBJECT; 1048 } 1049 gRegistry->DisableCollection(object_id); 1050 return JDWP::ERR_NONE; 1051} 1052 1053JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id) 1054 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1055 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id); 1056 // Unlike DisableCollection, JDWP specs do not state an invalid object causes an error. The RI 1057 // also ignores these cases and never return an error. However it's not obvious why this command 1058 // should behave differently from DisableCollection and IsCollected commands. So let's be more 1059 // strict and return an error if this happens. 1060 if (o == NULL || o == ObjectRegistry::kInvalidObject) { 1061 return JDWP::ERR_INVALID_OBJECT; 1062 } 1063 gRegistry->EnableCollection(object_id); 1064 return JDWP::ERR_NONE; 1065} 1066 1067JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool& is_collected) 1068 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1069 if (object_id == 0) { 1070 // Null object id is invalid. 1071 return JDWP::ERR_INVALID_OBJECT; 1072 } 1073 // JDWP specs state an INVALID_OBJECT error is returned if the object ID is not valid. However 1074 // the RI seems to ignore this and assume object has been collected. 
1075 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id); 1076 if (o == NULL || o == ObjectRegistry::kInvalidObject) { 1077 is_collected = true; 1078 } else { 1079 is_collected = gRegistry->IsCollected(object_id); 1080 } 1081 return JDWP::ERR_NONE; 1082} 1083 1084void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count) 1085 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1086 gRegistry->DisposeObject(object_id, reference_count); 1087} 1088 1089static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass) 1090 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1091 DCHECK(klass != nullptr); 1092 if (klass->IsArrayClass()) { 1093 return JDWP::TT_ARRAY; 1094 } else if (klass->IsInterface()) { 1095 return JDWP::TT_INTERFACE; 1096 } else { 1097 return JDWP::TT_CLASS; 1098 } 1099} 1100 1101JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) { 1102 JDWP::JdwpError status; 1103 mirror::Class* c = DecodeClass(class_id, status); 1104 if (c == NULL) { 1105 return status; 1106 } 1107 1108 JDWP::JdwpTypeTag type_tag = GetTypeTag(c); 1109 expandBufAdd1(pReply, type_tag); 1110 expandBufAddRefTypeId(pReply, class_id); 1111 return JDWP::ERR_NONE; 1112} 1113 1114void Dbg::GetClassList(std::vector<JDWP::RefTypeId>& classes) { 1115 // Get the complete list of reference classes (i.e. all classes except 1116 // the primitive types). 1117 // Returns a newly-allocated buffer full of RefTypeId values. 1118 struct ClassListCreator { 1119 explicit ClassListCreator(std::vector<JDWP::RefTypeId>& classes) : classes(classes) { 1120 } 1121 1122 static bool Visit(mirror::Class* c, void* arg) { 1123 return reinterpret_cast<ClassListCreator*>(arg)->Visit(c); 1124 } 1125 1126 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 1127 // annotalysis. 1128 bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS { 1129 if (!c->IsPrimitive()) { 1130 classes.push_back(gRegistry->AddRefType(c)); 1131 } 1132 return true; 1133 } 1134 1135 std::vector<JDWP::RefTypeId>& classes; 1136 }; 1137 1138 ClassListCreator clc(classes); 1139 Runtime::Current()->GetClassLinker()->VisitClasses(ClassListCreator::Visit, &clc); 1140} 1141 1142JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag, uint32_t* pStatus, std::string* pDescriptor) { 1143 JDWP::JdwpError status; 1144 mirror::Class* c = DecodeClass(class_id, status); 1145 if (c == NULL) { 1146 return status; 1147 } 1148 1149 if (c->IsArrayClass()) { 1150 *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED; 1151 *pTypeTag = JDWP::TT_ARRAY; 1152 } else { 1153 if (c->IsErroneous()) { 1154 *pStatus = JDWP::CS_ERROR; 1155 } else { 1156 *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED; 1157 } 1158 *pTypeTag = c->IsInterface() ? 
JDWP::TT_INTERFACE : JDWP::TT_CLASS; 1159 } 1160 1161 if (pDescriptor != NULL) { 1162 *pDescriptor = c->GetDescriptor(); 1163 } 1164 return JDWP::ERR_NONE; 1165} 1166 1167void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>& ids) { 1168 std::vector<mirror::Class*> classes; 1169 Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes); 1170 ids.clear(); 1171 for (size_t i = 0; i < classes.size(); ++i) { 1172 ids.push_back(gRegistry->Add(classes[i])); 1173 } 1174} 1175 1176JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply) 1177 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1178 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id); 1179 if (o == NULL || o == ObjectRegistry::kInvalidObject) { 1180 return JDWP::ERR_INVALID_OBJECT; 1181 } 1182 1183 JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass()); 1184 JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass()); 1185 1186 expandBufAdd1(pReply, type_tag); 1187 expandBufAddRefTypeId(pReply, type_id); 1188 1189 return JDWP::ERR_NONE; 1190} 1191 1192JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) { 1193 JDWP::JdwpError status; 1194 mirror::Class* c = DecodeClass(class_id, status); 1195 if (c == NULL) { 1196 return status; 1197 } 1198 *signature = c->GetDescriptor(); 1199 return JDWP::ERR_NONE; 1200} 1201 1202JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result) { 1203 JDWP::JdwpError status; 1204 mirror::Class* c = DecodeClass(class_id, status); 1205 if (c == nullptr) { 1206 return status; 1207 } 1208 const char* source_file = c->GetSourceFile(); 1209 if (source_file == nullptr) { 1210 return JDWP::ERR_ABSENT_INFORMATION; 1211 } 1212 result = source_file; 1213 return JDWP::ERR_NONE; 1214} 1215 1216JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t& tag) { 1217 ScopedObjectAccessUnchecked soa(Thread::Current()); 1218 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id); 1219 if (o == ObjectRegistry::kInvalidObject) { 1220 return JDWP::ERR_INVALID_OBJECT; 1221 } 1222 tag = TagFromObject(soa, o); 1223 return JDWP::ERR_NONE; 1224} 1225 1226size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) { 1227 switch (tag) { 1228 case JDWP::JT_VOID: 1229 return 0; 1230 case JDWP::JT_BYTE: 1231 case JDWP::JT_BOOLEAN: 1232 return 1; 1233 case JDWP::JT_CHAR: 1234 case JDWP::JT_SHORT: 1235 return 2; 1236 case JDWP::JT_FLOAT: 1237 case JDWP::JT_INT: 1238 return 4; 1239 case JDWP::JT_ARRAY: 1240 case JDWP::JT_OBJECT: 1241 case JDWP::JT_STRING: 1242 case JDWP::JT_THREAD: 1243 case JDWP::JT_THREAD_GROUP: 1244 case JDWP::JT_CLASS_LOADER: 1245 case JDWP::JT_CLASS_OBJECT: 1246 return sizeof(JDWP::ObjectId); 1247 case JDWP::JT_DOUBLE: 1248 case JDWP::JT_LONG: 1249 return 8; 1250 default: 1251 LOG(FATAL) << "Unknown tag " << tag; 1252 return -1; 1253 } 1254} 1255 1256JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int& length) { 1257 JDWP::JdwpError status; 1258 mirror::Array* a = DecodeArray(array_id, status); 1259 if (a == NULL) { 1260 return status; 1261 } 1262 length = a->GetLength(); 1263 return JDWP::ERR_NONE; 1264} 1265 1266JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) { 1267 JDWP::JdwpError status; 1268 mirror::Array* a = DecodeArray(array_id, status); 1269 if (a == nullptr) { 1270 return status; 1271 } 1272 1273 if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) { 1274 
LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count; 1275 return JDWP::ERR_INVALID_LENGTH; 1276 } 1277 std::string descriptor(a->GetClass()->GetDescriptor()); 1278 JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor.c_str() + 1); 1279 1280 expandBufAdd1(pReply, tag); 1281 expandBufAdd4BE(pReply, count); 1282 1283 if (IsPrimitiveTag(tag)) { 1284 size_t width = GetTagWidth(tag); 1285 uint8_t* dst = expandBufAddSpace(pReply, count * width); 1286 if (width == 8) { 1287 const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0)); 1288 for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]); 1289 } else if (width == 4) { 1290 const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0)); 1291 for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]); 1292 } else if (width == 2) { 1293 const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0)); 1294 for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]); 1295 } else { 1296 const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0)); 1297 memcpy(dst, &src[offset * width], count * width); 1298 } 1299 } else { 1300 ScopedObjectAccessUnchecked soa(Thread::Current()); 1301 mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>(); 1302 for (int i = 0; i < count; ++i) { 1303 mirror::Object* element = oa->Get(offset + i); 1304 JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element) 1305 : tag; 1306 expandBufAdd1(pReply, specific_tag); 1307 expandBufAddObjectId(pReply, gRegistry->Add(element)); 1308 } 1309 } 1310 1311 return JDWP::ERR_NONE; 1312} 1313 1314template <typename T> 1315static void CopyArrayData(mirror::Array* a, JDWP::Request& src, int offset, int count) 1316 NO_THREAD_SAFETY_ANALYSIS { 1317 // TODO: fix when annotalysis correctly handles non-member functions. 
1318 DCHECK(a->GetClass()->IsPrimitiveArray()); 1319 1320 T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset)); 1321 for (int i = 0; i < count; ++i) { 1322 *dst++ = src.ReadValue(sizeof(T)); 1323 } 1324} 1325 1326JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count, 1327 JDWP::Request& request) 1328 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1329 JDWP::JdwpError status; 1330 mirror::Array* dst = DecodeArray(array_id, status); 1331 if (dst == NULL) { 1332 return status; 1333 } 1334 1335 if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) { 1336 LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count; 1337 return JDWP::ERR_INVALID_LENGTH; 1338 } 1339 std::string descriptor = dst->GetClass()->GetDescriptor(); 1340 JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor.c_str() + 1); 1341 1342 if (IsPrimitiveTag(tag)) { 1343 size_t width = GetTagWidth(tag); 1344 if (width == 8) { 1345 CopyArrayData<uint64_t>(dst, request, offset, count); 1346 } else if (width == 4) { 1347 CopyArrayData<uint32_t>(dst, request, offset, count); 1348 } else if (width == 2) { 1349 CopyArrayData<uint16_t>(dst, request, offset, count); 1350 } else { 1351 CopyArrayData<uint8_t>(dst, request, offset, count); 1352 } 1353 } else { 1354 mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>(); 1355 for (int i = 0; i < count; ++i) { 1356 JDWP::ObjectId id = request.ReadObjectId(); 1357 mirror::Object* o = gRegistry->Get<mirror::Object*>(id); 1358 if (o == ObjectRegistry::kInvalidObject) { 1359 return JDWP::ERR_INVALID_OBJECT; 1360 } 1361 oa->Set<false>(offset + i, o); 1362 } 1363 } 1364 1365 return JDWP::ERR_NONE; 1366} 1367 1368JDWP::ObjectId Dbg::CreateString(const std::string& str) { 1369 return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str())); 1370} 1371 1372JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId& new_object) { 1373 JDWP::JdwpError status; 1374 mirror::Class* c = DecodeClass(class_id, status); 1375 if (c == NULL) { 1376 return status; 1377 } 1378 new_object = gRegistry->Add(c->AllocObject(Thread::Current())); 1379 return JDWP::ERR_NONE; 1380} 1381 1382/* 1383 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]". 
1384 */ 1385JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length, 1386 JDWP::ObjectId& new_array) { 1387 JDWP::JdwpError status; 1388 mirror::Class* c = DecodeClass(array_class_id, status); 1389 if (c == NULL) { 1390 return status; 1391 } 1392 new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length, 1393 c->GetComponentSize(), 1394 Runtime::Current()->GetHeap()->GetCurrentAllocator())); 1395 return JDWP::ERR_NONE; 1396} 1397 1398bool Dbg::MatchType(JDWP::RefTypeId instance_class_id, JDWP::RefTypeId class_id) { 1399 JDWP::JdwpError status; 1400 mirror::Class* c1 = DecodeClass(instance_class_id, status); 1401 CHECK(c1 != NULL); 1402 mirror::Class* c2 = DecodeClass(class_id, status); 1403 CHECK(c2 != NULL); 1404 return c2->IsAssignableFrom(c1); 1405} 1406 1407static JDWP::FieldId ToFieldId(const mirror::ArtField* f) 1408 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1409 CHECK(!kMovingFields); 1410 return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f)); 1411} 1412 1413static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m) 1414 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1415 CHECK(!kMovingMethods); 1416 return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m)); 1417} 1418 1419static mirror::ArtField* FromFieldId(JDWP::FieldId fid) 1420 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1421 CHECK(!kMovingFields); 1422 return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid)); 1423} 1424 1425static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid) 1426 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1427 CHECK(!kMovingMethods); 1428 return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid)); 1429} 1430 1431static void SetLocation(JDWP::JdwpLocation& location, mirror::ArtMethod* m, uint32_t dex_pc) 1432 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1433 if (m == NULL) { 1434 memset(&location, 0, sizeof(location)); 1435 } else { 1436 mirror::Class* c = m->GetDeclaringClass(); 1437 location.type_tag = GetTypeTag(c); 1438 location.class_id = gRegistry->AddRefType(c); 1439 location.method_id = ToMethodId(m); 1440 location.dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc; 1441 } 1442} 1443 1444std::string Dbg::GetMethodName(JDWP::MethodId method_id) 1445 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1446 mirror::ArtMethod* m = FromMethodId(method_id); 1447 return m->GetName(); 1448} 1449 1450std::string Dbg::GetFieldName(JDWP::FieldId field_id) 1451 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1452 return FromFieldId(field_id)->GetName(); 1453} 1454 1455/* 1456 * Augment the access flags for synthetic methods and fields by setting 1457 * the (as described by the spec) "0xf0000000 bit". Also, strip out any 1458 * flags not specified by the Java programming language. 1459 */ 1460static uint32_t MangleAccessFlags(uint32_t accessFlags) { 1461 accessFlags &= kAccJavaFlagsMask; 1462 if ((accessFlags & kAccSynthetic) != 0) { 1463 accessFlags |= 0xf0000000; 1464 } 1465 return accessFlags; 1466} 1467 1468/* 1469 * Circularly shifts registers so that arguments come first. Debuggers 1470 * expect slots to begin with arguments, but dex code places them at 1471 * the end. 
1472 */ 1473static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m) 1474 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1475 const DexFile::CodeItem* code_item = m->GetCodeItem(); 1476 if (code_item == nullptr) { 1477 // We should not get here for a method without code (native, proxy or abstract). Log it and 1478 // return the slot as is since all registers are arguments. 1479 LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m); 1480 return slot; 1481 } 1482 uint16_t ins_size = code_item->ins_size_; 1483 uint16_t locals_size = code_item->registers_size_ - ins_size; 1484 if (slot >= locals_size) { 1485 return slot - locals_size; 1486 } else { 1487 return slot + ins_size; 1488 } 1489} 1490 1491/* 1492 * Circularly shifts registers so that arguments come last. Reverts 1493 * slots to dex style argument placement. 1494 */ 1495static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m) 1496 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1497 const DexFile::CodeItem* code_item = m->GetCodeItem(); 1498 if (code_item == nullptr) { 1499 // We should not get here for a method without code (native, proxy or abstract). Log it and 1500 // return the slot as is since all registers are arguments. 1501 LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m); 1502 return slot; 1503 } 1504 uint16_t ins_size = code_item->ins_size_; 1505 uint16_t locals_size = code_item->registers_size_ - ins_size; 1506 if (slot < ins_size) { 1507 return slot + locals_size; 1508 } else { 1509 return slot - ins_size; 1510 } 1511} 1512 1513JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) { 1514 JDWP::JdwpError status; 1515 mirror::Class* c = DecodeClass(class_id, status); 1516 if (c == NULL) { 1517 return status; 1518 } 1519 1520 size_t instance_field_count = c->NumInstanceFields(); 1521 size_t static_field_count = c->NumStaticFields(); 1522 1523 expandBufAdd4BE(pReply, instance_field_count + static_field_count); 1524 1525 for (size_t i = 0; i < instance_field_count + static_field_count; ++i) { 1526 mirror::ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count); 1527 expandBufAddFieldId(pReply, ToFieldId(f)); 1528 expandBufAddUtf8String(pReply, f->GetName()); 1529 expandBufAddUtf8String(pReply, f->GetTypeDescriptor()); 1530 if (with_generic) { 1531 static const char genericSignature[1] = ""; 1532 expandBufAddUtf8String(pReply, genericSignature); 1533 } 1534 expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags())); 1535 } 1536 return JDWP::ERR_NONE; 1537} 1538 1539JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic, 1540 JDWP::ExpandBuf* pReply) { 1541 JDWP::JdwpError status; 1542 mirror::Class* c = DecodeClass(class_id, status); 1543 if (c == NULL) { 1544 return status; 1545 } 1546 1547 size_t direct_method_count = c->NumDirectMethods(); 1548 size_t virtual_method_count = c->NumVirtualMethods(); 1549 1550 expandBufAdd4BE(pReply, direct_method_count + virtual_method_count); 1551 1552 for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) { 1553 mirror::ArtMethod* m = (i < direct_method_count) ? 
c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count); 1554 expandBufAddMethodId(pReply, ToMethodId(m)); 1555 expandBufAddUtf8String(pReply, m->GetName()); 1556 expandBufAddUtf8String(pReply, m->GetSignature().ToString()); 1557 if (with_generic) { 1558 static const char genericSignature[1] = ""; 1559 expandBufAddUtf8String(pReply, genericSignature); 1560 } 1561 expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags())); 1562 } 1563 return JDWP::ERR_NONE; 1564} 1565 1566JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) { 1567 JDWP::JdwpError status; 1568 Thread* self = Thread::Current(); 1569 StackHandleScope<1> hs(self); 1570 Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, status))); 1571 if (c.Get() == nullptr) { 1572 return status; 1573 } 1574 size_t interface_count = c->NumDirectInterfaces(); 1575 expandBufAdd4BE(pReply, interface_count); 1576 for (size_t i = 0; i < interface_count; ++i) { 1577 expandBufAddRefTypeId(pReply, 1578 gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i))); 1579 } 1580 return JDWP::ERR_NONE; 1581} 1582 1583void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply) 1584 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1585 struct DebugCallbackContext { 1586 int numItems; 1587 JDWP::ExpandBuf* pReply; 1588 1589 static bool Callback(void* context, uint32_t address, uint32_t line_number) { 1590 DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context); 1591 expandBufAdd8BE(pContext->pReply, address); 1592 expandBufAdd4BE(pContext->pReply, line_number); 1593 pContext->numItems++; 1594 return false; 1595 } 1596 }; 1597 mirror::ArtMethod* m = FromMethodId(method_id); 1598 const DexFile::CodeItem* code_item = m->GetCodeItem(); 1599 uint64_t start, end; 1600 if (code_item == nullptr) { 1601 DCHECK(m->IsNative() || m->IsProxyMethod()); 1602 start = -1; 1603 end = -1; 1604 } else { 1605 start = 0; 1606 // Return the index of the last instruction 1607 end = code_item->insns_size_in_code_units_ - 1; 1608 } 1609 1610 expandBufAdd8BE(pReply, start); 1611 expandBufAdd8BE(pReply, end); 1612 1613 // Add numLines later 1614 size_t numLinesOffset = expandBufGetLength(pReply); 1615 expandBufAdd4BE(pReply, 0); 1616 1617 DebugCallbackContext context; 1618 context.numItems = 0; 1619 context.pReply = pReply; 1620 1621 if (code_item != nullptr) { 1622 m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(), 1623 DebugCallbackContext::Callback, NULL, &context); 1624 } 1625 1626 JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems); 1627} 1628 1629void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic, 1630 JDWP::ExpandBuf* pReply) { 1631 struct DebugCallbackContext { 1632 mirror::ArtMethod* method; 1633 JDWP::ExpandBuf* pReply; 1634 size_t variable_count; 1635 bool with_generic; 1636 1637 static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress, 1638 const char* name, const char* descriptor, const char* signature) 1639 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1640 DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context); 1641 1642 VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d", 1643 pContext->variable_count, startAddress, endAddress - startAddress, 1644 name, descriptor, signature, slot, 1645 MangleSlot(slot, pContext->method)); 1646 1647 slot = 
MangleSlot(slot, pContext->method); 1648 1649 expandBufAdd8BE(pContext->pReply, startAddress); 1650 expandBufAddUtf8String(pContext->pReply, name); 1651 expandBufAddUtf8String(pContext->pReply, descriptor); 1652 if (pContext->with_generic) { 1653 expandBufAddUtf8String(pContext->pReply, signature); 1654 } 1655 expandBufAdd4BE(pContext->pReply, endAddress - startAddress); 1656 expandBufAdd4BE(pContext->pReply, slot); 1657 1658 ++pContext->variable_count; 1659 } 1660 }; 1661 mirror::ArtMethod* m = FromMethodId(method_id); 1662 1663 // arg_count considers doubles and longs to take 2 units. 1664 // variable_count considers everything to take 1 unit. 1665 std::string shorty(m->GetShorty()); 1666 expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty)); 1667 1668 // We don't know the total number of variables yet, so leave a blank and update it later. 1669 size_t variable_count_offset = expandBufGetLength(pReply); 1670 expandBufAdd4BE(pReply, 0); 1671 1672 DebugCallbackContext context; 1673 context.method = m; 1674 context.pReply = pReply; 1675 context.variable_count = 0; 1676 context.with_generic = with_generic; 1677 1678 const DexFile::CodeItem* code_item = m->GetCodeItem(); 1679 if (code_item != nullptr) { 1680 m->GetDexFile()->DecodeDebugInfo( 1681 code_item, m->IsStatic(), m->GetDexMethodIndex(), NULL, DebugCallbackContext::Callback, 1682 &context); 1683 } 1684 1685 JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count); 1686} 1687 1688void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value, 1689 JDWP::ExpandBuf* pReply) { 1690 mirror::ArtMethod* m = FromMethodId(method_id); 1691 JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty()); 1692 OutputJValue(tag, return_value, pReply); 1693} 1694 1695void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value, 1696 JDWP::ExpandBuf* pReply) { 1697 mirror::ArtField* f = FromFieldId(field_id); 1698 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor()); 1699 OutputJValue(tag, field_value, pReply); 1700} 1701 1702JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id, 1703 std::vector<uint8_t>& bytecodes) 1704 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1705 mirror::ArtMethod* m = FromMethodId(method_id); 1706 if (m == NULL) { 1707 return JDWP::ERR_INVALID_METHODID; 1708 } 1709 const DexFile::CodeItem* code_item = m->GetCodeItem(); 1710 size_t byte_count = code_item->insns_size_in_code_units_ * 2; 1711 const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_); 1712 const uint8_t* end = begin + byte_count; 1713 for (const uint8_t* p = begin; p != end; ++p) { 1714 bytecodes.push_back(*p); 1715 } 1716 return JDWP::ERR_NONE; 1717} 1718 1719JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) { 1720 return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor()); 1721} 1722 1723JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) { 1724 return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor()); 1725} 1726 1727static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id, 1728 JDWP::FieldId field_id, JDWP::ExpandBuf* pReply, 1729 bool is_static) 1730 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1731 JDWP::JdwpError status; 1732 mirror::Class* c = DecodeClass(ref_type_id, status); 1733 if (ref_type_id != 0 && c == NULL) { 1734 return status; 1735 } 1736 1737 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id); 1738 if 
((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  mirror::ArtField* f = FromFieldId(field_id);

  mirror::Class* receiver_class = c;
  if (receiver_class == NULL && o != NULL) {
    receiver_class = o->GetClass();
  }
  // TODO: should we give up now if receiver_class is NULL?
  if (receiver_class != NULL && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) {
    LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class);
    return JDWP::ERR_INVALID_FIELDID;
  }

  // The RI only enforces the static/non-static mismatch in one direction.
  // TODO: should we change the tests and check both?
  if (is_static) {
    if (!f->IsStatic()) {
      return JDWP::ERR_INVALID_FIELDID;
    }
  } else {
    if (f->IsStatic()) {
      LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.GetValues on static field " << PrettyField(f);
    }
  }
  if (f->IsStatic()) {
    o = f->GetDeclaringClass();
  }

  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());
  JValue field_value;
  if (tag == JDWP::JT_VOID) {
    LOG(FATAL) << "Unknown tag: " << tag;
  } else if (!IsPrimitiveTag(tag)) {
    field_value.SetL(f->GetObject(o));
  } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
    field_value.SetJ(f->Get64(o));
  } else {
    field_value.SetI(f->Get32(o));
  }
  Dbg::OutputJValue(tag, &field_value, pReply);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                   JDWP::ExpandBuf* pReply) {
  return GetFieldValueImpl(0, object_id, field_id, pReply, false);
}

JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id,
                                         JDWP::ExpandBuf* pReply) {
  return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true);
}

static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id,
                                         uint64_t value, int width, bool is_static)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  mirror::ArtField* f = FromFieldId(field_id);

  // The RI only enforces the static/non-static mismatch in one direction.
  // TODO: should we change the tests and check both?
  if (is_static) {
    if (!f->IsStatic()) {
      return JDWP::ERR_INVALID_FIELDID;
    }
  } else {
    if (f->IsStatic()) {
      LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.SetValues on static field " << PrettyField(f);
    }
  }
  if (f->IsStatic()) {
    o = f->GetDeclaringClass();
  }

  JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor());

  if (IsPrimitiveTag(tag)) {
    if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) {
      CHECK_EQ(width, 8);
      // Debugging can't use transactional mode (runtime only).
      f->Set64<false>(o, value);
    } else {
      CHECK_LE(width, 4);
      // Debugging can't use transactional mode (runtime only).
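      // Booleans, bytes, chars, shorts, ints and floats all fit in a single 32-bit field write;
      // 'value' carries at most 'width' meaningful bytes here (e.g. a JT_SHORT arrives with
      // width == 2).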
1827 f->Set32<false>(o, value); 1828 } 1829 } else { 1830 mirror::Object* v = gRegistry->Get<mirror::Object*>(value); 1831 if (v == ObjectRegistry::kInvalidObject) { 1832 return JDWP::ERR_INVALID_OBJECT; 1833 } 1834 if (v != NULL) { 1835 mirror::Class* field_type; 1836 { 1837 StackHandleScope<3> hs(Thread::Current()); 1838 HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v)); 1839 HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f)); 1840 HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o)); 1841 field_type = FieldHelper(h_f).GetType(); 1842 } 1843 if (!field_type->IsAssignableFrom(v->GetClass())) { 1844 return JDWP::ERR_INVALID_OBJECT; 1845 } 1846 } 1847 // Debugging can't use transactional mode (runtime only). 1848 f->SetObject<false>(o, v); 1849 } 1850 1851 return JDWP::ERR_NONE; 1852} 1853 1854JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value, 1855 int width) { 1856 return SetFieldValueImpl(object_id, field_id, value, width, false); 1857} 1858 1859JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) { 1860 return SetFieldValueImpl(0, field_id, value, width, true); 1861} 1862 1863std::string Dbg::StringToUtf8(JDWP::ObjectId string_id) { 1864 mirror::String* s = gRegistry->Get<mirror::String*>(string_id); 1865 return s->ToModifiedUtf8(); 1866} 1867 1868void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) { 1869 if (IsPrimitiveTag(tag)) { 1870 expandBufAdd1(pReply, tag); 1871 if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) { 1872 expandBufAdd1(pReply, return_value->GetI()); 1873 } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) { 1874 expandBufAdd2BE(pReply, return_value->GetI()); 1875 } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) { 1876 expandBufAdd4BE(pReply, return_value->GetI()); 1877 } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) { 1878 expandBufAdd8BE(pReply, return_value->GetJ()); 1879 } else { 1880 CHECK_EQ(tag, JDWP::JT_VOID); 1881 } 1882 } else { 1883 ScopedObjectAccessUnchecked soa(Thread::Current()); 1884 mirror::Object* value = return_value->GetL(); 1885 expandBufAdd1(pReply, TagFromObject(soa, value)); 1886 expandBufAddObjectId(pReply, gRegistry->Add(value)); 1887 } 1888} 1889 1890JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string& name) { 1891 ScopedObjectAccessUnchecked soa(Thread::Current()); 1892 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 1893 Thread* thread; 1894 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 1895 if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) { 1896 return error; 1897 } 1898 1899 // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName. 
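  // Read the name straight out of the java.lang.Thread peer: the managed object (and its 'name'
  // field) is still reachable after the native Thread* is gone, so this also works for zombies.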
1900 mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id); 1901 mirror::ArtField* java_lang_Thread_name_field = 1902 soa.DecodeField(WellKnownClasses::java_lang_Thread_name); 1903 mirror::String* s = 1904 reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object)); 1905 if (s != NULL) { 1906 name = s->ToModifiedUtf8(); 1907 } 1908 return JDWP::ERR_NONE; 1909} 1910 1911JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) { 1912 ScopedObjectAccess soa(Thread::Current()); 1913 mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id); 1914 if (thread_object == ObjectRegistry::kInvalidObject) { 1915 return JDWP::ERR_INVALID_OBJECT; 1916 } 1917 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroup"); 1918 // Okay, so it's an object, but is it actually a thread? 1919 JDWP::JdwpError error; 1920 { 1921 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 1922 Thread* thread; 1923 error = DecodeThread(soa, thread_id, thread); 1924 } 1925 if (error == JDWP::ERR_THREAD_NOT_ALIVE) { 1926 // Zombie threads are in the null group. 1927 expandBufAddObjectId(pReply, JDWP::ObjectId(0)); 1928 error = JDWP::ERR_NONE; 1929 } else if (error == JDWP::ERR_NONE) { 1930 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread); 1931 CHECK(c != nullptr); 1932 mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;"); 1933 CHECK(f != nullptr); 1934 mirror::Object* group = f->GetObject(thread_object); 1935 CHECK(group != nullptr); 1936 JDWP::ObjectId thread_group_id = gRegistry->Add(group); 1937 expandBufAddObjectId(pReply, thread_group_id); 1938 } 1939 soa.Self()->EndAssertNoThreadSuspension(old_cause); 1940 return error; 1941} 1942 1943std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) { 1944 ScopedObjectAccess soa(Thread::Current()); 1945 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id); 1946 CHECK(thread_group != nullptr); 1947 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName"); 1948 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup); 1949 CHECK(c != nullptr); 1950 mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;"); 1951 CHECK(f != NULL); 1952 mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group)); 1953 soa.Self()->EndAssertNoThreadSuspension(old_cause); 1954 return s->ToModifiedUtf8(); 1955} 1956 1957JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) { 1958 ScopedObjectAccessUnchecked soa(Thread::Current()); 1959 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id); 1960 CHECK(thread_group != nullptr); 1961 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent"); 1962 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup); 1963 CHECK(c != nullptr); 1964 mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;"); 1965 CHECK(f != NULL); 1966 mirror::Object* parent = f->GetObject(thread_group); 1967 soa.Self()->EndAssertNoThreadSuspension(old_cause); 1968 return gRegistry->Add(parent); 1969} 1970 1971JDWP::ObjectId Dbg::GetSystemThreadGroupId() { 1972 ScopedObjectAccessUnchecked soa(Thread::Current()); 1973 mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup); 1974 
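  // 'systemThreadGroup' is a static field, so its declaring class is passed as the receiver for
  // GetObject().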
mirror::Object* group = f->GetObject(f->GetDeclaringClass()); 1975 return gRegistry->Add(group); 1976} 1977 1978JDWP::ObjectId Dbg::GetMainThreadGroupId() { 1979 ScopedObjectAccess soa(Thread::Current()); 1980 mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup); 1981 mirror::Object* group = f->GetObject(f->GetDeclaringClass()); 1982 return gRegistry->Add(group); 1983} 1984 1985JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) { 1986 switch (state) { 1987 case kBlocked: 1988 return JDWP::TS_MONITOR; 1989 case kNative: 1990 case kRunnable: 1991 case kSuspended: 1992 return JDWP::TS_RUNNING; 1993 case kSleeping: 1994 return JDWP::TS_SLEEPING; 1995 case kStarting: 1996 case kTerminated: 1997 return JDWP::TS_ZOMBIE; 1998 case kTimedWaiting: 1999 case kWaitingForCheckPointsToRun: 2000 case kWaitingForDebuggerSend: 2001 case kWaitingForDebuggerSuspension: 2002 case kWaitingForDebuggerToAttach: 2003 case kWaitingForDeoptimization: 2004 case kWaitingForGcToComplete: 2005 case kWaitingForJniOnLoad: 2006 case kWaitingForMethodTracingStart: 2007 case kWaitingForSignalCatcherOutput: 2008 case kWaitingInMainDebuggerLoop: 2009 case kWaitingInMainSignalCatcherLoop: 2010 case kWaitingPerformingGc: 2011 case kWaiting: 2012 return JDWP::TS_WAIT; 2013 // Don't add a 'default' here so the compiler can spot incompatible enum changes. 2014 } 2015 LOG(FATAL) << "Unknown thread state: " << state; 2016 return JDWP::TS_ZOMBIE; 2017} 2018 2019JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus, 2020 JDWP::JdwpSuspendStatus* pSuspendStatus) { 2021 ScopedObjectAccess soa(Thread::Current()); 2022 2023 *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED; 2024 2025 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2026 Thread* thread; 2027 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 2028 if (error != JDWP::ERR_NONE) { 2029 if (error == JDWP::ERR_THREAD_NOT_ALIVE) { 2030 *pThreadStatus = JDWP::TS_ZOMBIE; 2031 return JDWP::ERR_NONE; 2032 } 2033 return error; 2034 } 2035 2036 if (IsSuspendedForDebugger(soa, thread)) { 2037 *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED; 2038 } 2039 2040 *pThreadStatus = ToJdwpThreadStatus(thread->GetState()); 2041 return JDWP::ERR_NONE; 2042} 2043 2044JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) { 2045 ScopedObjectAccess soa(Thread::Current()); 2046 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2047 Thread* thread; 2048 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 2049 if (error != JDWP::ERR_NONE) { 2050 return error; 2051 } 2052 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_); 2053 expandBufAdd4BE(pReply, thread->GetDebugSuspendCount()); 2054 return JDWP::ERR_NONE; 2055} 2056 2057JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) { 2058 ScopedObjectAccess soa(Thread::Current()); 2059 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2060 Thread* thread; 2061 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 2062 if (error != JDWP::ERR_NONE) { 2063 return error; 2064 } 2065 thread->Interrupt(soa.Self()); 2066 return JDWP::ERR_NONE; 2067} 2068 2069void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& thread_ids) { 2070 class ThreadListVisitor { 2071 public: 2072 ThreadListVisitor(const ScopedObjectAccessUnchecked& soa, mirror::Object* desired_thread_group, 2073 std::vector<JDWP::ObjectId>& thread_ids) 2074 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 2075 : soa_(soa), desired_thread_group_(desired_thread_group), thread_ids_(thread_ids) {} 2076 2077 static void Visit(Thread* t, void* arg) { 2078 reinterpret_cast<ThreadListVisitor*>(arg)->Visit(t); 2079 } 2080 2081 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 2082 // annotalysis. 2083 void Visit(Thread* t) NO_THREAD_SAFETY_ANALYSIS { 2084 if (t == Dbg::GetDebugThread()) { 2085 // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and 2086 // query all threads, so it's easier if we just don't tell them about this thread. 2087 return; 2088 } 2089 mirror::Object* peer = t->GetPeer(); 2090 if (IsInDesiredThreadGroup(peer)) { 2091 thread_ids_.push_back(gRegistry->Add(peer)); 2092 } 2093 } 2094 2095 private: 2096 bool IsInDesiredThreadGroup(mirror::Object* peer) 2097 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 2098 // peer might be NULL if the thread is still starting up. 2099 if (peer == NULL) { 2100 // We can't tell the debugger about this thread yet. 2101 // TODO: if we identified threads to the debugger by their Thread* 2102 // rather than their peer's mirror::Object*, we could fix this. 2103 // Doing so might help us report ZOMBIE threads too. 2104 return false; 2105 } 2106 // Do we want threads from all thread groups? 2107 if (desired_thread_group_ == NULL) { 2108 return true; 2109 } 2110 mirror::Object* group = soa_.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(peer); 2111 return (group == desired_thread_group_); 2112 } 2113 2114 const ScopedObjectAccessUnchecked& soa_; 2115 mirror::Object* const desired_thread_group_; 2116 std::vector<JDWP::ObjectId>& thread_ids_; 2117 }; 2118 2119 ScopedObjectAccessUnchecked soa(Thread::Current()); 2120 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id); 2121 ThreadListVisitor tlv(soa, thread_group, thread_ids); 2122 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2123 Runtime::Current()->GetThreadList()->ForEach(ThreadListVisitor::Visit, &tlv); 2124} 2125 2126void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& child_thread_group_ids) { 2127 ScopedObjectAccess soa(Thread::Current()); 2128 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id); 2129 2130 // Get the ArrayList<ThreadGroup> "groups" out of this thread group... 2131 mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;"); 2132 mirror::Object* groups_array_list = groups_field->GetObject(thread_group); 2133 2134 // Get the array and size out of the ArrayList<ThreadGroup>... 2135 mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;"); 2136 mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I"); 2137 mirror::ObjectArray<mirror::Object>* groups_array = 2138 array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>(); 2139 const int32_t size = size_field->GetInt(groups_array_list); 2140 2141 // Copy the first 'size' elements out of the array into the result. 
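  // The backing array usually has spare capacity, so iterate up to 'size' rather than the array
  // length.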
2142 for (int32_t i = 0; i < size; ++i) { 2143 child_thread_group_ids.push_back(gRegistry->Add(groups_array->Get(i))); 2144 } 2145} 2146 2147static int GetStackDepth(Thread* thread) 2148 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 2149 struct CountStackDepthVisitor : public StackVisitor { 2150 explicit CountStackDepthVisitor(Thread* thread) 2151 : StackVisitor(thread, NULL), depth(0) {} 2152 2153 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 2154 // annotalysis. 2155 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 2156 if (!GetMethod()->IsRuntimeMethod()) { 2157 ++depth; 2158 } 2159 return true; 2160 } 2161 size_t depth; 2162 }; 2163 2164 CountStackDepthVisitor visitor(thread); 2165 visitor.WalkStack(); 2166 return visitor.depth; 2167} 2168 2169JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t& result) { 2170 ScopedObjectAccess soa(Thread::Current()); 2171 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2172 Thread* thread; 2173 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 2174 if (error != JDWP::ERR_NONE) { 2175 return error; 2176 } 2177 if (!IsSuspendedForDebugger(soa, thread)) { 2178 return JDWP::ERR_THREAD_NOT_SUSPENDED; 2179 } 2180 result = GetStackDepth(thread); 2181 return JDWP::ERR_NONE; 2182} 2183 2184JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame, 2185 size_t frame_count, JDWP::ExpandBuf* buf) { 2186 class GetFrameVisitor : public StackVisitor { 2187 public: 2188 GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf) 2189 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 2190 : StackVisitor(thread, NULL), depth_(0), 2191 start_frame_(start_frame), frame_count_(frame_count), buf_(buf) { 2192 expandBufAdd4BE(buf_, frame_count_); 2193 } 2194 2195 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 2196 // annotalysis. 2197 virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 2198 if (GetMethod()->IsRuntimeMethod()) { 2199 return true; // The debugger can't do anything useful with a frame that has no Method*. 
2200 } 2201 if (depth_ >= start_frame_ + frame_count_) { 2202 return false; 2203 } 2204 if (depth_ >= start_frame_) { 2205 JDWP::FrameId frame_id(GetFrameId()); 2206 JDWP::JdwpLocation location; 2207 SetLocation(location, GetMethod(), GetDexPc()); 2208 VLOG(jdwp) << StringPrintf(" Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location; 2209 expandBufAdd8BE(buf_, frame_id); 2210 expandBufAddLocation(buf_, location); 2211 } 2212 ++depth_; 2213 return true; 2214 } 2215 2216 private: 2217 size_t depth_; 2218 const size_t start_frame_; 2219 const size_t frame_count_; 2220 JDWP::ExpandBuf* buf_; 2221 }; 2222 2223 ScopedObjectAccessUnchecked soa(Thread::Current()); 2224 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2225 Thread* thread; 2226 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 2227 if (error != JDWP::ERR_NONE) { 2228 return error; 2229 } 2230 if (!IsSuspendedForDebugger(soa, thread)) { 2231 return JDWP::ERR_THREAD_NOT_SUSPENDED; 2232 } 2233 GetFrameVisitor visitor(thread, start_frame, frame_count, buf); 2234 visitor.WalkStack(); 2235 return JDWP::ERR_NONE; 2236} 2237 2238JDWP::ObjectId Dbg::GetThreadSelfId() { 2239 ScopedObjectAccessUnchecked soa(Thread::Current()); 2240 return gRegistry->Add(soa.Self()->GetPeer()); 2241} 2242 2243void Dbg::SuspendVM() { 2244 Runtime::Current()->GetThreadList()->SuspendAllForDebugger(); 2245} 2246 2247void Dbg::ResumeVM() { 2248 Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions(); 2249} 2250 2251JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) { 2252 Thread* self = Thread::Current(); 2253 ScopedLocalRef<jobject> peer(self->GetJniEnv(), NULL); 2254 { 2255 ScopedObjectAccess soa(self); 2256 peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id))); 2257 } 2258 if (peer.get() == NULL) { 2259 return JDWP::ERR_THREAD_NOT_ALIVE; 2260 } 2261 // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads 2262 // trying to suspend this one. 2263 MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_); 2264 bool timed_out; 2265 Thread* thread = ThreadList::SuspendThreadByPeer(peer.get(), request_suspension, true, 2266 &timed_out); 2267 if (thread != NULL) { 2268 return JDWP::ERR_NONE; 2269 } else if (timed_out) { 2270 return JDWP::ERR_INTERNAL; 2271 } else { 2272 return JDWP::ERR_THREAD_NOT_ALIVE; 2273 } 2274} 2275 2276void Dbg::ResumeThread(JDWP::ObjectId thread_id) { 2277 ScopedObjectAccessUnchecked soa(Thread::Current()); 2278 mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id); 2279 Thread* thread; 2280 { 2281 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2282 thread = Thread::FromManagedThread(soa, peer); 2283 } 2284 if (thread == NULL) { 2285 LOG(WARNING) << "No such thread for resume: " << peer; 2286 return; 2287 } 2288 bool needs_resume; 2289 { 2290 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_); 2291 needs_resume = thread->GetSuspendCount() > 0; 2292 } 2293 if (needs_resume) { 2294 Runtime::Current()->GetThreadList()->Resume(thread, true); 2295 } 2296} 2297 2298void Dbg::SuspendSelf() { 2299 Runtime::Current()->GetThreadList()->SuspendSelfForDebugger(); 2300} 2301 2302struct GetThisVisitor : public StackVisitor { 2303 GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id) 2304 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 2305 : StackVisitor(thread, context), this_object(NULL), frame_id(frame_id) {} 2306 2307 // TODO: Enable annotalysis. 
We know lock is held in constructor, but abstraction confuses 2308 // annotalysis. 2309 virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 2310 if (frame_id != GetFrameId()) { 2311 return true; // continue 2312 } else { 2313 this_object = GetThisObject(); 2314 return false; 2315 } 2316 } 2317 2318 mirror::Object* this_object; 2319 JDWP::FrameId frame_id; 2320}; 2321 2322JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, 2323 JDWP::ObjectId* result) { 2324 ScopedObjectAccessUnchecked soa(Thread::Current()); 2325 Thread* thread; 2326 { 2327 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2328 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 2329 if (error != JDWP::ERR_NONE) { 2330 return error; 2331 } 2332 if (!IsSuspendedForDebugger(soa, thread)) { 2333 return JDWP::ERR_THREAD_NOT_SUSPENDED; 2334 } 2335 } 2336 std::unique_ptr<Context> context(Context::Create()); 2337 GetThisVisitor visitor(thread, context.get(), frame_id); 2338 visitor.WalkStack(); 2339 *result = gRegistry->Add(visitor.this_object); 2340 return JDWP::ERR_NONE; 2341} 2342 2343JDWP::JdwpError Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot, 2344 JDWP::JdwpTag tag, uint8_t* buf, size_t width) { 2345 struct GetLocalVisitor : public StackVisitor { 2346 GetLocalVisitor(const ScopedObjectAccessUnchecked& soa, Thread* thread, Context* context, 2347 JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) 2348 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 2349 : StackVisitor(thread, context), soa_(soa), frame_id_(frame_id), slot_(slot), tag_(tag), 2350 buf_(buf), width_(width), error_(JDWP::ERR_NONE) {} 2351 2352 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 2353 // annotalysis. 2354 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 2355 if (GetFrameId() != frame_id_) { 2356 return true; // Not our frame, carry on. 2357 } 2358 // TODO: check that the tag is compatible with the actual type of the slot! 2359 // TODO: check slot is valid for this method or return INVALID_SLOT error. 2360 mirror::ArtMethod* m = GetMethod(); 2361 if (m->IsNative()) { 2362 // We can't read local value from native method. 
2363 error_ = JDWP::ERR_OPAQUE_FRAME; 2364 return false; 2365 } 2366 uint16_t reg = DemangleSlot(slot_, m); 2367 constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION; 2368 switch (tag_) { 2369 case JDWP::JT_BOOLEAN: { 2370 CHECK_EQ(width_, 1U); 2371 uint32_t intVal; 2372 if (GetVReg(m, reg, kIntVReg, &intVal)) { 2373 VLOG(jdwp) << "get boolean local " << reg << " = " << intVal; 2374 JDWP::Set1(buf_+1, intVal != 0); 2375 } else { 2376 VLOG(jdwp) << "failed to get boolean local " << reg; 2377 error_ = kFailureErrorCode; 2378 } 2379 break; 2380 } 2381 case JDWP::JT_BYTE: { 2382 CHECK_EQ(width_, 1U); 2383 uint32_t intVal; 2384 if (GetVReg(m, reg, kIntVReg, &intVal)) { 2385 VLOG(jdwp) << "get byte local " << reg << " = " << intVal; 2386 JDWP::Set1(buf_+1, intVal); 2387 } else { 2388 VLOG(jdwp) << "failed to get byte local " << reg; 2389 error_ = kFailureErrorCode; 2390 } 2391 break; 2392 } 2393 case JDWP::JT_SHORT: 2394 case JDWP::JT_CHAR: { 2395 CHECK_EQ(width_, 2U); 2396 uint32_t intVal; 2397 if (GetVReg(m, reg, kIntVReg, &intVal)) { 2398 VLOG(jdwp) << "get short/char local " << reg << " = " << intVal; 2399 JDWP::Set2BE(buf_+1, intVal); 2400 } else { 2401 VLOG(jdwp) << "failed to get short/char local " << reg; 2402 error_ = kFailureErrorCode; 2403 } 2404 break; 2405 } 2406 case JDWP::JT_INT: { 2407 CHECK_EQ(width_, 4U); 2408 uint32_t intVal; 2409 if (GetVReg(m, reg, kIntVReg, &intVal)) { 2410 VLOG(jdwp) << "get int local " << reg << " = " << intVal; 2411 JDWP::Set4BE(buf_+1, intVal); 2412 } else { 2413 VLOG(jdwp) << "failed to get int local " << reg; 2414 error_ = kFailureErrorCode; 2415 } 2416 break; 2417 } 2418 case JDWP::JT_FLOAT: { 2419 CHECK_EQ(width_, 4U); 2420 uint32_t intVal; 2421 if (GetVReg(m, reg, kFloatVReg, &intVal)) { 2422 VLOG(jdwp) << "get float local " << reg << " = " << intVal; 2423 JDWP::Set4BE(buf_+1, intVal); 2424 } else { 2425 VLOG(jdwp) << "failed to get float local " << reg; 2426 error_ = kFailureErrorCode; 2427 } 2428 break; 2429 } 2430 case JDWP::JT_ARRAY: 2431 case JDWP::JT_CLASS_LOADER: 2432 case JDWP::JT_CLASS_OBJECT: 2433 case JDWP::JT_OBJECT: 2434 case JDWP::JT_STRING: 2435 case JDWP::JT_THREAD: 2436 case JDWP::JT_THREAD_GROUP: { 2437 CHECK_EQ(width_, sizeof(JDWP::ObjectId)); 2438 uint32_t intVal; 2439 if (GetVReg(m, reg, kReferenceVReg, &intVal)) { 2440 mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal); 2441 VLOG(jdwp) << "get " << tag_ << " object local " << reg << " = " << o; 2442 if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) { 2443 LOG(FATAL) << "Register " << reg << " expected to hold " << tag_ << " object: " << o; 2444 } 2445 tag_ = TagFromObject(soa_, o); 2446 JDWP::SetObjectId(buf_+1, gRegistry->Add(o)); 2447 } else { 2448 VLOG(jdwp) << "failed to get " << tag_ << " object local " << reg; 2449 error_ = kFailureErrorCode; 2450 } 2451 break; 2452 } 2453 case JDWP::JT_DOUBLE: { 2454 CHECK_EQ(width_, 8U); 2455 uint64_t longVal; 2456 if (GetVRegPair(m, reg, kDoubleLoVReg, kDoubleHiVReg, &longVal)) { 2457 VLOG(jdwp) << "get double local " << reg << " = " << longVal; 2458 JDWP::Set8BE(buf_+1, longVal); 2459 } else { 2460 VLOG(jdwp) << "failed to get double local " << reg; 2461 error_ = kFailureErrorCode; 2462 } 2463 break; 2464 } 2465 case JDWP::JT_LONG: { 2466 CHECK_EQ(width_, 8U); 2467 uint64_t longVal; 2468 if (GetVRegPair(m, reg, kLongLoVReg, kLongHiVReg, &longVal)) { 2469 VLOG(jdwp) << "get long local " << reg << " = " << longVal; 2470 JDWP::Set8BE(buf_+1, longVal); 2471 } else { 2472 VLOG(jdwp) << "failed 
to get long local " << reg;
            error_ = kFailureErrorCode;
          }
          break;
        }
        default:
          LOG(FATAL) << "Unknown tag " << tag_;
          break;
      }

      // Prepend tag, which may have been updated.
      JDWP::Set1(buf_, tag_);
      return false;
    }
    const ScopedObjectAccessUnchecked& soa_;
    const JDWP::FrameId frame_id_;
    const int slot_;
    JDWP::JdwpTag tag_;
    uint8_t* const buf_;
    const size_t width_;
    JDWP::JdwpError error_;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  // TODO check thread is suspended by the debugger ?
  std::unique_ptr<Context> context(Context::Create());
  GetLocalVisitor visitor(soa, thread, context.get(), frame_id, slot, tag, buf, width);
  visitor.WalkStack();
  return visitor.error_;
}

JDWP::JdwpError Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot,
                                   JDWP::JdwpTag tag, uint64_t value, size_t width) {
  struct SetLocalVisitor : public StackVisitor {
    SetLocalVisitor(Thread* thread, Context* context,
                    JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value,
                    size_t width)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        : StackVisitor(thread, context),
          frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width),
          error_(JDWP::ERR_NONE) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
    bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS {
      if (GetFrameId() != frame_id_) {
        return true;  // Not our frame, carry on.
      }
      // TODO: check that the tag is compatible with the actual type of the slot!
      // TODO: check slot is valid for this method or return INVALID_SLOT error.
      mirror::ArtMethod* m = GetMethod();
      if (m->IsNative()) {
        // We can't write local values into a native method's frame.
        error_ = JDWP::ERR_OPAQUE_FRAME;
        return false;
      }
      uint16_t reg = DemangleSlot(slot_, m);
      constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION;
      switch (tag_) {
        case JDWP::JT_BOOLEAN:
        case JDWP::JT_BYTE:
          CHECK_EQ(width_, 1U);
          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
            VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = "
                       << static_cast<uint32_t>(value_);
            error_ = kFailureErrorCode;
          }
          break;
        case JDWP::JT_SHORT:
        case JDWP::JT_CHAR:
          CHECK_EQ(width_, 2U);
          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
            VLOG(jdwp) << "failed to set short/char local " << reg << " = "
                       << static_cast<uint32_t>(value_);
            error_ = kFailureErrorCode;
          }
          break;
        case JDWP::JT_INT:
          CHECK_EQ(width_, 4U);
          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) {
            VLOG(jdwp) << "failed to set int local " << reg << " = "
                       << static_cast<uint32_t>(value_);
            error_ = kFailureErrorCode;
          }
          break;
        case JDWP::JT_FLOAT:
          CHECK_EQ(width_, 4U);
          if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kFloatVReg)) {
            VLOG(jdwp) << "failed to set float local " << reg << " = "
                       << static_cast<uint32_t>(value_);
            error_ = kFailureErrorCode;
          }
          break;
        case JDWP::JT_ARRAY:
        case JDWP::JT_CLASS_LOADER:
        case JDWP::JT_CLASS_OBJECT:
        case JDWP::JT_OBJECT:
        case JDWP::JT_STRING:
        case JDWP::JT_THREAD:
        case JDWP::JT_THREAD_GROUP: {
          CHECK_EQ(width_, sizeof(JDWP::ObjectId));
          mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value_));
          if (o == ObjectRegistry::kInvalidObject) {
            VLOG(jdwp) << tag_ << " object " << o << " is an invalid object";
            error_ = JDWP::ERR_INVALID_OBJECT;
          } else if (!SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)),
                              kReferenceVReg)) {
            VLOG(jdwp) << "failed to set " << tag_ << " object local " << reg << " = " << o;
            error_ = kFailureErrorCode;
          }
          break;
        }
        case JDWP::JT_DOUBLE: {
          CHECK_EQ(width_, 8U);
          bool success = SetVRegPair(m, reg, value_, kDoubleLoVReg, kDoubleHiVReg);
          if (!success) {
            VLOG(jdwp) << "failed to set double local " << reg << " = " << value_;
            error_ = kFailureErrorCode;
          }
          break;
        }
        case JDWP::JT_LONG: {
          CHECK_EQ(width_, 8U);
          bool success = SetVRegPair(m, reg, value_, kLongLoVReg, kLongHiVReg);
          if (!success) {
            VLOG(jdwp) << "failed to set long local " << reg << " = " << value_;
            error_ = kFailureErrorCode;
          }
          break;
        }
        default:
          LOG(FATAL) << "Unknown tag " << tag_;
          break;
      }
      return false;
    }

    const JDWP::FrameId frame_id_;
    const int slot_;
    const JDWP::JdwpTag tag_;
    const uint64_t value_;
    const size_t width_;
    JDWP::JdwpError error_;
  };

  ScopedObjectAccessUnchecked soa(Thread::Current());
  MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
  Thread* thread;
  JDWP::JdwpError error = DecodeThread(soa, thread_id, thread);
  if (error != JDWP::ERR_NONE) {
    return error;
  }
  // TODO check thread is suspended by the debugger ?
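  // Walk the suspended thread's stack until we reach 'frame_id', then write the value into the
  // matching dex register (or register pair for longs and doubles).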
2631 std::unique_ptr<Context> context(Context::Create()); 2632 SetLocalVisitor visitor(thread, context.get(), frame_id, slot, tag, value, width); 2633 visitor.WalkStack(); 2634 return visitor.error_; 2635} 2636 2637JDWP::ObjectId Dbg::GetThisObjectIdForEvent(mirror::Object* this_object) { 2638 // If 'this_object' isn't already in the registry, we know that we're not looking for it, so 2639 // there's no point adding it to the registry and burning through ids. 2640 // When registering an event request with an instance filter, we've been given an existing object 2641 // id so it must already be present in the registry when the event fires. 2642 JDWP::ObjectId this_id = 0; 2643 if (this_object != nullptr && gRegistry->Contains(this_object)) { 2644 this_id = gRegistry->Add(this_object); 2645 } 2646 return this_id; 2647} 2648 2649void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object, 2650 int event_flags, const JValue* return_value) { 2651 if (!IsDebuggerActive()) { 2652 return; 2653 } 2654 DCHECK(m != nullptr); 2655 DCHECK_EQ(m->IsStatic(), this_object == nullptr); 2656 JDWP::JdwpLocation location; 2657 SetLocation(location, m, dex_pc); 2658 2659 // We need 'this' for InstanceOnly filters only. 2660 JDWP::ObjectId this_id = GetThisObjectIdForEvent(this_object); 2661 gJdwpState->PostLocationEvent(&location, this_id, event_flags, return_value); 2662} 2663 2664void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc, 2665 mirror::Object* this_object, mirror::ArtField* f) { 2666 if (!IsDebuggerActive()) { 2667 return; 2668 } 2669 DCHECK(m != nullptr); 2670 DCHECK(f != nullptr); 2671 JDWP::JdwpLocation location; 2672 SetLocation(location, m, dex_pc); 2673 2674 JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass()); 2675 JDWP::FieldId field_id = ToFieldId(f); 2676 JDWP::ObjectId this_id = gRegistry->Add(this_object); 2677 2678 gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, nullptr, false); 2679} 2680 2681void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc, 2682 mirror::Object* this_object, mirror::ArtField* f, 2683 const JValue* field_value) { 2684 if (!IsDebuggerActive()) { 2685 return; 2686 } 2687 DCHECK(m != nullptr); 2688 DCHECK(f != nullptr); 2689 DCHECK(field_value != nullptr); 2690 JDWP::JdwpLocation location; 2691 SetLocation(location, m, dex_pc); 2692 2693 JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass()); 2694 JDWP::FieldId field_id = ToFieldId(f); 2695 JDWP::ObjectId this_id = gRegistry->Add(this_object); 2696 2697 gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, field_value, true); 2698} 2699 2700void Dbg::PostException(const ThrowLocation& throw_location, 2701 mirror::ArtMethod* catch_method, 2702 uint32_t catch_dex_pc, mirror::Throwable* exception_object) { 2703 if (!IsDebuggerActive()) { 2704 return; 2705 } 2706 2707 JDWP::JdwpLocation jdwp_throw_location; 2708 SetLocation(jdwp_throw_location, throw_location.GetMethod(), throw_location.GetDexPc()); 2709 JDWP::JdwpLocation catch_location; 2710 SetLocation(catch_location, catch_method, catch_dex_pc); 2711 2712 // We need 'this' for InstanceOnly filters only. 
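  // GetThisObjectIdForEvent() only returns a non-zero id when the object is already in the
  // registry, so we don't burn through object ids when no instance filter can match.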
2713 JDWP::ObjectId this_id = GetThisObjectIdForEvent(throw_location.GetThis()); 2714 JDWP::ObjectId exception_id = gRegistry->Add(exception_object); 2715 JDWP::RefTypeId exception_class_id = gRegistry->AddRefType(exception_object->GetClass()); 2716 2717 gJdwpState->PostException(&jdwp_throw_location, exception_id, exception_class_id, &catch_location, 2718 this_id); 2719} 2720 2721void Dbg::PostClassPrepare(mirror::Class* c) { 2722 if (!IsDebuggerActive()) { 2723 return; 2724 } 2725 2726 // OLD-TODO - we currently always send both "verified" and "prepared" since 2727 // debuggers seem to like that. There might be some advantage to honesty, 2728 // since the class may not yet be verified. 2729 int state = JDWP::CS_VERIFIED | JDWP::CS_PREPARED; 2730 JDWP::JdwpTypeTag tag = GetTypeTag(c); 2731 gJdwpState->PostClassPrepare(tag, gRegistry->Add(c), c->GetDescriptor(), state); 2732} 2733 2734void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object, 2735 mirror::ArtMethod* m, uint32_t dex_pc, 2736 int event_flags, const JValue* return_value) { 2737 if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) { 2738 return; 2739 } 2740 2741 if (IsBreakpoint(m, dex_pc)) { 2742 event_flags |= kBreakpoint; 2743 } 2744 2745 // If the debugger is single-stepping one of our threads, check to 2746 // see if we're that thread and we've reached a step point. 2747 const SingleStepControl* single_step_control = thread->GetSingleStepControl(); 2748 DCHECK(single_step_control != nullptr); 2749 if (single_step_control->is_active) { 2750 CHECK(!m->IsNative()); 2751 if (single_step_control->step_depth == JDWP::SD_INTO) { 2752 // Step into method calls. We break when the line number 2753 // or method pointer changes. If we're in SS_MIN mode, we 2754 // always stop. 2755 if (single_step_control->method != m) { 2756 event_flags |= kSingleStep; 2757 VLOG(jdwp) << "SS new method"; 2758 } else if (single_step_control->step_size == JDWP::SS_MIN) { 2759 event_flags |= kSingleStep; 2760 VLOG(jdwp) << "SS new instruction"; 2761 } else if (single_step_control->ContainsDexPc(dex_pc)) { 2762 event_flags |= kSingleStep; 2763 VLOG(jdwp) << "SS new line"; 2764 } 2765 } else if (single_step_control->step_depth == JDWP::SD_OVER) { 2766 // Step over method calls. We break when the line number is 2767 // different and the frame depth is <= the original frame 2768 // depth. (We can't just compare on the method, because we 2769 // might get unrolled past it by an exception, and it's tricky 2770 // to identify recursion.) 2771 2772 int stack_depth = GetStackDepth(thread); 2773 2774 if (stack_depth < single_step_control->stack_depth) { 2775 // Popped up one or more frames, always trigger. 2776 event_flags |= kSingleStep; 2777 VLOG(jdwp) << "SS method pop"; 2778 } else if (stack_depth == single_step_control->stack_depth) { 2779 // Same depth, see if we moved. 2780 if (single_step_control->step_size == JDWP::SS_MIN) { 2781 event_flags |= kSingleStep; 2782 VLOG(jdwp) << "SS new instruction"; 2783 } else if (single_step_control->ContainsDexPc(dex_pc)) { 2784 event_flags |= kSingleStep; 2785 VLOG(jdwp) << "SS new line"; 2786 } 2787 } 2788 } else { 2789 CHECK_EQ(single_step_control->step_depth, JDWP::SD_OUT); 2790 // Return from the current method. We break when the frame 2791 // depth pops up. 2792 2793 // This differs from the "method exit" break in that it stops 2794 // with the PC at the next instruction in the returned-to 2795 // function, rather than the end of the returning function. 
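      // For example, stepping out of a frame at depth 5 only fires once the walk below sees a
      // depth of 4 or less, i.e. after the return has actually completed.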
2796 2797 int stack_depth = GetStackDepth(thread); 2798 if (stack_depth < single_step_control->stack_depth) { 2799 event_flags |= kSingleStep; 2800 VLOG(jdwp) << "SS method pop"; 2801 } 2802 } 2803 } 2804 2805 // If there's something interesting going on, see if it matches one 2806 // of the debugger filters. 2807 if (event_flags != 0) { 2808 Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value); 2809 } 2810} 2811 2812size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) { 2813 switch (instrumentation_event) { 2814 case instrumentation::Instrumentation::kMethodEntered: 2815 return &method_enter_event_ref_count_; 2816 case instrumentation::Instrumentation::kMethodExited: 2817 return &method_exit_event_ref_count_; 2818 case instrumentation::Instrumentation::kDexPcMoved: 2819 return &dex_pc_change_event_ref_count_; 2820 case instrumentation::Instrumentation::kFieldRead: 2821 return &field_read_event_ref_count_; 2822 case instrumentation::Instrumentation::kFieldWritten: 2823 return &field_write_event_ref_count_; 2824 case instrumentation::Instrumentation::kExceptionCaught: 2825 return &exception_catch_event_ref_count_; 2826 default: 2827 return nullptr; 2828 } 2829} 2830 2831// Process request while all mutator threads are suspended. 2832void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) { 2833 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 2834 switch (request.GetKind()) { 2835 case DeoptimizationRequest::kNothing: 2836 LOG(WARNING) << "Ignoring empty deoptimization request."; 2837 break; 2838 case DeoptimizationRequest::kRegisterForEvent: 2839 VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x", 2840 request.InstrumentationEvent()); 2841 instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent()); 2842 instrumentation_events_ |= request.InstrumentationEvent(); 2843 break; 2844 case DeoptimizationRequest::kUnregisterForEvent: 2845 VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x", 2846 request.InstrumentationEvent()); 2847 instrumentation->RemoveListener(&gDebugInstrumentationListener, 2848 request.InstrumentationEvent()); 2849 instrumentation_events_ &= ~request.InstrumentationEvent(); 2850 break; 2851 case DeoptimizationRequest::kFullDeoptimization: 2852 VLOG(jdwp) << "Deoptimize the world ..."; 2853 instrumentation->DeoptimizeEverything(); 2854 VLOG(jdwp) << "Deoptimize the world DONE"; 2855 break; 2856 case DeoptimizationRequest::kFullUndeoptimization: 2857 VLOG(jdwp) << "Undeoptimize the world ..."; 2858 instrumentation->UndeoptimizeEverything(); 2859 VLOG(jdwp) << "Undeoptimize the world DONE"; 2860 break; 2861 case DeoptimizationRequest::kSelectiveDeoptimization: 2862 VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ..."; 2863 instrumentation->Deoptimize(request.Method()); 2864 VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE"; 2865 break; 2866 case DeoptimizationRequest::kSelectiveUndeoptimization: 2867 VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ..."; 2868 instrumentation->Undeoptimize(request.Method()); 2869 VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE"; 2870 break; 2871 default: 2872 LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind(); 2873 break; 2874 } 2875} 2876 2877void Dbg::DelayFullUndeoptimization() { 2878 MutexLock 
mu(Thread::Current(), *deoptimization_lock_); 2879 ++delayed_full_undeoptimization_count_; 2880 DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_); 2881} 2882 2883void Dbg::ProcessDelayedFullUndeoptimizations() { 2884 // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization). 2885 { 2886 MutexLock mu(Thread::Current(), *deoptimization_lock_); 2887 while (delayed_full_undeoptimization_count_ > 0) { 2888 DeoptimizationRequest req; 2889 req.SetKind(DeoptimizationRequest::kFullUndeoptimization); 2890 req.SetMethod(nullptr); 2891 RequestDeoptimizationLocked(req); 2892 --delayed_full_undeoptimization_count_; 2893 } 2894 } 2895 ManageDeoptimization(); 2896} 2897 2898void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) { 2899 if (req.GetKind() == DeoptimizationRequest::kNothing) { 2900 // Nothing to do. 2901 return; 2902 } 2903 MutexLock mu(Thread::Current(), *deoptimization_lock_); 2904 RequestDeoptimizationLocked(req); 2905} 2906 2907void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) { 2908 switch (req.GetKind()) { 2909 case DeoptimizationRequest::kRegisterForEvent: { 2910 DCHECK_NE(req.InstrumentationEvent(), 0u); 2911 size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent()); 2912 CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x", 2913 req.InstrumentationEvent()); 2914 if (*counter == 0) { 2915 VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x", 2916 deoptimization_requests_.size(), req.InstrumentationEvent()); 2917 deoptimization_requests_.push_back(req); 2918 } 2919 *counter = *counter + 1; 2920 break; 2921 } 2922 case DeoptimizationRequest::kUnregisterForEvent: { 2923 DCHECK_NE(req.InstrumentationEvent(), 0u); 2924 size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent()); 2925 CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x", 2926 req.InstrumentationEvent()); 2927 *counter = *counter - 1; 2928 if (*counter == 0) { 2929 VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x", 2930 deoptimization_requests_.size(), req.InstrumentationEvent()); 2931 deoptimization_requests_.push_back(req); 2932 } 2933 break; 2934 } 2935 case DeoptimizationRequest::kFullDeoptimization: { 2936 DCHECK(req.Method() == nullptr); 2937 if (full_deoptimization_event_count_ == 0) { 2938 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size() 2939 << " for full deoptimization"; 2940 deoptimization_requests_.push_back(req); 2941 } 2942 ++full_deoptimization_event_count_; 2943 break; 2944 } 2945 case DeoptimizationRequest::kFullUndeoptimization: { 2946 DCHECK(req.Method() == nullptr); 2947 DCHECK_GT(full_deoptimization_event_count_, 0U); 2948 --full_deoptimization_event_count_; 2949 if (full_deoptimization_event_count_ == 0) { 2950 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size() 2951 << " for full undeoptimization"; 2952 deoptimization_requests_.push_back(req); 2953 } 2954 break; 2955 } 2956 case DeoptimizationRequest::kSelectiveDeoptimization: { 2957 DCHECK(req.Method() != nullptr); 2958 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size() 2959 << " for deoptimization of " << PrettyMethod(req.Method()); 2960 deoptimization_requests_.push_back(req); 2961 break; 2962 } 2963 case DeoptimizationRequest::kSelectiveUndeoptimization: { 2964 DCHECK(req.Method() != nullptr); 2965 VLOG(jdwp) << "Queue request #" 
<< deoptimization_requests_.size() 2966 << " for undeoptimization of " << PrettyMethod(req.Method()); 2967 deoptimization_requests_.push_back(req); 2968 break; 2969 } 2970 default: { 2971 LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind(); 2972 break; 2973 } 2974 } 2975} 2976 2977void Dbg::ManageDeoptimization() { 2978 Thread* const self = Thread::Current(); 2979 { 2980 // Avoid suspend/resume if there is no pending request. 2981 MutexLock mu(self, *deoptimization_lock_); 2982 if (deoptimization_requests_.empty()) { 2983 return; 2984 } 2985 } 2986 CHECK_EQ(self->GetState(), kRunnable); 2987 self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization); 2988 // We need to suspend mutator threads first. 2989 Runtime* const runtime = Runtime::Current(); 2990 runtime->GetThreadList()->SuspendAll(); 2991 const ThreadState old_state = self->SetStateUnsafe(kRunnable); 2992 { 2993 MutexLock mu(self, *deoptimization_lock_); 2994 size_t req_index = 0; 2995 for (DeoptimizationRequest& request : deoptimization_requests_) { 2996 VLOG(jdwp) << "Process deoptimization request #" << req_index++; 2997 ProcessDeoptimizationRequest(request); 2998 } 2999 deoptimization_requests_.clear(); 3000 } 3001 CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable); 3002 runtime->GetThreadList()->ResumeAll(); 3003 self->TransitionFromSuspendedToRunnable(); 3004} 3005 3006static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m) 3007 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 3008 const DexFile::CodeItem* code_item = m->GetCodeItem(); 3009 if (code_item == nullptr) { 3010 // TODO We should not be asked to watch location in a native or abstract method so the code item 3011 // should never be null. We could just check we never encounter this case. 3012 return false; 3013 } 3014 StackHandleScope<2> hs(self); 3015 mirror::Class* declaring_class = m->GetDeclaringClass(); 3016 Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache())); 3017 Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader())); 3018 verifier::MethodVerifier verifier(dex_cache->GetDexFile(), &dex_cache, &class_loader, 3019 &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m, 3020 m->GetAccessFlags(), false, true, false); 3021 // Note: we don't need to verify the method. 3022 return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr); 3023} 3024 3025static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m) 3026 EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 3027 for (Breakpoint& breakpoint : gBreakpoints) { 3028 if (breakpoint.Method() == m) { 3029 return &breakpoint; 3030 } 3031 } 3032 return nullptr; 3033} 3034 3035// Sanity checks all existing breakpoints on the same method. 3036static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization) 3037 EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) { 3038 if (kIsDebugBuild) { 3039 for (const Breakpoint& breakpoint : gBreakpoints) { 3040 CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization()); 3041 } 3042 if (need_full_deoptimization) { 3043 // We should have deoptimized everything but not "selectively" deoptimized this method. 3044 CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized()); 3045 CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m)); 3046 } else { 3047 // We should have "selectively" deoptimized this method. 
3048 // Note: while we have not deoptimized everything for this method, we may have done it for 3049 // another event. 3050 CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m)); 3051 } 3052 } 3053} 3054 3055// Installs a breakpoint at the specified location. Also indicates through the deoptimization 3056// request if we need to deoptimize. 3057void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) { 3058 Thread* const self = Thread::Current(); 3059 mirror::ArtMethod* m = FromMethodId(location->method_id); 3060 DCHECK(m != nullptr) << "No method for method id " << location->method_id; 3061 3062 MutexLock mu(self, *Locks::breakpoint_lock_); 3063 const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m); 3064 bool need_full_deoptimization; 3065 if (existing_breakpoint == nullptr) { 3066 // There is no breakpoint on this method yet: we need to deoptimize. If this method may be 3067 // inlined, we deoptimize everything; otherwise we deoptimize only this method. 3068 need_full_deoptimization = IsMethodPossiblyInlined(self, m); 3069 if (need_full_deoptimization) { 3070 req->SetKind(DeoptimizationRequest::kFullDeoptimization); 3071 req->SetMethod(nullptr); 3072 } else { 3073 req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization); 3074 req->SetMethod(m); 3075 } 3076 } else { 3077 // There is at least one breakpoint for this method: we don't need to deoptimize. 3078 req->SetKind(DeoptimizationRequest::kNothing); 3079 req->SetMethod(nullptr); 3080 3081 need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization(); 3082 SanityCheckExistingBreakpoints(m, need_full_deoptimization); 3083 } 3084 3085 gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization)); 3086 VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": " 3087 << gBreakpoints[gBreakpoints.size() - 1]; 3088} 3089 3090// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization 3091// request if we need to undeoptimize. 3092void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) { 3093 MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); 3094 mirror::ArtMethod* m = FromMethodId(location->method_id); 3095 DCHECK(m != nullptr) << "No method for method id " << location->method_id; 3096 bool need_full_deoptimization = false; 3097 for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) { 3098 if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) { 3099 VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i]; 3100 need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization(); 3101 DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m)); 3102 gBreakpoints.erase(gBreakpoints.begin() + i); 3103 break; 3104 } 3105 } 3106 const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m); 3107 if (existing_breakpoint == nullptr) { 3108 // There is no more breakpoint on this method: we need to undeoptimize. 3109 if (need_full_deoptimization) { 3110 // This method required full deoptimization: we need to undeoptimize everything. 3111 req->SetKind(DeoptimizationRequest::kFullUndeoptimization); 3112 req->SetMethod(nullptr); 3113 } else { 3114 // This method required selective deoptimization: we need to undeoptimize only that method. 
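      // This mirrors the kSelectiveDeoptimization request issued by WatchLocation when the first
      // breakpoint on this method was installed.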
3115 req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization); 3116 req->SetMethod(m); 3117 } 3118 } else { 3119 // There is at least one breakpoint for this method: we don't need to undeoptimize. 3120 req->SetKind(DeoptimizationRequest::kNothing); 3121 req->SetMethod(nullptr); 3122 SanityCheckExistingBreakpoints(m, need_full_deoptimization); 3123 } 3124} 3125 3126// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't 3127// cause suspension if the thread is the current thread. 3128class ScopedThreadSuspension { 3129 public: 3130 ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id) 3131 LOCKS_EXCLUDED(Locks::thread_list_lock_) 3132 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : 3133 thread_(nullptr), 3134 error_(JDWP::ERR_NONE), 3135 self_suspend_(false), 3136 other_suspend_(false) { 3137 ScopedObjectAccessUnchecked soa(self); 3138 { 3139 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 3140 error_ = DecodeThread(soa, thread_id, thread_); 3141 } 3142 if (error_ == JDWP::ERR_NONE) { 3143 if (thread_ == soa.Self()) { 3144 self_suspend_ = true; 3145 } else { 3146 soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension); 3147 jobject thread_peer = gRegistry->GetJObject(thread_id); 3148 bool timed_out; 3149 Thread* suspended_thread; 3150 { 3151 // Take suspend thread lock to avoid races with threads trying to suspend this one. 3152 MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_); 3153 suspended_thread = ThreadList::SuspendThreadByPeer(thread_peer, true, true, 3154 &timed_out); 3155 } 3156 CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension); 3157 if (suspended_thread == nullptr) { 3158 // Thread terminated from under us while suspending. 3159 error_ = JDWP::ERR_INVALID_THREAD; 3160 } else { 3161 CHECK_EQ(suspended_thread, thread_); 3162 other_suspend_ = true; 3163 } 3164 } 3165 } 3166 } 3167 3168 Thread* GetThread() const { 3169 return thread_; 3170 } 3171 3172 JDWP::JdwpError GetError() const { 3173 return error_; 3174 } 3175 3176 ~ScopedThreadSuspension() { 3177 if (other_suspend_) { 3178 Runtime::Current()->GetThreadList()->Resume(thread_, true); 3179 } 3180 } 3181 3182 private: 3183 Thread* thread_; 3184 JDWP::JdwpError error_; 3185 bool self_suspend_; 3186 bool other_suspend_; 3187}; 3188 3189JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size, 3190 JDWP::JdwpStepDepth step_depth) { 3191 Thread* self = Thread::Current(); 3192 ScopedThreadSuspension sts(self, thread_id); 3193 if (sts.GetError() != JDWP::ERR_NONE) { 3194 return sts.GetError(); 3195 } 3196 3197 // 3198 // Work out what Method* we're in, the current line number, and how deep the stack currently 3199 // is for step-out. 3200 // 3201 3202 struct SingleStepStackVisitor : public StackVisitor { 3203 explicit SingleStepStackVisitor(Thread* thread, SingleStepControl* single_step_control, 3204 int32_t* line_number) 3205 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 3206 : StackVisitor(thread, NULL), single_step_control_(single_step_control), 3207 line_number_(line_number) { 3208 DCHECK_EQ(single_step_control_, thread->GetSingleStepControl()); 3209 single_step_control_->method = NULL; 3210 single_step_control_->stack_depth = 0; 3211 } 3212 3213 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 3214 // annotalysis. 
3215 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 3216 mirror::ArtMethod* m = GetMethod(); 3217 if (!m->IsRuntimeMethod()) { 3218 ++single_step_control_->stack_depth; 3219 if (single_step_control_->method == NULL) { 3220 mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache(); 3221 single_step_control_->method = m; 3222 *line_number_ = -1; 3223 if (dex_cache != NULL) { 3224 const DexFile& dex_file = *dex_cache->GetDexFile(); 3225 *line_number_ = dex_file.GetLineNumFromPC(m, GetDexPc()); 3226 } 3227 } 3228 } 3229 return true; 3230 } 3231 3232 SingleStepControl* const single_step_control_; 3233 int32_t* const line_number_; 3234 }; 3235 3236 Thread* const thread = sts.GetThread(); 3237 SingleStepControl* const single_step_control = thread->GetSingleStepControl(); 3238 DCHECK(single_step_control != nullptr); 3239 int32_t line_number = -1; 3240 SingleStepStackVisitor visitor(thread, single_step_control, &line_number); 3241 visitor.WalkStack(); 3242 3243 // 3244 // Find the dex_pc values that correspond to the current line, for line-based single-stepping. 3245 // 3246 3247 struct DebugCallbackContext { 3248 explicit DebugCallbackContext(SingleStepControl* single_step_control, int32_t line_number, 3249 const DexFile::CodeItem* code_item) 3250 : single_step_control_(single_step_control), line_number_(line_number), code_item_(code_item), 3251 last_pc_valid(false), last_pc(0) { 3252 } 3253 3254 static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) { 3255 DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context); 3256 if (static_cast<int32_t>(line_number) == context->line_number_) { 3257 if (!context->last_pc_valid) { 3258 // Everything from this address until the next line change is ours. 3259 context->last_pc = address; 3260 context->last_pc_valid = true; 3261 } 3262 // Otherwise, if we're already in a valid range for this line, 3263 // just keep going (shouldn't really happen)... 3264 } else if (context->last_pc_valid) { // and the line number is new 3265 // Add everything from the last entry up until here to the set 3266 for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) { 3267 context->single_step_control_->dex_pcs.insert(dex_pc); 3268 } 3269 context->last_pc_valid = false; 3270 } 3271 return false; // There may be multiple entries for any given line. 3272 } 3273 3274 ~DebugCallbackContext() { 3275 // If the line number was the last in the position table... 3276 if (last_pc_valid) { 3277 size_t end = code_item_->insns_size_in_code_units_; 3278 for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) { 3279 single_step_control_->dex_pcs.insert(dex_pc); 3280 } 3281 } 3282 } 3283 3284 SingleStepControl* const single_step_control_; 3285 const int32_t line_number_; 3286 const DexFile::CodeItem* const code_item_; 3287 bool last_pc_valid; 3288 uint32_t last_pc; 3289 }; 3290 single_step_control->dex_pcs.clear(); 3291 mirror::ArtMethod* m = single_step_control->method; 3292 if (!m->IsNative()) { 3293 const DexFile::CodeItem* const code_item = m->GetCodeItem(); 3294 DebugCallbackContext context(single_step_control, line_number, code_item); 3295 m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(), 3296 DebugCallbackContext::Callback, NULL, &context); 3297 } 3298 3299 // 3300 // Everything else... 
3301 // 3302 3303 single_step_control->step_size = step_size; 3304 single_step_control->step_depth = step_depth; 3305 single_step_control->is_active = true; 3306 3307 if (VLOG_IS_ON(jdwp)) { 3308 VLOG(jdwp) << "Single-step thread: " << *thread; 3309 VLOG(jdwp) << "Single-step step size: " << single_step_control->step_size; 3310 VLOG(jdwp) << "Single-step step depth: " << single_step_control->step_depth; 3311 VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->method); 3312 VLOG(jdwp) << "Single-step current line: " << line_number; 3313 VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->stack_depth; 3314 VLOG(jdwp) << "Single-step dex_pc values:"; 3315 for (uint32_t dex_pc : single_step_control->dex_pcs) { 3316 VLOG(jdwp) << StringPrintf(" %#x", dex_pc); 3317 } 3318 } 3319 3320 return JDWP::ERR_NONE; 3321} 3322 3323void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) { 3324 ScopedObjectAccessUnchecked soa(Thread::Current()); 3325 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 3326 Thread* thread; 3327 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 3328 if (error == JDWP::ERR_NONE) { 3329 SingleStepControl* single_step_control = thread->GetSingleStepControl(); 3330 DCHECK(single_step_control != nullptr); 3331 single_step_control->Clear(); 3332 } 3333} 3334 3335static char JdwpTagToShortyChar(JDWP::JdwpTag tag) { 3336 switch (tag) { 3337 default: 3338 LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag); 3339 3340 // Primitives. 3341 case JDWP::JT_BYTE: return 'B'; 3342 case JDWP::JT_CHAR: return 'C'; 3343 case JDWP::JT_FLOAT: return 'F'; 3344 case JDWP::JT_DOUBLE: return 'D'; 3345 case JDWP::JT_INT: return 'I'; 3346 case JDWP::JT_LONG: return 'J'; 3347 case JDWP::JT_SHORT: return 'S'; 3348 case JDWP::JT_VOID: return 'V'; 3349 case JDWP::JT_BOOLEAN: return 'Z'; 3350 3351 // Reference types. 3352 case JDWP::JT_ARRAY: 3353 case JDWP::JT_OBJECT: 3354 case JDWP::JT_STRING: 3355 case JDWP::JT_THREAD: 3356 case JDWP::JT_THREAD_GROUP: 3357 case JDWP::JT_CLASS_LOADER: 3358 case JDWP::JT_CLASS_OBJECT: 3359 return 'L'; 3360 } 3361} 3362 3363JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id, 3364 JDWP::RefTypeId class_id, JDWP::MethodId method_id, 3365 uint32_t arg_count, uint64_t* arg_values, 3366 JDWP::JdwpTag* arg_types, uint32_t options, 3367 JDWP::JdwpTag* pResultTag, uint64_t* pResultValue, 3368 JDWP::ObjectId* pExceptionId) { 3369 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 3370 3371 Thread* targetThread = NULL; 3372 DebugInvokeReq* req = NULL; 3373 Thread* self = Thread::Current(); 3374 { 3375 ScopedObjectAccessUnchecked soa(self); 3376 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 3377 JDWP::JdwpError error = DecodeThread(soa, thread_id, targetThread); 3378 if (error != JDWP::ERR_NONE) { 3379 LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id; 3380 return error; 3381 } 3382 req = targetThread->GetInvokeReq(); 3383 if (!req->ready) { 3384 LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread; 3385 return JDWP::ERR_INVALID_THREAD; 3386 } 3387 3388 /* 3389 * We currently have a bug where we don't successfully resume the 3390 * target thread if the suspend count is too deep. We're expected to 3391 * require one "resume" for each "suspend", but when asked to execute 3392 * a method we have to resume fully and then re-suspend it back to the 3393 * same level. 
(The easiest way to cause this is to type "suspend" 3394 * multiple times in jdb.) 3395 * 3396 * It's unclear what this means when the event specifies "resume all" 3397 * and some threads are suspended more deeply than others. This is 3398 * a rare problem, so for now we just prevent it from hanging forever 3399 * by rejecting the method invocation request. Without this, we will 3400 * be stuck waiting on a suspended thread. 3401 */ 3402 int suspend_count; 3403 { 3404 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_); 3405 suspend_count = targetThread->GetSuspendCount(); 3406 } 3407 if (suspend_count > 1) { 3408 LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count; 3409 return JDWP::ERR_THREAD_SUSPENDED; // Probably not expected here. 3410 } 3411 3412 JDWP::JdwpError status; 3413 mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id); 3414 if (receiver == ObjectRegistry::kInvalidObject) { 3415 return JDWP::ERR_INVALID_OBJECT; 3416 } 3417 3418 mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id); 3419 if (thread == ObjectRegistry::kInvalidObject) { 3420 return JDWP::ERR_INVALID_OBJECT; 3421 } 3422 // TODO: check that 'thread' is actually a java.lang.Thread! 3423 3424 mirror::Class* c = DecodeClass(class_id, status); 3425 if (c == NULL) { 3426 return status; 3427 } 3428 3429 mirror::ArtMethod* m = FromMethodId(method_id); 3430 if (m->IsStatic() != (receiver == NULL)) { 3431 return JDWP::ERR_INVALID_METHODID; 3432 } 3433 if (m->IsStatic()) { 3434 if (m->GetDeclaringClass() != c) { 3435 return JDWP::ERR_INVALID_METHODID; 3436 } 3437 } else { 3438 if (!m->GetDeclaringClass()->IsAssignableFrom(c)) { 3439 return JDWP::ERR_INVALID_METHODID; 3440 } 3441 } 3442 3443 // Check the argument list matches the method. 3444 uint32_t shorty_len = 0; 3445 const char* shorty = m->GetShorty(&shorty_len); 3446 if (shorty_len - 1 != arg_count) { 3447 return JDWP::ERR_ILLEGAL_ARGUMENT; 3448 } 3449 3450 { 3451 StackHandleScope<3> hs(soa.Self()); 3452 MethodHelper mh(hs.NewHandle(m)); 3453 HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver)); 3454 HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c)); 3455 const DexFile::TypeList* types = m->GetParameterTypeList(); 3456 for (size_t i = 0; i < arg_count; ++i) { 3457 if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) { 3458 return JDWP::ERR_ILLEGAL_ARGUMENT; 3459 } 3460 3461 if (shorty[i + 1] == 'L') { 3462 // Did we really get an argument of an appropriate reference type? 3463 mirror::Class* parameter_type = mh.GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_); 3464 mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i]); 3465 if (argument == ObjectRegistry::kInvalidObject) { 3466 return JDWP::ERR_INVALID_OBJECT; 3467 } 3468 if (argument != NULL && !argument->InstanceOf(parameter_type)) { 3469 return JDWP::ERR_ILLEGAL_ARGUMENT; 3470 } 3471 3472 // Turn the on-the-wire ObjectId into a jobject. 3473 jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]); 3474 v.l = gRegistry->GetJObject(arg_values[i]); 3475 } 3476 } 3477 // Update in case it moved. 
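// (The MethodHelper holds the method in a handle, so a moving collector may have relocated it while the arguments were being checked above.)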
3478 m = mh.GetMethod(); 3479 } 3480 3481 req->receiver = receiver; 3482 req->thread = thread; 3483 req->klass = c; 3484 req->method = m; 3485 req->arg_count = arg_count; 3486 req->arg_values = arg_values; 3487 req->options = options; 3488 req->invoke_needed = true; 3489 } 3490 3491 // The fact that we've released the thread list lock is a bit risky --- if the thread goes 3492 // away we're sitting high and dry -- but we must release this before the ResumeAllThreads 3493 // call, and it's unwise to hold it during WaitForSuspend. 3494 3495 { 3496 /* 3497 * We change our (JDWP thread) status, which should be THREAD_RUNNING, 3498 * so we can suspend for a GC if the invoke request causes us to 3499 * run out of memory. It's also a good idea to change it before locking 3500 * the invokeReq mutex, although that should never be held for long. 3501 */ 3502 self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend); 3503 3504 VLOG(jdwp) << " Transferring control to event thread"; 3505 { 3506 MutexLock mu(self, req->lock); 3507 3508 if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) { 3509 VLOG(jdwp) << " Resuming all threads"; 3510 thread_list->UndoDebuggerSuspensions(); 3511 } else { 3512 VLOG(jdwp) << " Resuming event thread only"; 3513 thread_list->Resume(targetThread, true); 3514 } 3515 3516 // Wait for the request to finish executing. 3517 while (req->invoke_needed) { 3518 req->cond.Wait(self); 3519 } 3520 } 3521 VLOG(jdwp) << " Control has returned from event thread"; 3522 3523 /* wait for thread to re-suspend itself */ 3524 SuspendThread(thread_id, false /* request_suspension */); 3525 self->TransitionFromSuspendedToRunnable(); 3526 } 3527 3528 /* 3529 * Suspend the threads. We waited for the target thread to suspend 3530 * itself, so all we need to do is suspend the others. 3531 * 3532 * The suspendAllThreads() call will double-suspend the event thread, 3533 * so we want to resume the target thread once to keep the books straight. 3534 */ 3535 if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) { 3536 self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension); 3537 VLOG(jdwp) << " Suspending all threads"; 3538 thread_list->SuspendAllForDebugger(); 3539 self->TransitionFromSuspendedToRunnable(); 3540 VLOG(jdwp) << " Resuming event thread to balance the count"; 3541 thread_list->Resume(targetThread, true); 3542 } 3543 3544 // Copy the result. 3545 *pResultTag = req->result_tag; 3546 if (IsPrimitiveTag(req->result_tag)) { 3547 *pResultValue = req->result_value.GetJ(); 3548 } else { 3549 *pResultValue = gRegistry->Add(req->result_value.GetL()); 3550 } 3551 *pExceptionId = req->exception; 3552 return req->error; 3553} 3554 3555void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { 3556 ScopedObjectAccess soa(Thread::Current()); 3557 3558 // We can be called while an exception is pending. We need 3559 // to preserve that across the method invocation. 
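// The old exception, its throw location and its report flag are saved in handles and locals below so they survive any GC triggered by the invoke, and are restored once the call completes.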
3560 StackHandleScope<4> hs(soa.Self()); 3561 auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr); 3562 auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr); 3563 auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr); 3564 uint32_t old_throw_dex_pc; 3565 bool old_exception_report_flag; 3566 { 3567 ThrowLocation old_throw_location; 3568 mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location); 3569 old_throw_this_object.Assign(old_throw_location.GetThis()); 3570 old_throw_method.Assign(old_throw_location.GetMethod()); 3571 old_exception.Assign(old_exception_obj); 3572 old_throw_dex_pc = old_throw_location.GetDexPc(); 3573 old_exception_report_flag = soa.Self()->IsExceptionReportedToInstrumentation(); 3574 soa.Self()->ClearException(); 3575 } 3576 3577 // Translate the method through the vtable, unless the debugger wants to suppress it. 3578 Handle<mirror::ArtMethod> m(hs.NewHandle(pReq->method)); 3579 if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != NULL) { 3580 mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get()); 3581 if (actual_method != m.Get()) { 3582 VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method); 3583 m.Assign(actual_method); 3584 } 3585 } 3586 VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get()) 3587 << " receiver=" << pReq->receiver 3588 << " arg_count=" << pReq->arg_count; 3589 CHECK(m.Get() != nullptr); 3590 3591 CHECK_EQ(sizeof(jvalue), sizeof(uint64_t)); 3592 3593 pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()), 3594 reinterpret_cast<jvalue*>(pReq->arg_values)); 3595 3596 mirror::Throwable* exception = soa.Self()->GetException(NULL); 3597 soa.Self()->ClearException(); 3598 pReq->exception = gRegistry->Add(exception); 3599 pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty()); 3600 if (pReq->exception != 0) { 3601 VLOG(jdwp) << " JDWP invocation returning with exception=" << exception 3602 << " " << exception->Dump(); 3603 pReq->result_value.SetJ(0); 3604 } else if (pReq->result_tag == JDWP::JT_OBJECT) { 3605 /* if no exception thrown, examine object result more closely */ 3606 JDWP::JdwpTag new_tag = TagFromObject(soa, pReq->result_value.GetL()); 3607 if (new_tag != pReq->result_tag) { 3608 VLOG(jdwp) << " JDWP promoted result from " << pReq->result_tag << " to " << new_tag; 3609 pReq->result_tag = new_tag; 3610 } 3611 3612 /* 3613 * Register the object. We don't actually need an ObjectId yet, 3614 * but we do need to be sure that the GC won't move or discard the 3615 * object when we switch out of RUNNING. The ObjectId conversion 3616 * will add the object to the "do not touch" list. 3617 * 3618 * We can't use the "tracked allocation" mechanism here because 3619 * the object is going to be handed off to a different thread. 3620 */ 3621 gRegistry->Add(pReq->result_value.GetL()); 3622 } 3623 3624 if (old_exception.Get() != NULL) { 3625 ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(), 3626 old_throw_dex_pc); 3627 soa.Self()->SetException(gc_safe_throw_location, old_exception.Get()); 3628 soa.Self()->SetExceptionReportedToInstrumentation(old_exception_report_flag); 3629 } 3630} 3631 3632/* 3633 * "request" contains a full JDWP packet, possibly with multiple chunks. We 3634 * need to process each, accumulate the replies, and ship the whole thing 3635 * back. 
3636 * 3637 * Returns "true" if we have a reply. The reply buffer is newly allocated, 3638 * and includes the chunk type/length, followed by the data. 3639 * 3640 * OLD-TODO: we currently assume that the request and reply include a single 3641 * chunk. If this becomes inconvenient we will need to adapt. 3642 */ 3643bool Dbg::DdmHandlePacket(JDWP::Request& request, uint8_t** pReplyBuf, int* pReplyLen) { 3644 Thread* self = Thread::Current(); 3645 JNIEnv* env = self->GetJniEnv(); 3646 3647 uint32_t type = request.ReadUnsigned32("type"); 3648 uint32_t length = request.ReadUnsigned32("length"); 3649 3650 // Create a byte[] corresponding to 'request'. 3651 size_t request_length = request.size(); 3652 ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length)); 3653 if (dataArray.get() == NULL) { 3654 LOG(WARNING) << "byte[] allocation failed: " << request_length; 3655 env->ExceptionClear(); 3656 return false; 3657 } 3658 env->SetByteArrayRegion(dataArray.get(), 0, request_length, reinterpret_cast<const jbyte*>(request.data())); 3659 request.Skip(request_length); 3660 3661 // Run through and find all chunks. [Currently just find the first.] 3662 ScopedByteArrayRO contents(env, dataArray.get()); 3663 if (length != request_length) { 3664 LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zd)", length, request_length); 3665 return false; 3666 } 3667 3668 // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)". 3669 ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer, 3670 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch, 3671 type, dataArray.get(), 0, length)); 3672 if (env->ExceptionCheck()) { 3673 LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type); 3674 env->ExceptionDescribe(); 3675 env->ExceptionClear(); 3676 return false; 3677 } 3678 3679 if (chunk.get() == NULL) { 3680 return false; 3681 } 3682 3683 /* 3684 * Pull the pieces out of the chunk. We copy the results into a 3685 * newly-allocated buffer that the caller can free. We don't want to 3686 * continue using the Chunk object because nothing has a reference to it. 3687 * 3688 * We could avoid this by returning type/data/offset/length and having 3689 * the caller be aware of the object lifetime issues, but that 3690 * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work 3691 * if we have responses for multiple chunks. 3692 * 3693 * So we're pretty much stuck with copying data around multiple times. 
3694 */ 3695 ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data))); 3696 jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset); 3697 length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length); 3698 type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type); 3699 3700 VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length); 3701 if (length == 0 || replyData.get() == NULL) { 3702 return false; 3703 } 3704 3705 const int kChunkHdrLen = 8; 3706 uint8_t* reply = new uint8_t[length + kChunkHdrLen]; 3707 if (reply == NULL) { 3708 LOG(WARNING) << "malloc failed: " << (length + kChunkHdrLen); 3709 return false; 3710 } 3711 JDWP::Set4BE(reply + 0, type); 3712 JDWP::Set4BE(reply + 4, length); 3713 env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen)); 3714 3715 *pReplyBuf = reply; 3716 *pReplyLen = length + kChunkHdrLen; 3717 3718 VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length); 3719 return true; 3720} 3721 3722void Dbg::DdmBroadcast(bool connect) { 3723 VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "..."; 3724 3725 Thread* self = Thread::Current(); 3726 if (self->GetState() != kRunnable) { 3727 LOG(ERROR) << "DDM broadcast in thread state " << self->GetState(); 3728 /* try anyway? */ 3729 } 3730 3731 JNIEnv* env = self->GetJniEnv(); 3732 jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/; 3733 env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer, 3734 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast, 3735 event); 3736 if (env->ExceptionCheck()) { 3737 LOG(ERROR) << "DdmServer.broadcast " << event << " failed"; 3738 env->ExceptionDescribe(); 3739 env->ExceptionClear(); 3740 } 3741} 3742 3743void Dbg::DdmConnected() { 3744 Dbg::DdmBroadcast(true); 3745} 3746 3747void Dbg::DdmDisconnected() { 3748 Dbg::DdmBroadcast(false); 3749 gDdmThreadNotification = false; 3750} 3751 3752/* 3753 * Send a notification when a thread starts, stops, or changes its name. 3754 * 3755 * Because we broadcast the full set of threads when the notifications are 3756 * first enabled, it's possible for "thread" to be actively executing. 3757 */ 3758void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) { 3759 if (!gDdmThreadNotification) { 3760 return; 3761 } 3762 3763 if (type == CHUNK_TYPE("THDE")) { 3764 uint8_t buf[4]; 3765 JDWP::Set4BE(&buf[0], t->GetThreadId()); 3766 Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf); 3767 } else { 3768 CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type; 3769 ScopedObjectAccessUnchecked soa(Thread::Current()); 3770 StackHandleScope<1> hs(soa.Self()); 3771 Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa))); 3772 size_t char_count = (name.Get() != NULL) ? name->GetLength() : 0; 3773 const jchar* chars = (name.Get() != NULL) ? 
name->GetCharArray()->GetData() : NULL; 3774 3775 std::vector<uint8_t> bytes; 3776 JDWP::Append4BE(bytes, t->GetThreadId()); 3777 JDWP::AppendUtf16BE(bytes, chars, char_count); 3778 CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2); 3779 Dbg::DdmSendChunk(type, bytes); 3780 } 3781} 3782 3783void Dbg::DdmSetThreadNotification(bool enable) { 3784 // Enable/disable thread notifications. 3785 gDdmThreadNotification = enable; 3786 if (enable) { 3787 // Suspend the VM then post thread start notifications for all threads. Threads attaching will 3788 // see a suspension in progress and block until that ends. They then post their own start 3789 // notification. 3790 SuspendVM(); 3791 std::list<Thread*> threads; 3792 Thread* self = Thread::Current(); 3793 { 3794 MutexLock mu(self, *Locks::thread_list_lock_); 3795 threads = Runtime::Current()->GetThreadList()->GetList(); 3796 } 3797 { 3798 ScopedObjectAccess soa(self); 3799 for (Thread* thread : threads) { 3800 Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR")); 3801 } 3802 } 3803 ResumeVM(); 3804 } 3805} 3806 3807void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) { 3808 if (IsDebuggerActive()) { 3809 ScopedObjectAccessUnchecked soa(Thread::Current()); 3810 JDWP::ObjectId id = gRegistry->Add(t->GetPeer()); 3811 gJdwpState->PostThreadChange(id, type == CHUNK_TYPE("THCR")); 3812 } 3813 Dbg::DdmSendThreadNotification(t, type); 3814} 3815 3816void Dbg::PostThreadStart(Thread* t) { 3817 Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR")); 3818} 3819 3820void Dbg::PostThreadDeath(Thread* t) { 3821 Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE")); 3822} 3823 3824void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) { 3825 CHECK(buf != NULL); 3826 iovec vec[1]; 3827 vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf)); 3828 vec[0].iov_len = byte_count; 3829 Dbg::DdmSendChunkV(type, vec, 1); 3830} 3831 3832void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) { 3833 DdmSendChunk(type, bytes.size(), &bytes[0]); 3834} 3835 3836void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) { 3837 if (gJdwpState == NULL) { 3838 VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type; 3839 } else { 3840 gJdwpState->DdmSendChunkV(type, iov, iov_count); 3841 } 3842} 3843 3844int Dbg::DdmHandleHpifChunk(HpifWhen when) { 3845 if (when == HPIF_WHEN_NOW) { 3846 DdmSendHeapInfo(when); 3847 return true; 3848 } 3849 3850 if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) { 3851 LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when); 3852 return false; 3853 } 3854 3855 gDdmHpifWhen = when; 3856 return true; 3857} 3858 3859bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) { 3860 if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) { 3861 LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when); 3862 return false; 3863 } 3864 3865 if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) { 3866 LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what); 3867 return false; 3868 } 3869 3870 if (native) { 3871 gDdmNhsgWhen = when; 3872 gDdmNhsgWhat = what; 3873 } else { 3874 gDdmHpsgWhen = when; 3875 gDdmHpsgWhat = what; 3876 } 3877 return true; 3878} 3879 3880void Dbg::DdmSendHeapInfo(HpifWhen reason) { 3881 // If there's a one-shot 'when', reset it. 
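// For example, an HPIF_WHEN_NEXT_GC request is satisfied by this send, so it drops back to HPIF_WHEN_NEVER.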
3882 if (reason == gDdmHpifWhen) { 3883 if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) { 3884 gDdmHpifWhen = HPIF_WHEN_NEVER; 3885 } 3886 } 3887 3888 /* 3889 * Chunk HPIF (client --> server) 3890 * 3891 * Heap Info. General information about the heap, 3892 * suitable for a summary display. 3893 * 3894 * [u4]: number of heaps 3895 * 3896 * For each heap: 3897 * [u4]: heap ID 3898 * [u8]: timestamp in ms since Unix epoch 3899 * [u1]: capture reason (same as 'when' value from server) 3900 * [u4]: max heap size in bytes (-Xmx) 3901 * [u4]: current heap size in bytes 3902 * [u4]: current number of bytes allocated 3903 * [u4]: current number of objects allocated 3904 */ 3905 uint8_t heap_count = 1; 3906 gc::Heap* heap = Runtime::Current()->GetHeap(); 3907 std::vector<uint8_t> bytes; 3908 JDWP::Append4BE(bytes, heap_count); 3909 JDWP::Append4BE(bytes, 1); // Heap id (bogus; we only have one heap). 3910 JDWP::Append8BE(bytes, MilliTime()); 3911 JDWP::Append1BE(bytes, reason); 3912 JDWP::Append4BE(bytes, heap->GetMaxMemory()); // Max allowed heap size in bytes. 3913 JDWP::Append4BE(bytes, heap->GetTotalMemory()); // Current heap size in bytes. 3914 JDWP::Append4BE(bytes, heap->GetBytesAllocated()); 3915 JDWP::Append4BE(bytes, heap->GetObjectsAllocated()); 3916 CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4))); 3917 Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes); 3918} 3919 3920enum HpsgSolidity { 3921 SOLIDITY_FREE = 0, 3922 SOLIDITY_HARD = 1, 3923 SOLIDITY_SOFT = 2, 3924 SOLIDITY_WEAK = 3, 3925 SOLIDITY_PHANTOM = 4, 3926 SOLIDITY_FINALIZABLE = 5, 3927 SOLIDITY_SWEEP = 6, 3928}; 3929 3930enum HpsgKind { 3931 KIND_OBJECT = 0, 3932 KIND_CLASS_OBJECT = 1, 3933 KIND_ARRAY_1 = 2, 3934 KIND_ARRAY_2 = 3, 3935 KIND_ARRAY_4 = 4, 3936 KIND_ARRAY_8 = 5, 3937 KIND_UNKNOWN = 6, 3938 KIND_NATIVE = 7, 3939}; 3940 3941#define HPSG_PARTIAL (1<<7) 3942#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7))) 3943 3944class HeapChunkContext { 3945 public: 3946 // Maximum chunk size. Obtain this from the formula: 3947 // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2 3948 HeapChunkContext(bool merge, bool native) 3949 : buf_(16384 - 16), 3950 type_(0), 3951 merge_(merge) { 3952 Reset(); 3953 if (native) { 3954 type_ = CHUNK_TYPE("NHSG"); 3955 } else { 3956 type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO"); 3957 } 3958 } 3959 3960 ~HeapChunkContext() { 3961 if (p_ > &buf_[0]) { 3962 Flush(); 3963 } 3964 } 3965 3966 void EnsureHeader(const void* chunk_ptr) { 3967 if (!needHeader_) { 3968 return; 3969 } 3970 3971 // Start a new HPSx chunk. 3972 JDWP::Write4BE(&p_, 1); // Heap id (bogus; we only have one heap). 3973 JDWP::Write1BE(&p_, 8); // Size of allocation unit, in bytes. 3974 3975 JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr)); // virtual address of segment start. 3976 JDWP::Write4BE(&p_, 0); // offset of this piece (relative to the virtual address). 3977 // [u4]: length of piece, in allocation units 3978 // We won't know this until we're done, so save the offset and stuff in a dummy value. 3979 pieceLenField_ = p_; 3980 JDWP::Write4BE(&p_, 0x55555555); 3981 needHeader_ = false; 3982 } 3983 3984 void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 3985 if (pieceLenField_ == NULL) { 3986 // Flush immediately post Reset (maybe back-to-back Flush). Ignore. 3987 CHECK(needHeader_); 3988 return; 3989 } 3990 // Patch the "length of piece" field. 
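// pieceLenField_ points at the 0x55555555 placeholder written by EnsureHeader(); now that the piece is complete, overwrite it with the piece's length in allocation units.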
3991 CHECK_LE(&buf_[0], pieceLenField_); 3992 CHECK_LE(pieceLenField_, p_); 3993 JDWP::Set4BE(pieceLenField_, totalAllocationUnits_); 3994 3995 Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]); 3996 Reset(); 3997 } 3998 3999 static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg) 4000 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, 4001 Locks::mutator_lock_) { 4002 reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(start, end, used_bytes); 4003 } 4004 4005 private: 4006 enum { ALLOCATION_UNIT_SIZE = 8 }; 4007 4008 void Reset() { 4009 p_ = &buf_[0]; 4010 startOfNextMemoryChunk_ = NULL; 4011 totalAllocationUnits_ = 0; 4012 needHeader_ = true; 4013 pieceLenField_ = NULL; 4014 } 4015 4016 void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes) 4017 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, 4018 Locks::mutator_lock_) { 4019 // Note: heap callbacks cannot manipulate the heap they are walking over; the code below takes 4020 // care not to allocate memory, relying on buf_ already being the correct size. 4021 if (used_bytes == 0) { 4022 if (start == NULL) { 4023 // Reset for start of new heap. 4024 startOfNextMemoryChunk_ = NULL; 4025 Flush(); 4026 } 4027 // Only process in-use memory, so that free-region information 4028 // also covers dlmalloc bookkeeping. 4029 return; 4030 } 4031 4032 /* If we're looking at the native heap, we just report 4033 * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks. 4034 */ 4035 bool native = type_ == CHUNK_TYPE("NHSG"); 4036 4037 if (startOfNextMemoryChunk_ != NULL) { 4038 // Transmit any pending free memory. Free native memory spanning more 4039 // than kMaxFreeLen may be the result of mmap use, so don't report it. 4040 // If this is not free memory, start a new segment. 4041 bool flush = true; 4042 if (start > startOfNextMemoryChunk_) { 4043 const size_t kMaxFreeLen = 2 * kPageSize; 4044 void* freeStart = startOfNextMemoryChunk_; 4045 void* freeEnd = start; 4046 size_t freeLen = reinterpret_cast<char*>(freeEnd) - reinterpret_cast<char*>(freeStart); 4047 if (!native || freeLen < kMaxFreeLen) { 4048 AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen); 4049 flush = false; 4050 } 4051 } 4052 if (flush) { 4053 startOfNextMemoryChunk_ = NULL; 4054 Flush(); 4055 } 4056 } 4057 mirror::Object* obj = reinterpret_cast<mirror::Object*>(start); 4058 4059 // Determine the type of this chunk. 4060 // OLD-TODO: if context.merge, see if this chunk is different from the last chunk. 4061 // If it's the same, we should combine them. 4062 uint8_t state = ExamineObject(obj, native); 4063 // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an 4064 // allocation then the first sizeof(size_t) may belong to it. 4065 const size_t dlMallocOverhead = sizeof(size_t); 4066 AppendChunk(state, start, used_bytes + dlMallocOverhead); 4067 startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + dlMallocOverhead; 4068 } 4069 4070 void AppendChunk(uint8_t state, void* ptr, size_t length) 4071 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 4072 // Make sure there's enough room left in the buffer. 4073 // We need two bytes for every fractional 256 allocation units used by the chunk, plus 4074 // 17 bytes for the header.
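// For example, a 4096-byte chunk spans 512 allocation units, so it needs at most
// (((512 + 255) / 256) * 2) + 17 = 21 bytes of buffer space.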
4075 size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17; 4076 size_t bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]); 4077 if (bytesLeft < needed) { 4078 Flush(); 4079 } 4080 4081 bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]); 4082 if (bytesLeft < needed) { 4083 LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", " 4084 << needed << " bytes)"; 4085 return; 4086 } 4087 EnsureHeader(ptr); 4088 // Write out the chunk description. 4089 length /= ALLOCATION_UNIT_SIZE; // Convert to allocation units. 4090 totalAllocationUnits_ += length; 4091 while (length > 256) { 4092 *p_++ = state | HPSG_PARTIAL; 4093 *p_++ = 255; // length - 1 4094 length -= 256; 4095 } 4096 *p_++ = state; 4097 *p_++ = length - 1; 4098 } 4099 4100 uint8_t ExamineObject(mirror::Object* o, bool is_native_heap) 4101 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 4102 if (o == NULL) { 4103 return HPSG_STATE(SOLIDITY_FREE, 0); 4104 } 4105 4106 // It's an allocated chunk. Figure out what it is. 4107 4108 // If we're looking at the native heap, we'll just return 4109 // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks. 4110 if (is_native_heap) { 4111 return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE); 4112 } 4113 4114 if (!Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) { 4115 return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE); 4116 } 4117 4118 mirror::Class* c = o->GetClass(); 4119 if (c == NULL) { 4120 // The object was probably just created but hasn't been initialized yet. 4121 return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT); 4122 } 4123 4124 if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) { 4125 LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c; 4126 return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN); 4127 } 4128 4129 if (c->IsClassClass()) { 4130 return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT); 4131 } 4132 4133 if (c->IsArrayClass()) { 4134 if (o->IsObjectArray()) { 4135 return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4); 4136 } 4137 switch (c->GetComponentSize()) { 4138 case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1); 4139 case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2); 4140 case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4); 4141 case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8); 4142 } 4143 } 4144 4145 return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT); 4146 } 4147 4148 std::vector<uint8_t> buf_; 4149 uint8_t* p_; 4150 uint8_t* pieceLenField_; 4151 void* startOfNextMemoryChunk_; 4152 size_t totalAllocationUnits_; 4153 uint32_t type_; 4154 bool merge_; 4155 bool needHeader_; 4156 4157 DISALLOW_COPY_AND_ASSIGN(HeapChunkContext); 4158}; 4159 4160void Dbg::DdmSendHeapSegments(bool native) { 4161 Dbg::HpsgWhen when; 4162 Dbg::HpsgWhat what; 4163 if (!native) { 4164 when = gDdmHpsgWhen; 4165 what = gDdmHpsgWhat; 4166 } else { 4167 when = gDdmNhsgWhen; 4168 what = gDdmNhsgWhat; 4169 } 4170 if (when == HPSG_WHEN_NEVER) { 4171 return; 4172 } 4173 4174 // Figure out what kind of chunks we'll be sending. 4175 CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS) << static_cast<int>(what); 4176 4177 // First, send a heap start chunk. 4178 uint8_t heap_id[4]; 4179 JDWP::Set4BE(&heap_id[0], 1); // Heap id (bogus; we only have one heap). 4180 Dbg::DdmSendChunk(native ? 
CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id); 4181 4182 Thread* self = Thread::Current(); 4183 4184 // To allow the Walk/InspectAll() below to exclusively-lock the 4185 // mutator lock, temporarily release the shared access to the 4186 // mutator lock here by transitioning to the suspended state. 4187 Locks::mutator_lock_->AssertSharedHeld(self); 4188 self->TransitionFromRunnableToSuspended(kSuspended); 4189 4190 // Send a series of heap segment chunks. 4191 HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native); 4192 if (native) { 4193#ifdef USE_DLMALLOC 4194 dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context); 4195#else 4196 UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc"; 4197#endif 4198 } else { 4199 gc::Heap* heap = Runtime::Current()->GetHeap(); 4200 const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces(); 4201 typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It; 4202 for (It cur = spaces.begin(), end = spaces.end(); cur != end; ++cur) { 4203 if ((*cur)->IsMallocSpace()) { 4204 (*cur)->AsMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context); 4205 } 4206 } 4207 // Walk the large objects, these are not in the AllocSpace. 4208 heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context); 4209 } 4210 4211 // Shared-lock the mutator lock back. 4212 self->TransitionFromSuspendedToRunnable(); 4213 Locks::mutator_lock_->AssertSharedHeld(self); 4214 4215 // Finally, send a heap end chunk. 4216 Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id); 4217} 4218 4219static size_t GetAllocTrackerMax() { 4220#ifdef HAVE_ANDROID_OS 4221 // Check whether there's a system property overriding the number of records. 
4222 const char* propertyName = "dalvik.vm.allocTrackerMax"; 4223 char allocRecordMaxString[PROPERTY_VALUE_MAX]; 4224 if (property_get(propertyName, allocRecordMaxString, "") > 0) { 4225 char* end; 4226 size_t value = strtoul(allocRecordMaxString, &end, 10); 4227 if (*end != '\0') { 4228 LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString 4229 << "' --- invalid"; 4230 return kDefaultNumAllocRecords; 4231 } 4232 if (!IsPowerOfTwo(value)) { 4233 LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString 4234 << "' --- not power of two"; 4235 return kDefaultNumAllocRecords; 4236 } 4237 return value; 4238 } 4239#endif 4240 return kDefaultNumAllocRecords; 4241} 4242 4243void Dbg::SetAllocTrackingEnabled(bool enabled) { 4244 if (enabled) { 4245 { 4246 MutexLock mu(Thread::Current(), *alloc_tracker_lock_); 4247 if (recent_allocation_records_ == NULL) { 4248 alloc_record_max_ = GetAllocTrackerMax(); 4249 LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of " 4250 << kMaxAllocRecordStackDepth << " frames, taking " 4251 << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")"; 4252 alloc_record_head_ = alloc_record_count_ = 0; 4253 recent_allocation_records_ = new AllocRecord[alloc_record_max_]; 4254 CHECK(recent_allocation_records_ != NULL); 4255 } 4256 } 4257 Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints(); 4258 } else { 4259 Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints(); 4260 { 4261 MutexLock mu(Thread::Current(), *alloc_tracker_lock_); 4262 LOG(INFO) << "Disabling alloc tracker"; 4263 delete[] recent_allocation_records_; 4264 recent_allocation_records_ = NULL; 4265 type_cache_.Clear(); 4266 } 4267 } 4268} 4269 4270struct AllocRecordStackVisitor : public StackVisitor { 4271 AllocRecordStackVisitor(Thread* thread, AllocRecord* record) 4272 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 4273 : StackVisitor(thread, NULL), record(record), depth(0) {} 4274 4275 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 4276 // annotalysis. 4277 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 4278 if (depth >= kMaxAllocRecordStackDepth) { 4279 return false; 4280 } 4281 mirror::ArtMethod* m = GetMethod(); 4282 if (!m->IsRuntimeMethod()) { 4283 record->StackElement(depth)->SetMethod(m); 4284 record->StackElement(depth)->SetDexPc(GetDexPc()); 4285 ++depth; 4286 } 4287 return true; 4288 } 4289 4290 ~AllocRecordStackVisitor() { 4291 // Clear out any unused stack trace elements. 4292 for (; depth < kMaxAllocRecordStackDepth; ++depth) { 4293 record->StackElement(depth)->SetMethod(nullptr); 4294 record->StackElement(depth)->SetDexPc(0); 4295 } 4296 } 4297 4298 AllocRecord* record; 4299 size_t depth; 4300}; 4301 4302void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) { 4303 Thread* self = Thread::Current(); 4304 CHECK(self != NULL); 4305 4306 MutexLock mu(self, *alloc_tracker_lock_); 4307 if (recent_allocation_records_ == NULL) { 4308 return; 4309 } 4310 4311 // Advance and clip. 4312 if (++alloc_record_head_ == alloc_record_max_) { 4313 alloc_record_head_ = 0; 4314 } 4315 4316 // Fill in the basics. 4317 AllocRecord* record = &recent_allocation_records_[alloc_record_head_]; 4318 record->SetType(type); 4319 record->SetByteCount(byte_count); 4320 record->SetThinLockId(self->GetThreadId()); 4321 4322 // Fill in the stack trace. 
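// The visitor captures up to kMaxAllocRecordStackDepth non-runtime frames and nulls out any
// remaining entries, so the record's true depth can later be recovered by GetDepth().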
4323 AllocRecordStackVisitor visitor(self, record); 4324 visitor.WalkStack(); 4325 4326 if (alloc_record_count_ < alloc_record_max_) { 4327 ++alloc_record_count_; 4328 } 4329} 4330 4331// Returns the index of the head element. 4332// 4333// We point at the most-recently-written record, so if gAllocRecordCount is 1 4334// we want to use the current element. Take "head+1" and subtract count 4335// from it. 4336// 4337// We need to handle underflow in our circular buffer, so we add 4338// gAllocRecordMax and then mask it back down. 4339size_t Dbg::HeadIndex() { 4340 return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) & 4341 (Dbg::alloc_record_max_ - 1); 4342} 4343 4344void Dbg::DumpRecentAllocations() { 4345 ScopedObjectAccess soa(Thread::Current()); 4346 MutexLock mu(soa.Self(), *alloc_tracker_lock_); 4347 if (recent_allocation_records_ == NULL) { 4348 LOG(INFO) << "Not recording tracked allocations"; 4349 return; 4350 } 4351 4352 // "i" is the head of the list. We want to start at the end of the 4353 // list and move forward to the tail. 4354 size_t i = HeadIndex(); 4355 size_t count = alloc_record_count_; 4356 4357 LOG(INFO) << "Tracked allocations, (head=" << alloc_record_head_ << " count=" << count << ")"; 4358 while (count--) { 4359 AllocRecord* record = &recent_allocation_records_[i]; 4360 4361 LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->ThinLockId(), record->ByteCount()) 4362 << PrettyClass(record->Type()); 4363 4364 for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) { 4365 AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame); 4366 mirror::ArtMethod* m = stack_element->Method(); 4367 if (m == NULL) { 4368 break; 4369 } 4370 LOG(INFO) << " " << PrettyMethod(m) << " line " << stack_element->LineNumber(); 4371 } 4372 4373 // pause periodically to help logcat catch up 4374 if ((count % 5) == 0) { 4375 usleep(40000); 4376 } 4377 4378 i = (i + 1) & (alloc_record_max_ - 1); 4379 } 4380} 4381 4382class StringTable { 4383 public: 4384 StringTable() { 4385 } 4386 4387 void Add(const std::string& str) { 4388 table_.insert(str); 4389 } 4390 4391 void Add(const char* str) { 4392 table_.insert(str); 4393 } 4394 4395 size_t IndexOf(const char* s) const { 4396 auto it = table_.find(s); 4397 if (it == table_.end()) { 4398 LOG(FATAL) << "IndexOf(\"" << s << "\") failed"; 4399 } 4400 return std::distance(table_.begin(), it); 4401 } 4402 4403 size_t Size() const { 4404 return table_.size(); 4405 } 4406 4407 void WriteTo(std::vector<uint8_t>& bytes) const { 4408 for (const std::string& str : table_) { 4409 const char* s = str.c_str(); 4410 size_t s_len = CountModifiedUtf8Chars(s); 4411 std::unique_ptr<uint16_t> s_utf16(new uint16_t[s_len]); 4412 ConvertModifiedUtf8ToUtf16(s_utf16.get(), s); 4413 JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len); 4414 } 4415 } 4416 4417 private: 4418 std::set<std::string> table_; 4419 DISALLOW_COPY_AND_ASSIGN(StringTable); 4420}; 4421 4422static const char* GetMethodSourceFile(mirror::ArtMethod* method) 4423 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 4424 DCHECK(method != nullptr); 4425 const char* source_file = method->GetDeclaringClassSourceFile(); 4426 return (source_file != nullptr) ? source_file : ""; 4427} 4428 4429/* 4430 * The data we send to DDMS contains everything we have recorded. 
4431 * 4432 * Message header (all values big-endian): 4433 * (1b) message header len (to allow future expansion); includes itself 4434 * (1b) entry header len 4435 * (1b) stack frame len 4436 * (2b) number of entries 4437 * (4b) offset to string table from start of message 4438 * (2b) number of class name strings 4439 * (2b) number of method name strings 4440 * (2b) number of source file name strings 4441 * For each entry: 4442 * (4b) total allocation size 4443 * (2b) thread id 4444 * (2b) allocated object's class name index 4445 * (1b) stack depth 4446 * For each stack frame: 4447 * (2b) method's class name 4448 * (2b) method name 4449 * (2b) method source file 4450 * (2b) line number, clipped to 32767; -2 if native; -1 if no source 4451 * (xb) class name strings 4452 * (xb) method name strings 4453 * (xb) source file strings 4454 * 4455 * As with other DDM traffic, strings are sent as a 4-byte length 4456 * followed by UTF-16 data. 4457 * 4458 * We send up 16-bit unsigned indexes into string tables. In theory there 4459 * can be (kMaxAllocRecordStackDepth * gAllocRecordMax) unique strings in 4460 * each table, but in practice there should be far fewer. 4461 * 4462 * The chief reason for using a string table here is to keep the size of 4463 * the DDMS message to a minimum. This is partly to make the protocol 4464 * efficient, but also because we have to form the whole thing up all at 4465 * once in a memory buffer. 4466 * 4467 * We use separate string tables for class names, method names, and source 4468 * files to keep the indexes small. There will generally be no overlap 4469 * between the contents of these tables. 4470 */ 4471jbyteArray Dbg::GetRecentAllocations() { 4472 if (false) { 4473 DumpRecentAllocations(); 4474 } 4475 4476 Thread* self = Thread::Current(); 4477 std::vector<uint8_t> bytes; 4478 { 4479 MutexLock mu(self, *alloc_tracker_lock_); 4480 // 4481 // Part 1: generate string tables. 4482 // 4483 StringTable class_names; 4484 StringTable method_names; 4485 StringTable filenames; 4486 4487 int count = alloc_record_count_; 4488 int idx = HeadIndex(); 4489 while (count--) { 4490 AllocRecord* record = &recent_allocation_records_[idx]; 4491 class_names.Add(record->Type()->GetDescriptor()); 4492 for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) { 4493 mirror::ArtMethod* m = record->StackElement(i)->Method(); 4494 if (m != NULL) { 4495 class_names.Add(m->GetDeclaringClassDescriptor()); 4496 method_names.Add(m->GetName()); 4497 filenames.Add(GetMethodSourceFile(m)); 4498 } 4499 } 4500 4501 idx = (idx + 1) & (alloc_record_max_ - 1); 4502 } 4503 4504 LOG(INFO) << "allocation records: " << alloc_record_count_; 4505 4506 // 4507 // Part 2: Generate the output and store it in the buffer. 4508 // 4509 4510 // (1b) message header len (to allow future expansion); includes itself 4511 // (1b) entry header len 4512 // (1b) stack frame len 4513 const int kMessageHeaderLen = 15; 4514 const int kEntryHeaderLen = 9; 4515 const int kStackFrameLen = 8; 4516 JDWP::Append1BE(bytes, kMessageHeaderLen); 4517 JDWP::Append1BE(bytes, kEntryHeaderLen); 4518 JDWP::Append1BE(bytes, kStackFrameLen); 4519 4520 // (2b) number of entries 4521 // (4b) offset to string table from start of message 4522 // (2b) number of class name strings 4523 // (2b) number of method name strings 4524 // (2b) number of source file name strings 4525 JDWP::Append2BE(bytes, alloc_record_count_); 4526 size_t string_table_offset = bytes.size(); 4527 JDWP::Append4BE(bytes, 0); // We'll patch this later... 
4528 JDWP::Append2BE(bytes, class_names.Size()); 4529 JDWP::Append2BE(bytes, method_names.Size()); 4530 JDWP::Append2BE(bytes, filenames.Size()); 4531 4532 count = alloc_record_count_; 4533 idx = HeadIndex(); 4534 while (count--) { 4535 // For each entry: 4536 // (4b) total allocation size 4537 // (2b) thread id 4538 // (2b) allocated object's class name index 4539 // (1b) stack depth 4540 AllocRecord* record = &recent_allocation_records_[idx]; 4541 size_t stack_depth = record->GetDepth(); 4542 size_t allocated_object_class_name_index = 4543 class_names.IndexOf(record->Type()->GetDescriptor().c_str()); 4544 JDWP::Append4BE(bytes, record->ByteCount()); 4545 JDWP::Append2BE(bytes, record->ThinLockId()); 4546 JDWP::Append2BE(bytes, allocated_object_class_name_index); 4547 JDWP::Append1BE(bytes, stack_depth); 4548 4549 for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) { 4550 // For each stack frame: 4551 // (2b) method's class name 4552 // (2b) method name 4553 // (2b) method source file 4554 // (2b) line number, clipped to 32767; -2 if native; -1 if no source 4555 mirror::ArtMethod* m = record->StackElement(stack_frame)->Method(); 4556 size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor()); 4557 size_t method_name_index = method_names.IndexOf(m->GetName()); 4558 size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m)); 4559 JDWP::Append2BE(bytes, class_name_index); 4560 JDWP::Append2BE(bytes, method_name_index); 4561 JDWP::Append2BE(bytes, file_name_index); 4562 JDWP::Append2BE(bytes, record->StackElement(stack_frame)->LineNumber()); 4563 } 4564 4565 idx = (idx + 1) & (alloc_record_max_ - 1); 4566 } 4567 4568 // (xb) class name strings 4569 // (xb) method name strings 4570 // (xb) source file strings 4571 JDWP::Set4BE(&bytes[string_table_offset], bytes.size()); 4572 class_names.WriteTo(bytes); 4573 method_names.WriteTo(bytes); 4574 filenames.WriteTo(bytes); 4575 } 4576 JNIEnv* env = self->GetJniEnv(); 4577 jbyteArray result = env->NewByteArray(bytes.size()); 4578 if (result != NULL) { 4579 env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0])); 4580 } 4581 return result; 4582} 4583 4584mirror::ArtMethod* DeoptimizationRequest::Method() const { 4585 ScopedObjectAccessUnchecked soa(Thread::Current()); 4586 return soa.DecodeMethod(method_); 4587} 4588 4589void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) { 4590 ScopedObjectAccessUnchecked soa(Thread::Current()); 4591 method_ = soa.EncodeMethod(m); 4592} 4593 4594} // namespace art 4595