debugger.cc revision f3d874c60ee3ada19ce26a5c4e532312b6f3a9e9
1/* 2 * Copyright (C) 2008 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "debugger.h" 18 19#include <sys/uio.h> 20 21#include <set> 22 23#include "arch/context.h" 24#include "class_linker.h" 25#include "class_linker-inl.h" 26#include "dex_file-inl.h" 27#include "dex_instruction.h" 28#include "field_helper.h" 29#include "gc/accounting/card_table-inl.h" 30#include "gc/space/large_object_space.h" 31#include "gc/space/space-inl.h" 32#include "handle_scope.h" 33#include "jdwp/object_registry.h" 34#include "method_helper.h" 35#include "mirror/art_field-inl.h" 36#include "mirror/art_method-inl.h" 37#include "mirror/class.h" 38#include "mirror/class-inl.h" 39#include "mirror/class_loader.h" 40#include "mirror/object-inl.h" 41#include "mirror/object_array-inl.h" 42#include "mirror/string-inl.h" 43#include "mirror/throwable.h" 44#include "quick/inline_method_analyser.h" 45#include "reflection.h" 46#include "safe_map.h" 47#include "scoped_thread_state_change.h" 48#include "ScopedLocalRef.h" 49#include "ScopedPrimitiveArray.h" 50#include "handle_scope-inl.h" 51#include "thread_list.h" 52#include "throw_location.h" 53#include "utf.h" 54#include "verifier/method_verifier-inl.h" 55#include "well_known_classes.h" 56 57#ifdef HAVE_ANDROID_OS 58#include "cutils/properties.h" 59#endif 60 61namespace art { 62 63static const size_t kMaxAllocRecordStackDepth = 16; // Max 255. 64static const size_t kDefaultNumAllocRecords = 64*1024; // Must be a power of 2. 65 66class AllocRecordStackTraceElement { 67 public: 68 AllocRecordStackTraceElement() : method_(nullptr), dex_pc_(0) { 69 } 70 71 int32_t LineNumber() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 72 mirror::ArtMethod* method = Method(); 73 DCHECK(method != nullptr); 74 return method->GetLineNumFromDexPC(DexPc()); 75 } 76 77 mirror::ArtMethod* Method() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 78 ScopedObjectAccessUnchecked soa(Thread::Current()); 79 return soa.DecodeMethod(method_); 80 } 81 82 void SetMethod(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 83 ScopedObjectAccessUnchecked soa(Thread::Current()); 84 method_ = soa.EncodeMethod(m); 85 } 86 87 uint32_t DexPc() const { 88 return dex_pc_; 89 } 90 91 void SetDexPc(uint32_t pc) { 92 dex_pc_ = pc; 93 } 94 95 private: 96 jmethodID method_; 97 uint32_t dex_pc_; 98}; 99 100jobject Dbg::TypeCache::Add(mirror::Class* t) { 101 ScopedObjectAccessUnchecked soa(Thread::Current()); 102 int32_t hash_code = t->IdentityHashCode(); 103 auto range = objects_.equal_range(hash_code); 104 for (auto it = range.first; it != range.second; ++it) { 105 if (soa.Decode<mirror::Class*>(it->second) == t) { 106 // Found a matching weak global, return it. 
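      // Note: objects_ is keyed by the class's identity hash code, so unrelated classes may share
      // a bucket; an entry is only reused when its weak global still decodes to this exact class.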
107 return it->second; 108 } 109 } 110 JNIEnv* env = soa.Env(); 111 const jobject local_ref = soa.AddLocalReference<jobject>(t); 112 const jobject weak_global = env->NewWeakGlobalRef(local_ref); 113 env->DeleteLocalRef(local_ref); 114 objects_.insert(std::make_pair(hash_code, weak_global)); 115 return weak_global; 116} 117 118void Dbg::TypeCache::Clear() { 119 ScopedObjectAccess soa(Thread::Current()); 120 for (const auto& p : objects_) { 121 soa.Vm()->DeleteWeakGlobalRef(soa.Self(), p.second); 122 } 123 objects_.clear(); 124} 125 126class AllocRecord { 127 public: 128 AllocRecord() : type_(nullptr), byte_count_(0), thin_lock_id_(0) {} 129 130 mirror::Class* Type() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 131 return down_cast<mirror::Class*>(Thread::Current()->DecodeJObject(type_)); 132 } 133 134 void SetType(mirror::Class* t) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 135 type_ = Dbg::GetTypeCache().Add(t); 136 } 137 138 size_t GetDepth() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 139 size_t depth = 0; 140 while (depth < kMaxAllocRecordStackDepth && stack_[depth].Method() != NULL) { 141 ++depth; 142 } 143 return depth; 144 } 145 146 size_t ByteCount() const { 147 return byte_count_; 148 } 149 150 void SetByteCount(size_t count) { 151 byte_count_ = count; 152 } 153 154 uint16_t ThinLockId() const { 155 return thin_lock_id_; 156 } 157 158 void SetThinLockId(uint16_t id) { 159 thin_lock_id_ = id; 160 } 161 162 AllocRecordStackTraceElement* StackElement(size_t index) { 163 DCHECK_LT(index, kMaxAllocRecordStackDepth); 164 return &stack_[index]; 165 } 166 167 private: 168 jobject type_; // This is a weak global. 169 size_t byte_count_; 170 uint16_t thin_lock_id_; 171 AllocRecordStackTraceElement stack_[kMaxAllocRecordStackDepth]; // Unused entries have NULL method. 172}; 173 174class Breakpoint { 175 public: 176 Breakpoint(mirror::ArtMethod* method, uint32_t dex_pc, bool need_full_deoptimization) 177 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 178 : method_(nullptr), dex_pc_(dex_pc), need_full_deoptimization_(need_full_deoptimization) { 179 ScopedObjectAccessUnchecked soa(Thread::Current()); 180 method_ = soa.EncodeMethod(method); 181 } 182 183 Breakpoint(const Breakpoint& other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 184 : method_(nullptr), dex_pc_(other.dex_pc_), 185 need_full_deoptimization_(other.need_full_deoptimization_) { 186 ScopedObjectAccessUnchecked soa(Thread::Current()); 187 method_ = soa.EncodeMethod(other.Method()); 188 } 189 190 mirror::ArtMethod* Method() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 191 ScopedObjectAccessUnchecked soa(Thread::Current()); 192 return soa.DecodeMethod(method_); 193 } 194 195 uint32_t DexPc() const { 196 return dex_pc_; 197 } 198 199 bool NeedFullDeoptimization() const { 200 return need_full_deoptimization_; 201 } 202 203 private: 204 // The location of this breakpoint. 205 jmethodID method_; 206 uint32_t dex_pc_; 207 208 // Indicates whether breakpoint needs full deoptimization or selective deoptimization. 
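  // Roughly: selective deoptimization forces only this breakpoint's method to run in the
  // interpreter, while full deoptimization forces the whole runtime to interpret; the flag is
  // chosen by the caller when the breakpoint is installed.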
209 bool need_full_deoptimization_; 210}; 211 212static std::ostream& operator<<(std::ostream& os, Breakpoint& rhs) 213 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 214 os << StringPrintf("Breakpoint[%s @%#x]", PrettyMethod(rhs.Method()).c_str(), rhs.DexPc()); 215 return os; 216} 217 218class DebugInstrumentationListener FINAL : public instrumentation::InstrumentationListener { 219 public: 220 DebugInstrumentationListener() {} 221 virtual ~DebugInstrumentationListener() {} 222 223 void MethodEntered(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, 224 uint32_t dex_pc) 225 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 226 if (method->IsNative()) { 227 // TODO: post location events is a suspension point and native method entry stubs aren't. 228 return; 229 } 230 Dbg::UpdateDebugger(thread, this_object, method, 0, Dbg::kMethodEntry, nullptr); 231 } 232 233 void MethodExited(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, 234 uint32_t dex_pc, const JValue& return_value) 235 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 236 if (method->IsNative()) { 237 // TODO: post location events is a suspension point and native method entry stubs aren't. 238 return; 239 } 240 Dbg::UpdateDebugger(thread, this_object, method, dex_pc, Dbg::kMethodExit, &return_value); 241 } 242 243 void MethodUnwind(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, 244 uint32_t dex_pc) 245 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 246 // We're not recorded to listen to this kind of event, so complain. 247 LOG(ERROR) << "Unexpected method unwind event in debugger " << PrettyMethod(method) 248 << " " << dex_pc; 249 } 250 251 void DexPcMoved(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, 252 uint32_t new_dex_pc) 253 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 254 Dbg::UpdateDebugger(thread, this_object, method, new_dex_pc, 0, nullptr); 255 } 256 257 void FieldRead(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, 258 uint32_t dex_pc, mirror::ArtField* field) 259 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 260 Dbg::PostFieldAccessEvent(method, dex_pc, this_object, field); 261 } 262 263 void FieldWritten(Thread* thread, mirror::Object* this_object, mirror::ArtMethod* method, 264 uint32_t dex_pc, mirror::ArtField* field, const JValue& field_value) 265 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 266 Dbg::PostFieldModificationEvent(method, dex_pc, this_object, field, &field_value); 267 } 268 269 void ExceptionCaught(Thread* thread, const ThrowLocation& throw_location, 270 mirror::ArtMethod* catch_method, uint32_t catch_dex_pc, 271 mirror::Throwable* exception_object) 272 OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 273 Dbg::PostException(throw_location, catch_method, catch_dex_pc, exception_object); 274 } 275 276 private: 277 DISALLOW_COPY_AND_ASSIGN(DebugInstrumentationListener); 278} gDebugInstrumentationListener; 279 280// JDWP is allowed unless the Zygote forbids it. 281static bool gJdwpAllowed = true; 282 283// Was there a -Xrunjdwp or -agentlib:jdwp= argument on the command line? 284static bool gJdwpConfigured = false; 285 286// Broken-down JDWP options. (Only valid if IsJdwpConfigured() is true.) 287static JDWP::JdwpOptions gJdwpOptions; 288 289// Runtime JDWP state. 290static JDWP::JdwpState* gJdwpState = NULL; 291static bool gDebuggerConnected; // debugger or DDMS is connected. 
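// Roughly: a DDMS-only connection sets gDebuggerConnected but leaves gDebuggerActive false;
// gDebuggerActive is only flipped by Dbg::GoActive() once a JDWP debugger starts making requests.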
292static bool gDebuggerActive; // debugger is making requests. 293static bool gDisposed; // debugger called VirtualMachine.Dispose, so we should drop the connection. 294 295static bool gDdmThreadNotification = false; 296 297// DDMS GC-related settings. 298static Dbg::HpifWhen gDdmHpifWhen = Dbg::HPIF_WHEN_NEVER; 299static Dbg::HpsgWhen gDdmHpsgWhen = Dbg::HPSG_WHEN_NEVER; 300static Dbg::HpsgWhat gDdmHpsgWhat; 301static Dbg::HpsgWhen gDdmNhsgWhen = Dbg::HPSG_WHEN_NEVER; 302static Dbg::HpsgWhat gDdmNhsgWhat; 303 304static ObjectRegistry* gRegistry = nullptr; 305 306// Recent allocation tracking. 307Mutex* Dbg::alloc_tracker_lock_ = nullptr; 308AllocRecord* Dbg::recent_allocation_records_ = nullptr; // TODO: CircularBuffer<AllocRecord> 309size_t Dbg::alloc_record_max_ = 0; 310size_t Dbg::alloc_record_head_ = 0; 311size_t Dbg::alloc_record_count_ = 0; 312Dbg::TypeCache Dbg::type_cache_; 313 314// Deoptimization support. 315Mutex* Dbg::deoptimization_lock_ = nullptr; 316std::vector<DeoptimizationRequest> Dbg::deoptimization_requests_; 317size_t Dbg::full_deoptimization_event_count_ = 0; 318size_t Dbg::delayed_full_undeoptimization_count_ = 0; 319 320// Instrumentation event reference counters. 321size_t Dbg::dex_pc_change_event_ref_count_ = 0; 322size_t Dbg::method_enter_event_ref_count_ = 0; 323size_t Dbg::method_exit_event_ref_count_ = 0; 324size_t Dbg::field_read_event_ref_count_ = 0; 325size_t Dbg::field_write_event_ref_count_ = 0; 326size_t Dbg::exception_catch_event_ref_count_ = 0; 327uint32_t Dbg::instrumentation_events_ = 0; 328 329// Breakpoints. 330static std::vector<Breakpoint> gBreakpoints GUARDED_BY(Locks::breakpoint_lock_); 331 332void DebugInvokeReq::VisitRoots(RootCallback* callback, void* arg, uint32_t tid, 333 RootType root_type) { 334 if (receiver != nullptr) { 335 callback(&receiver, arg, tid, root_type); 336 } 337 if (thread != nullptr) { 338 callback(&thread, arg, tid, root_type); 339 } 340 if (klass != nullptr) { 341 callback(reinterpret_cast<mirror::Object**>(&klass), arg, tid, root_type); 342 } 343 if (method != nullptr) { 344 callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type); 345 } 346} 347 348void DebugInvokeReq::Clear() { 349 invoke_needed = false; 350 receiver = nullptr; 351 thread = nullptr; 352 klass = nullptr; 353 method = nullptr; 354} 355 356void SingleStepControl::VisitRoots(RootCallback* callback, void* arg, uint32_t tid, 357 RootType root_type) { 358 if (method != nullptr) { 359 callback(reinterpret_cast<mirror::Object**>(&method), arg, tid, root_type); 360 } 361} 362 363bool SingleStepControl::ContainsDexPc(uint32_t dex_pc) const { 364 return dex_pcs.find(dex_pc) == dex_pcs.end(); 365} 366 367void SingleStepControl::Clear() { 368 is_active = false; 369 method = nullptr; 370 dex_pcs.clear(); 371} 372 373static bool IsBreakpoint(const mirror::ArtMethod* m, uint32_t dex_pc) 374 LOCKS_EXCLUDED(Locks::breakpoint_lock_) 375 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 376 MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); 377 for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) { 378 if (gBreakpoints[i].DexPc() == dex_pc && gBreakpoints[i].Method() == m) { 379 VLOG(jdwp) << "Hit breakpoint #" << i << ": " << gBreakpoints[i]; 380 return true; 381 } 382 } 383 return false; 384} 385 386static bool IsSuspendedForDebugger(ScopedObjectAccessUnchecked& soa, Thread* thread) 387 LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) { 388 MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_); 389 // A thread may be suspended for GC; 
in this code, we really want to know whether 390 // there's a debugger suspension active. 391 return thread->IsSuspended() && thread->GetDebugSuspendCount() > 0; 392} 393 394static mirror::Array* DecodeArray(JDWP::RefTypeId id, JDWP::JdwpError& status) 395 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 396 mirror::Object* o = gRegistry->Get<mirror::Object*>(id); 397 if (o == NULL || o == ObjectRegistry::kInvalidObject) { 398 status = JDWP::ERR_INVALID_OBJECT; 399 return NULL; 400 } 401 if (!o->IsArrayInstance()) { 402 status = JDWP::ERR_INVALID_ARRAY; 403 return NULL; 404 } 405 status = JDWP::ERR_NONE; 406 return o->AsArray(); 407} 408 409static mirror::Class* DecodeClass(JDWP::RefTypeId id, JDWP::JdwpError& status) 410 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 411 mirror::Object* o = gRegistry->Get<mirror::Object*>(id); 412 if (o == NULL || o == ObjectRegistry::kInvalidObject) { 413 status = JDWP::ERR_INVALID_OBJECT; 414 return NULL; 415 } 416 if (!o->IsClass()) { 417 status = JDWP::ERR_INVALID_CLASS; 418 return NULL; 419 } 420 status = JDWP::ERR_NONE; 421 return o->AsClass(); 422} 423 424static JDWP::JdwpError DecodeThread(ScopedObjectAccessUnchecked& soa, JDWP::ObjectId thread_id, Thread*& thread) 425 EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_) 426 LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_) 427 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 428 mirror::Object* thread_peer = gRegistry->Get<mirror::Object*>(thread_id); 429 if (thread_peer == NULL || thread_peer == ObjectRegistry::kInvalidObject) { 430 // This isn't even an object. 431 return JDWP::ERR_INVALID_OBJECT; 432 } 433 434 mirror::Class* java_lang_Thread = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread); 435 if (!java_lang_Thread->IsAssignableFrom(thread_peer->GetClass())) { 436 // This isn't a thread. 437 return JDWP::ERR_INVALID_THREAD; 438 } 439 440 thread = Thread::FromManagedThread(soa, thread_peer); 441 if (thread == NULL) { 442 // This is a java.lang.Thread without a Thread*. Must be a zombie. 443 return JDWP::ERR_THREAD_NOT_ALIVE; 444 } 445 return JDWP::ERR_NONE; 446} 447 448static JDWP::JdwpTag BasicTagFromDescriptor(const char* descriptor) { 449 // JDWP deliberately uses the descriptor characters' ASCII values for its enum. 450 // Note that by "basic" we mean that we don't get more specific than JT_OBJECT. 451 return static_cast<JDWP::JdwpTag>(descriptor[0]); 452} 453 454static JDWP::JdwpTag TagFromClass(const ScopedObjectAccessUnchecked& soa, mirror::Class* c) 455 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 456 CHECK(c != NULL); 457 if (c->IsArrayClass()) { 458 return JDWP::JT_ARRAY; 459 } 460 if (c->IsStringClass()) { 461 return JDWP::JT_STRING; 462 } 463 if (c->IsClassClass()) { 464 return JDWP::JT_CLASS_OBJECT; 465 } 466 { 467 mirror::Class* thread_class = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread); 468 if (thread_class->IsAssignableFrom(c)) { 469 return JDWP::JT_THREAD; 470 } 471 } 472 { 473 mirror::Class* thread_group_class = 474 soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup); 475 if (thread_group_class->IsAssignableFrom(c)) { 476 return JDWP::JT_THREAD_GROUP; 477 } 478 } 479 { 480 mirror::Class* class_loader_class = 481 soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader); 482 if (class_loader_class->IsAssignableFrom(c)) { 483 return JDWP::JT_CLASS_LOADER; 484 } 485 } 486 return JDWP::JT_OBJECT; 487} 488 489/* 490 * Objects declared to hold Object might actually hold a more specific 491 * type. 
The debugger may take a special interest in these (e.g. it 492 * wants to display the contents of Strings), so we want to return an 493 * appropriate tag. 494 * 495 * Null objects are tagged JT_OBJECT. 496 */ 497static JDWP::JdwpTag TagFromObject(const ScopedObjectAccessUnchecked& soa, mirror::Object* o) 498 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 499 return (o == NULL) ? JDWP::JT_OBJECT : TagFromClass(soa, o->GetClass()); 500} 501 502static bool IsPrimitiveTag(JDWP::JdwpTag tag) { 503 switch (tag) { 504 case JDWP::JT_BOOLEAN: 505 case JDWP::JT_BYTE: 506 case JDWP::JT_CHAR: 507 case JDWP::JT_FLOAT: 508 case JDWP::JT_DOUBLE: 509 case JDWP::JT_INT: 510 case JDWP::JT_LONG: 511 case JDWP::JT_SHORT: 512 case JDWP::JT_VOID: 513 return true; 514 default: 515 return false; 516 } 517} 518 519/* 520 * Handle one of the JDWP name/value pairs. 521 * 522 * JDWP options are: 523 * help: if specified, show help message and bail 524 * transport: may be dt_socket or dt_shmem 525 * address: for dt_socket, "host:port", or just "port" when listening 526 * server: if "y", wait for debugger to attach; if "n", attach to debugger 527 * timeout: how long to wait for debugger to connect / listen 528 * 529 * Useful with server=n (these aren't supported yet): 530 * onthrow=<exception-name>: connect to debugger when exception thrown 531 * onuncaught=y|n: connect to debugger when uncaught exception thrown 532 * launch=<command-line>: launch the debugger itself 533 * 534 * The "transport" option is required, as is "address" if server=n. 535 */ 536static bool ParseJdwpOption(const std::string& name, const std::string& value) { 537 if (name == "transport") { 538 if (value == "dt_socket") { 539 gJdwpOptions.transport = JDWP::kJdwpTransportSocket; 540 } else if (value == "dt_android_adb") { 541 gJdwpOptions.transport = JDWP::kJdwpTransportAndroidAdb; 542 } else { 543 LOG(ERROR) << "JDWP transport not supported: " << value; 544 return false; 545 } 546 } else if (name == "server") { 547 if (value == "n") { 548 gJdwpOptions.server = false; 549 } else if (value == "y") { 550 gJdwpOptions.server = true; 551 } else { 552 LOG(ERROR) << "JDWP option 'server' must be 'y' or 'n'"; 553 return false; 554 } 555 } else if (name == "suspend") { 556 if (value == "n") { 557 gJdwpOptions.suspend = false; 558 } else if (value == "y") { 559 gJdwpOptions.suspend = true; 560 } else { 561 LOG(ERROR) << "JDWP option 'suspend' must be 'y' or 'n'"; 562 return false; 563 } 564 } else if (name == "address") { 565 /* this is either <port> or <host>:<port> */ 566 std::string port_string; 567 gJdwpOptions.host.clear(); 568 std::string::size_type colon = value.find(':'); 569 if (colon != std::string::npos) { 570 gJdwpOptions.host = value.substr(0, colon); 571 port_string = value.substr(colon + 1); 572 } else { 573 port_string = value; 574 } 575 if (port_string.empty()) { 576 LOG(ERROR) << "JDWP address missing port: " << value; 577 return false; 578 } 579 char* end; 580 uint64_t port = strtoul(port_string.c_str(), &end, 10); 581 if (*end != '\0' || port > 0xffff) { 582 LOG(ERROR) << "JDWP address has junk in port field: " << value; 583 return false; 584 } 585 gJdwpOptions.port = port; 586 } else if (name == "launch" || name == "onthrow" || name == "oncaught" || name == "timeout") { 587 /* valid but unsupported */ 588 LOG(INFO) << "Ignoring JDWP option '" << name << "'='" << value << "'"; 589 } else { 590 LOG(INFO) << "Ignoring unrecognized JDWP option '" << name << "'='" << value << "'"; 591 } 592 593 return true; 594} 595 596/* 597 * Parse the 
latter half of a -Xrunjdwp/-agentlib:jdwp= string, e.g.: 598 * "transport=dt_socket,address=8000,server=y,suspend=n" 599 */ 600bool Dbg::ParseJdwpOptions(const std::string& options) { 601 VLOG(jdwp) << "ParseJdwpOptions: " << options; 602 603 std::vector<std::string> pairs; 604 Split(options, ',', pairs); 605 606 for (size_t i = 0; i < pairs.size(); ++i) { 607 std::string::size_type equals = pairs[i].find('='); 608 if (equals == std::string::npos) { 609 LOG(ERROR) << "Can't parse JDWP option '" << pairs[i] << "' in '" << options << "'"; 610 return false; 611 } 612 ParseJdwpOption(pairs[i].substr(0, equals), pairs[i].substr(equals + 1)); 613 } 614 615 if (gJdwpOptions.transport == JDWP::kJdwpTransportUnknown) { 616 LOG(ERROR) << "Must specify JDWP transport: " << options; 617 } 618 if (!gJdwpOptions.server && (gJdwpOptions.host.empty() || gJdwpOptions.port == 0)) { 619 LOG(ERROR) << "Must specify JDWP host and port when server=n: " << options; 620 return false; 621 } 622 623 gJdwpConfigured = true; 624 return true; 625} 626 627void Dbg::StartJdwp() { 628 if (!gJdwpAllowed || !IsJdwpConfigured()) { 629 // No JDWP for you! 630 return; 631 } 632 633 CHECK(gRegistry == nullptr); 634 gRegistry = new ObjectRegistry; 635 636 alloc_tracker_lock_ = new Mutex("AllocTracker lock"); 637 deoptimization_lock_ = new Mutex("deoptimization lock", kDeoptimizationLock); 638 // Init JDWP if the debugger is enabled. This may connect out to a 639 // debugger, passively listen for a debugger, or block waiting for a 640 // debugger. 641 gJdwpState = JDWP::JdwpState::Create(&gJdwpOptions); 642 if (gJdwpState == NULL) { 643 // We probably failed because some other process has the port already, which means that 644 // if we don't abort the user is likely to think they're talking to us when they're actually 645 // talking to that other process. 646 LOG(FATAL) << "Debugger thread failed to initialize"; 647 } 648 649 // If a debugger has already attached, send the "welcome" message. 650 // This may cause us to suspend all threads. 651 if (gJdwpState->IsActive()) { 652 ScopedObjectAccess soa(Thread::Current()); 653 if (!gJdwpState->PostVMStart()) { 654 LOG(WARNING) << "Failed to post 'start' message to debugger"; 655 } 656 } 657} 658 659void Dbg::StopJdwp() { 660 // Prevent the JDWP thread from processing JDWP incoming packets after we close the connection. 661 Disposed(); 662 delete gJdwpState; 663 gJdwpState = nullptr; 664 delete gRegistry; 665 gRegistry = nullptr; 666 delete alloc_tracker_lock_; 667 alloc_tracker_lock_ = nullptr; 668 delete deoptimization_lock_; 669 deoptimization_lock_ = nullptr; 670} 671 672void Dbg::GcDidFinish() { 673 if (gDdmHpifWhen != HPIF_WHEN_NEVER) { 674 ScopedObjectAccess soa(Thread::Current()); 675 VLOG(jdwp) << "Sending heap info to DDM"; 676 DdmSendHeapInfo(gDdmHpifWhen); 677 } 678 if (gDdmHpsgWhen != HPSG_WHEN_NEVER) { 679 ScopedObjectAccess soa(Thread::Current()); 680 VLOG(jdwp) << "Dumping heap to DDM"; 681 DdmSendHeapSegments(false); 682 } 683 if (gDdmNhsgWhen != HPSG_WHEN_NEVER) { 684 ScopedObjectAccess soa(Thread::Current()); 685 VLOG(jdwp) << "Dumping native heap to DDM"; 686 DdmSendHeapSegments(true); 687 } 688} 689 690void Dbg::SetJdwpAllowed(bool allowed) { 691 gJdwpAllowed = allowed; 692} 693 694DebugInvokeReq* Dbg::GetInvokeReq() { 695 return Thread::Current()->GetInvokeReq(); 696} 697 698Thread* Dbg::GetDebugThread() { 699 return (gJdwpState != NULL) ? 
gJdwpState->GetDebugThread() : NULL; 700} 701 702void Dbg::ClearWaitForEventThread() { 703 gJdwpState->ClearWaitForEventThread(); 704} 705 706void Dbg::Connected() { 707 CHECK(!gDebuggerConnected); 708 VLOG(jdwp) << "JDWP has attached"; 709 gDebuggerConnected = true; 710 gDisposed = false; 711} 712 713void Dbg::Disposed() { 714 gDisposed = true; 715} 716 717bool Dbg::IsDisposed() { 718 return gDisposed; 719} 720 721void Dbg::GoActive() { 722 // Enable all debugging features, including scans for breakpoints. 723 // This is a no-op if we're already active. 724 // Only called from the JDWP handler thread. 725 if (gDebuggerActive) { 726 return; 727 } 728 729 { 730 // TODO: dalvik only warned if there were breakpoints left over. clear in Dbg::Disconnected? 731 MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); 732 CHECK_EQ(gBreakpoints.size(), 0U); 733 } 734 735 { 736 MutexLock mu(Thread::Current(), *deoptimization_lock_); 737 CHECK_EQ(deoptimization_requests_.size(), 0U); 738 CHECK_EQ(full_deoptimization_event_count_, 0U); 739 CHECK_EQ(delayed_full_undeoptimization_count_, 0U); 740 CHECK_EQ(dex_pc_change_event_ref_count_, 0U); 741 CHECK_EQ(method_enter_event_ref_count_, 0U); 742 CHECK_EQ(method_exit_event_ref_count_, 0U); 743 CHECK_EQ(field_read_event_ref_count_, 0U); 744 CHECK_EQ(field_write_event_ref_count_, 0U); 745 CHECK_EQ(exception_catch_event_ref_count_, 0U); 746 } 747 748 Runtime* runtime = Runtime::Current(); 749 runtime->GetThreadList()->SuspendAll(); 750 Thread* self = Thread::Current(); 751 ThreadState old_state = self->SetStateUnsafe(kRunnable); 752 CHECK_NE(old_state, kRunnable); 753 runtime->GetInstrumentation()->EnableDeoptimization(); 754 instrumentation_events_ = 0; 755 gDebuggerActive = true; 756 CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable); 757 runtime->GetThreadList()->ResumeAll(); 758 759 LOG(INFO) << "Debugger is active"; 760} 761 762void Dbg::Disconnected() { 763 CHECK(gDebuggerConnected); 764 765 LOG(INFO) << "Debugger is no longer active"; 766 767 // Suspend all threads and exclusively acquire the mutator lock. Set the state of the thread 768 // to kRunnable to avoid scoped object access transitions. Remove the debugger as a listener 769 // and clear the object registry. 770 Runtime* runtime = Runtime::Current(); 771 runtime->GetThreadList()->SuspendAll(); 772 Thread* self = Thread::Current(); 773 ThreadState old_state = self->SetStateUnsafe(kRunnable); 774 775 // Debugger may not be active at this point. 776 if (gDebuggerActive) { 777 { 778 // Since we're going to disable deoptimization, we clear the deoptimization requests queue. 779 // This prevents us from having any pending deoptimization request when the debugger attaches 780 // to us again while no event has been requested yet. 
781 MutexLock mu(Thread::Current(), *deoptimization_lock_); 782 deoptimization_requests_.clear(); 783 full_deoptimization_event_count_ = 0U; 784 delayed_full_undeoptimization_count_ = 0U; 785 } 786 if (instrumentation_events_ != 0) { 787 runtime->GetInstrumentation()->RemoveListener(&gDebugInstrumentationListener, 788 instrumentation_events_); 789 instrumentation_events_ = 0; 790 } 791 runtime->GetInstrumentation()->DisableDeoptimization(); 792 gDebuggerActive = false; 793 } 794 gRegistry->Clear(); 795 gDebuggerConnected = false; 796 CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable); 797 runtime->GetThreadList()->ResumeAll(); 798} 799 800bool Dbg::IsDebuggerActive() { 801 return gDebuggerActive; 802} 803 804bool Dbg::IsJdwpConfigured() { 805 return gJdwpConfigured; 806} 807 808int64_t Dbg::LastDebuggerActivity() { 809 return gJdwpState->LastDebuggerActivity(); 810} 811 812void Dbg::UndoDebuggerSuspensions() { 813 Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions(); 814} 815 816std::string Dbg::GetClassName(JDWP::RefTypeId class_id) { 817 mirror::Object* o = gRegistry->Get<mirror::Object*>(class_id); 818 if (o == NULL) { 819 return "NULL"; 820 } 821 if (o == ObjectRegistry::kInvalidObject) { 822 return StringPrintf("invalid object %p", reinterpret_cast<void*>(class_id)); 823 } 824 if (!o->IsClass()) { 825 return StringPrintf("non-class %p", o); // This is only used for debugging output anyway. 826 } 827 return DescriptorToName(o->AsClass()->GetDescriptor().c_str()); 828} 829 830JDWP::JdwpError Dbg::GetClassObject(JDWP::RefTypeId id, JDWP::ObjectId& class_object_id) { 831 JDWP::JdwpError status; 832 mirror::Class* c = DecodeClass(id, status); 833 if (c == NULL) { 834 return status; 835 } 836 class_object_id = gRegistry->Add(c); 837 return JDWP::ERR_NONE; 838} 839 840JDWP::JdwpError Dbg::GetSuperclass(JDWP::RefTypeId id, JDWP::RefTypeId& superclass_id) { 841 JDWP::JdwpError status; 842 mirror::Class* c = DecodeClass(id, status); 843 if (c == NULL) { 844 return status; 845 } 846 if (c->IsInterface()) { 847 // http://code.google.com/p/android/issues/detail?id=20856 848 superclass_id = 0; 849 } else { 850 superclass_id = gRegistry->Add(c->GetSuperClass()); 851 } 852 return JDWP::ERR_NONE; 853} 854 855JDWP::JdwpError Dbg::GetClassLoader(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) { 856 mirror::Object* o = gRegistry->Get<mirror::Object*>(id); 857 if (o == NULL || o == ObjectRegistry::kInvalidObject) { 858 return JDWP::ERR_INVALID_OBJECT; 859 } 860 expandBufAddObjectId(pReply, gRegistry->Add(o->GetClass()->GetClassLoader())); 861 return JDWP::ERR_NONE; 862} 863 864JDWP::JdwpError Dbg::GetModifiers(JDWP::RefTypeId id, JDWP::ExpandBuf* pReply) { 865 JDWP::JdwpError status; 866 mirror::Class* c = DecodeClass(id, status); 867 if (c == NULL) { 868 return status; 869 } 870 871 uint32_t access_flags = c->GetAccessFlags() & kAccJavaFlagsMask; 872 873 // Set ACC_SUPER. Dex files don't contain this flag but only classes are supposed to have it set, 874 // not interfaces. 875 // Class.getModifiers doesn't return it, but JDWP does, so we set it here. 
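  // For example, a plain public class is reported to JDWP as 0x0021 (ACC_PUBLIC | ACC_SUPER)
  // even though Class.getModifiers() returns only 0x0001.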
  if ((access_flags & kAccInterface) == 0) {
    access_flags |= kAccSuper;
  }

  expandBufAdd4BE(pReply, access_flags);

  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetMonitorInfo(JDWP::ObjectId object_id, JDWP::ExpandBuf* reply)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }

  // Ensure all threads are suspended while we read objects' lock words.
  Thread* self = Thread::Current();
  CHECK_EQ(self->GetState(), kRunnable);
  self->TransitionFromRunnableToSuspended(kSuspended);
  Runtime::Current()->GetThreadList()->SuspendAll();

  MonitorInfo monitor_info(o);

  Runtime::Current()->GetThreadList()->ResumeAll();
  self->TransitionFromSuspendedToRunnable();

  if (monitor_info.owner_ != NULL) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.owner_->GetPeer()));
  } else {
    expandBufAddObjectId(reply, gRegistry->Add(NULL));
  }
  expandBufAdd4BE(reply, monitor_info.entry_count_);
  expandBufAdd4BE(reply, monitor_info.waiters_.size());
  for (size_t i = 0; i < monitor_info.waiters_.size(); ++i) {
    expandBufAddObjectId(reply, gRegistry->Add(monitor_info.waiters_[i]->GetPeer()));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetOwnedMonitors(JDWP::ObjectId thread_id,
                                      std::vector<JDWP::ObjectId>& monitors,
                                      std::vector<uint32_t>& stack_depths) {
  struct OwnedMonitorVisitor : public StackVisitor {
    OwnedMonitorVisitor(Thread* thread, Context* context,
                        std::vector<JDWP::ObjectId>* monitor_vector,
                        std::vector<uint32_t>* stack_depth_vector)
        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
        : StackVisitor(thread, context), current_stack_depth(0),
          monitors(monitor_vector), stack_depths(stack_depth_vector) {}

    // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses
    // annotalysis.
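    // VisitFrame() is invoked once per frame by WalkStack(); runtime (trampoline) frames are
    // skipped, and for every real frame Monitor::VisitLocks() reports each monitor the frame
    // holds to AppendOwnedMonitors() along with the frame's depth.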
929 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 930 if (!GetMethod()->IsRuntimeMethod()) { 931 Monitor::VisitLocks(this, AppendOwnedMonitors, this); 932 ++current_stack_depth; 933 } 934 return true; 935 } 936 937 static void AppendOwnedMonitors(mirror::Object* owned_monitor, void* arg) 938 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 939 OwnedMonitorVisitor* visitor = reinterpret_cast<OwnedMonitorVisitor*>(arg); 940 visitor->monitors->push_back(gRegistry->Add(owned_monitor)); 941 visitor->stack_depths->push_back(visitor->current_stack_depth); 942 } 943 944 size_t current_stack_depth; 945 std::vector<JDWP::ObjectId>* monitors; 946 std::vector<uint32_t>* stack_depths; 947 }; 948 949 ScopedObjectAccessUnchecked soa(Thread::Current()); 950 Thread* thread; 951 { 952 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 953 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 954 if (error != JDWP::ERR_NONE) { 955 return error; 956 } 957 if (!IsSuspendedForDebugger(soa, thread)) { 958 return JDWP::ERR_THREAD_NOT_SUSPENDED; 959 } 960 } 961 std::unique_ptr<Context> context(Context::Create()); 962 OwnedMonitorVisitor visitor(thread, context.get(), &monitors, &stack_depths); 963 visitor.WalkStack(); 964 return JDWP::ERR_NONE; 965} 966 967JDWP::JdwpError Dbg::GetContendedMonitor(JDWP::ObjectId thread_id, 968 JDWP::ObjectId& contended_monitor) { 969 mirror::Object* contended_monitor_obj; 970 ScopedObjectAccessUnchecked soa(Thread::Current()); 971 { 972 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 973 Thread* thread; 974 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 975 if (error != JDWP::ERR_NONE) { 976 return error; 977 } 978 if (!IsSuspendedForDebugger(soa, thread)) { 979 return JDWP::ERR_THREAD_NOT_SUSPENDED; 980 } 981 contended_monitor_obj = Monitor::GetContendedMonitor(thread); 982 } 983 // Add() requires the thread_list_lock_ not held to avoid the lock 984 // level violation. 985 contended_monitor = gRegistry->Add(contended_monitor_obj); 986 return JDWP::ERR_NONE; 987} 988 989JDWP::JdwpError Dbg::GetInstanceCounts(const std::vector<JDWP::RefTypeId>& class_ids, 990 std::vector<uint64_t>& counts) 991 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 992 gc::Heap* heap = Runtime::Current()->GetHeap(); 993 heap->CollectGarbage(false); 994 std::vector<mirror::Class*> classes; 995 counts.clear(); 996 for (size_t i = 0; i < class_ids.size(); ++i) { 997 JDWP::JdwpError status; 998 mirror::Class* c = DecodeClass(class_ids[i], status); 999 if (c == NULL) { 1000 return status; 1001 } 1002 classes.push_back(c); 1003 counts.push_back(0); 1004 } 1005 heap->CountInstances(classes, false, &counts[0]); 1006 return JDWP::ERR_NONE; 1007} 1008 1009JDWP::JdwpError Dbg::GetInstances(JDWP::RefTypeId class_id, int32_t max_count, std::vector<JDWP::ObjectId>& instances) 1010 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1011 gc::Heap* heap = Runtime::Current()->GetHeap(); 1012 // We only want reachable instances, so do a GC. 
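  // (The 'false' argument asks the collector not to clear soft references.)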
  heap->CollectGarbage(false);
  JDWP::JdwpError status;
  mirror::Class* c = DecodeClass(class_id, status);
  if (c == nullptr) {
    return status;
  }
  std::vector<mirror::Object*> raw_instances;
  Runtime::Current()->GetHeap()->GetInstances(c, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    instances.push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::GetReferringObjects(JDWP::ObjectId object_id, int32_t max_count,
                                         std::vector<JDWP::ObjectId>& referring_objects)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  gc::Heap* heap = Runtime::Current()->GetHeap();
  heap->CollectGarbage(false);
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  std::vector<mirror::Object*> raw_instances;
  heap->GetReferringObjects(o, max_count, raw_instances);
  for (size_t i = 0; i < raw_instances.size(); ++i) {
    referring_objects.push_back(gRegistry->Add(raw_instances[i]));
  }
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::DisableCollection(JDWP::ObjectId object_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->DisableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::EnableCollection(JDWP::ObjectId object_id)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id);
  // Unlike DisableCollection, the JDWP spec does not state that an invalid object causes an
  // error. The RI also ignores these cases and never returns an error. However, it's not obvious
  // why this command should behave differently from the DisableCollection and IsCollected
  // commands, so let's be more strict and return an error if this happens.
  if (o == NULL || o == ObjectRegistry::kInvalidObject) {
    return JDWP::ERR_INVALID_OBJECT;
  }
  gRegistry->EnableCollection(object_id);
  return JDWP::ERR_NONE;
}

JDWP::JdwpError Dbg::IsCollected(JDWP::ObjectId object_id, bool& is_collected)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  if (object_id == 0) {
    // Null object id is invalid.
    return JDWP::ERR_INVALID_OBJECT;
  }
  // The JDWP spec states that an INVALID_OBJECT error is returned if the object ID is not valid.
  // However, the RI seems to ignore this and assume the object has been collected.
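  // We follow the RI here: an unknown or stale id is reported as already collected instead of
  // returning an error.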
1076 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id); 1077 if (o == NULL || o == ObjectRegistry::kInvalidObject) { 1078 is_collected = true; 1079 } else { 1080 is_collected = gRegistry->IsCollected(object_id); 1081 } 1082 return JDWP::ERR_NONE; 1083} 1084 1085void Dbg::DisposeObject(JDWP::ObjectId object_id, uint32_t reference_count) 1086 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1087 gRegistry->DisposeObject(object_id, reference_count); 1088} 1089 1090static JDWP::JdwpTypeTag GetTypeTag(mirror::Class* klass) 1091 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1092 DCHECK(klass != nullptr); 1093 if (klass->IsArrayClass()) { 1094 return JDWP::TT_ARRAY; 1095 } else if (klass->IsInterface()) { 1096 return JDWP::TT_INTERFACE; 1097 } else { 1098 return JDWP::TT_CLASS; 1099 } 1100} 1101 1102JDWP::JdwpError Dbg::GetReflectedType(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) { 1103 JDWP::JdwpError status; 1104 mirror::Class* c = DecodeClass(class_id, status); 1105 if (c == NULL) { 1106 return status; 1107 } 1108 1109 JDWP::JdwpTypeTag type_tag = GetTypeTag(c); 1110 expandBufAdd1(pReply, type_tag); 1111 expandBufAddRefTypeId(pReply, class_id); 1112 return JDWP::ERR_NONE; 1113} 1114 1115void Dbg::GetClassList(std::vector<JDWP::RefTypeId>& classes) { 1116 // Get the complete list of reference classes (i.e. all classes except 1117 // the primitive types). 1118 // Returns a newly-allocated buffer full of RefTypeId values. 1119 struct ClassListCreator { 1120 explicit ClassListCreator(std::vector<JDWP::RefTypeId>& classes) : classes(classes) { 1121 } 1122 1123 static bool Visit(mirror::Class* c, void* arg) { 1124 return reinterpret_cast<ClassListCreator*>(arg)->Visit(c); 1125 } 1126 1127 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 1128 // annotalysis. 1129 bool Visit(mirror::Class* c) NO_THREAD_SAFETY_ANALYSIS { 1130 if (!c->IsPrimitive()) { 1131 classes.push_back(gRegistry->AddRefType(c)); 1132 } 1133 return true; 1134 } 1135 1136 std::vector<JDWP::RefTypeId>& classes; 1137 }; 1138 1139 ClassListCreator clc(classes); 1140 Runtime::Current()->GetClassLinker()->VisitClasses(ClassListCreator::Visit, &clc); 1141} 1142 1143JDWP::JdwpError Dbg::GetClassInfo(JDWP::RefTypeId class_id, JDWP::JdwpTypeTag* pTypeTag, uint32_t* pStatus, std::string* pDescriptor) { 1144 JDWP::JdwpError status; 1145 mirror::Class* c = DecodeClass(class_id, status); 1146 if (c == NULL) { 1147 return status; 1148 } 1149 1150 if (c->IsArrayClass()) { 1151 *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED; 1152 *pTypeTag = JDWP::TT_ARRAY; 1153 } else { 1154 if (c->IsErroneous()) { 1155 *pStatus = JDWP::CS_ERROR; 1156 } else { 1157 *pStatus = JDWP::CS_VERIFIED | JDWP::CS_PREPARED | JDWP::CS_INITIALIZED; 1158 } 1159 *pTypeTag = c->IsInterface() ? 
JDWP::TT_INTERFACE : JDWP::TT_CLASS; 1160 } 1161 1162 if (pDescriptor != NULL) { 1163 *pDescriptor = c->GetDescriptor(); 1164 } 1165 return JDWP::ERR_NONE; 1166} 1167 1168void Dbg::FindLoadedClassBySignature(const char* descriptor, std::vector<JDWP::RefTypeId>& ids) { 1169 std::vector<mirror::Class*> classes; 1170 Runtime::Current()->GetClassLinker()->LookupClasses(descriptor, classes); 1171 ids.clear(); 1172 for (size_t i = 0; i < classes.size(); ++i) { 1173 ids.push_back(gRegistry->Add(classes[i])); 1174 } 1175} 1176 1177JDWP::JdwpError Dbg::GetReferenceType(JDWP::ObjectId object_id, JDWP::ExpandBuf* pReply) 1178 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1179 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id); 1180 if (o == NULL || o == ObjectRegistry::kInvalidObject) { 1181 return JDWP::ERR_INVALID_OBJECT; 1182 } 1183 1184 JDWP::JdwpTypeTag type_tag = GetTypeTag(o->GetClass()); 1185 JDWP::RefTypeId type_id = gRegistry->AddRefType(o->GetClass()); 1186 1187 expandBufAdd1(pReply, type_tag); 1188 expandBufAddRefTypeId(pReply, type_id); 1189 1190 return JDWP::ERR_NONE; 1191} 1192 1193JDWP::JdwpError Dbg::GetSignature(JDWP::RefTypeId class_id, std::string* signature) { 1194 JDWP::JdwpError status; 1195 mirror::Class* c = DecodeClass(class_id, status); 1196 if (c == NULL) { 1197 return status; 1198 } 1199 *signature = c->GetDescriptor(); 1200 return JDWP::ERR_NONE; 1201} 1202 1203JDWP::JdwpError Dbg::GetSourceFile(JDWP::RefTypeId class_id, std::string& result) { 1204 JDWP::JdwpError status; 1205 mirror::Class* c = DecodeClass(class_id, status); 1206 if (c == nullptr) { 1207 return status; 1208 } 1209 const char* source_file = c->GetSourceFile(); 1210 if (source_file == nullptr) { 1211 return JDWP::ERR_ABSENT_INFORMATION; 1212 } 1213 result = source_file; 1214 return JDWP::ERR_NONE; 1215} 1216 1217JDWP::JdwpError Dbg::GetObjectTag(JDWP::ObjectId object_id, uint8_t& tag) { 1218 ScopedObjectAccessUnchecked soa(Thread::Current()); 1219 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id); 1220 if (o == ObjectRegistry::kInvalidObject) { 1221 return JDWP::ERR_INVALID_OBJECT; 1222 } 1223 tag = TagFromObject(soa, o); 1224 return JDWP::ERR_NONE; 1225} 1226 1227size_t Dbg::GetTagWidth(JDWP::JdwpTag tag) { 1228 switch (tag) { 1229 case JDWP::JT_VOID: 1230 return 0; 1231 case JDWP::JT_BYTE: 1232 case JDWP::JT_BOOLEAN: 1233 return 1; 1234 case JDWP::JT_CHAR: 1235 case JDWP::JT_SHORT: 1236 return 2; 1237 case JDWP::JT_FLOAT: 1238 case JDWP::JT_INT: 1239 return 4; 1240 case JDWP::JT_ARRAY: 1241 case JDWP::JT_OBJECT: 1242 case JDWP::JT_STRING: 1243 case JDWP::JT_THREAD: 1244 case JDWP::JT_THREAD_GROUP: 1245 case JDWP::JT_CLASS_LOADER: 1246 case JDWP::JT_CLASS_OBJECT: 1247 return sizeof(JDWP::ObjectId); 1248 case JDWP::JT_DOUBLE: 1249 case JDWP::JT_LONG: 1250 return 8; 1251 default: 1252 LOG(FATAL) << "Unknown tag " << tag; 1253 return -1; 1254 } 1255} 1256 1257JDWP::JdwpError Dbg::GetArrayLength(JDWP::ObjectId array_id, int& length) { 1258 JDWP::JdwpError status; 1259 mirror::Array* a = DecodeArray(array_id, status); 1260 if (a == NULL) { 1261 return status; 1262 } 1263 length = a->GetLength(); 1264 return JDWP::ERR_NONE; 1265} 1266 1267JDWP::JdwpError Dbg::OutputArray(JDWP::ObjectId array_id, int offset, int count, JDWP::ExpandBuf* pReply) { 1268 JDWP::JdwpError status; 1269 mirror::Array* a = DecodeArray(array_id, status); 1270 if (a == nullptr) { 1271 return status; 1272 } 1273 1274 if (offset < 0 || count < 0 || offset > a->GetLength() || a->GetLength() - offset < count) { 1275 
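    // For example, with a->GetLength() == 10, offset=8 and count=5 is rejected because 10 - 8 < 5.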
    LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count;
    return JDWP::ERR_INVALID_LENGTH;
  }
  std::string descriptor(a->GetClass()->GetDescriptor());
  JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor.c_str() + 1);

  expandBufAdd1(pReply, tag);
  expandBufAdd4BE(pReply, count);

  if (IsPrimitiveTag(tag)) {
    size_t width = GetTagWidth(tag);
    uint8_t* dst = expandBufAddSpace(pReply, count * width);
    if (width == 8) {
      const uint64_t* src8 = reinterpret_cast<uint64_t*>(a->GetRawData(sizeof(uint64_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write8BE(&dst, src8[offset + i]);
    } else if (width == 4) {
      const uint32_t* src4 = reinterpret_cast<uint32_t*>(a->GetRawData(sizeof(uint32_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write4BE(&dst, src4[offset + i]);
    } else if (width == 2) {
      const uint16_t* src2 = reinterpret_cast<uint16_t*>(a->GetRawData(sizeof(uint16_t), 0));
      for (int i = 0; i < count; ++i) JDWP::Write2BE(&dst, src2[offset + i]);
    } else {
      const uint8_t* src = reinterpret_cast<uint8_t*>(a->GetRawData(sizeof(uint8_t), 0));
      memcpy(dst, &src[offset * width], count * width);
    }
  } else {
    ScopedObjectAccessUnchecked soa(Thread::Current());
    mirror::ObjectArray<mirror::Object>* oa = a->AsObjectArray<mirror::Object>();
    for (int i = 0; i < count; ++i) {
      mirror::Object* element = oa->Get(offset + i);
      JDWP::JdwpTag specific_tag = (element != nullptr) ? TagFromObject(soa, element) : tag;
      expandBufAdd1(pReply, specific_tag);
      expandBufAddObjectId(pReply, gRegistry->Add(element));
    }
  }

  return JDWP::ERR_NONE;
}

template <typename T>
static void CopyArrayData(mirror::Array* a, JDWP::Request& src, int offset, int count)
    NO_THREAD_SAFETY_ANALYSIS {
  // TODO: fix when annotalysis correctly handles non-member functions.
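  // Reads 'count' values of sizeof(T) bytes each from the JDWP request and stores them into the
  // primitive array starting at element 'offset'.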
1319 DCHECK(a->GetClass()->IsPrimitiveArray()); 1320 1321 T* dst = reinterpret_cast<T*>(a->GetRawData(sizeof(T), offset)); 1322 for (int i = 0; i < count; ++i) { 1323 *dst++ = src.ReadValue(sizeof(T)); 1324 } 1325} 1326 1327JDWP::JdwpError Dbg::SetArrayElements(JDWP::ObjectId array_id, int offset, int count, 1328 JDWP::Request& request) 1329 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1330 JDWP::JdwpError status; 1331 mirror::Array* dst = DecodeArray(array_id, status); 1332 if (dst == NULL) { 1333 return status; 1334 } 1335 1336 if (offset < 0 || count < 0 || offset > dst->GetLength() || dst->GetLength() - offset < count) { 1337 LOG(WARNING) << __FUNCTION__ << " access out of bounds: offset=" << offset << "; count=" << count; 1338 return JDWP::ERR_INVALID_LENGTH; 1339 } 1340 std::string descriptor = dst->GetClass()->GetDescriptor(); 1341 JDWP::JdwpTag tag = BasicTagFromDescriptor(descriptor.c_str() + 1); 1342 1343 if (IsPrimitiveTag(tag)) { 1344 size_t width = GetTagWidth(tag); 1345 if (width == 8) { 1346 CopyArrayData<uint64_t>(dst, request, offset, count); 1347 } else if (width == 4) { 1348 CopyArrayData<uint32_t>(dst, request, offset, count); 1349 } else if (width == 2) { 1350 CopyArrayData<uint16_t>(dst, request, offset, count); 1351 } else { 1352 CopyArrayData<uint8_t>(dst, request, offset, count); 1353 } 1354 } else { 1355 mirror::ObjectArray<mirror::Object>* oa = dst->AsObjectArray<mirror::Object>(); 1356 for (int i = 0; i < count; ++i) { 1357 JDWP::ObjectId id = request.ReadObjectId(); 1358 mirror::Object* o = gRegistry->Get<mirror::Object*>(id); 1359 if (o == ObjectRegistry::kInvalidObject) { 1360 return JDWP::ERR_INVALID_OBJECT; 1361 } 1362 oa->Set<false>(offset + i, o); 1363 } 1364 } 1365 1366 return JDWP::ERR_NONE; 1367} 1368 1369JDWP::ObjectId Dbg::CreateString(const std::string& str) { 1370 return gRegistry->Add(mirror::String::AllocFromModifiedUtf8(Thread::Current(), str.c_str())); 1371} 1372 1373JDWP::JdwpError Dbg::CreateObject(JDWP::RefTypeId class_id, JDWP::ObjectId& new_object) { 1374 JDWP::JdwpError status; 1375 mirror::Class* c = DecodeClass(class_id, status); 1376 if (c == NULL) { 1377 return status; 1378 } 1379 new_object = gRegistry->Add(c->AllocObject(Thread::Current())); 1380 return JDWP::ERR_NONE; 1381} 1382 1383/* 1384 * Used by Eclipse's "Display" view to evaluate "new byte[5]" to get "(byte[]) [0, 0, 0, 0, 0]". 
1385 */ 1386JDWP::JdwpError Dbg::CreateArrayObject(JDWP::RefTypeId array_class_id, uint32_t length, 1387 JDWP::ObjectId& new_array) { 1388 JDWP::JdwpError status; 1389 mirror::Class* c = DecodeClass(array_class_id, status); 1390 if (c == NULL) { 1391 return status; 1392 } 1393 new_array = gRegistry->Add(mirror::Array::Alloc<true>(Thread::Current(), c, length, 1394 c->GetComponentSize(), 1395 Runtime::Current()->GetHeap()->GetCurrentAllocator())); 1396 return JDWP::ERR_NONE; 1397} 1398 1399bool Dbg::MatchType(JDWP::RefTypeId instance_class_id, JDWP::RefTypeId class_id) { 1400 JDWP::JdwpError status; 1401 mirror::Class* c1 = DecodeClass(instance_class_id, status); 1402 CHECK(c1 != NULL); 1403 mirror::Class* c2 = DecodeClass(class_id, status); 1404 CHECK(c2 != NULL); 1405 return c2->IsAssignableFrom(c1); 1406} 1407 1408static JDWP::FieldId ToFieldId(const mirror::ArtField* f) 1409 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1410 CHECK(!kMovingFields); 1411 return static_cast<JDWP::FieldId>(reinterpret_cast<uintptr_t>(f)); 1412} 1413 1414static JDWP::MethodId ToMethodId(const mirror::ArtMethod* m) 1415 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1416 CHECK(!kMovingMethods); 1417 return static_cast<JDWP::MethodId>(reinterpret_cast<uintptr_t>(m)); 1418} 1419 1420static mirror::ArtField* FromFieldId(JDWP::FieldId fid) 1421 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1422 CHECK(!kMovingFields); 1423 return reinterpret_cast<mirror::ArtField*>(static_cast<uintptr_t>(fid)); 1424} 1425 1426static mirror::ArtMethod* FromMethodId(JDWP::MethodId mid) 1427 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1428 CHECK(!kMovingMethods); 1429 return reinterpret_cast<mirror::ArtMethod*>(static_cast<uintptr_t>(mid)); 1430} 1431 1432static void SetLocation(JDWP::JdwpLocation& location, mirror::ArtMethod* m, uint32_t dex_pc) 1433 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1434 if (m == NULL) { 1435 memset(&location, 0, sizeof(location)); 1436 } else { 1437 mirror::Class* c = m->GetDeclaringClass(); 1438 location.type_tag = GetTypeTag(c); 1439 location.class_id = gRegistry->AddRefType(c); 1440 location.method_id = ToMethodId(m); 1441 location.dex_pc = (m->IsNative() || m->IsProxyMethod()) ? static_cast<uint64_t>(-1) : dex_pc; 1442 } 1443} 1444 1445std::string Dbg::GetMethodName(JDWP::MethodId method_id) 1446 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1447 mirror::ArtMethod* m = FromMethodId(method_id); 1448 return m->GetName(); 1449} 1450 1451std::string Dbg::GetFieldName(JDWP::FieldId field_id) 1452 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1453 return FromFieldId(field_id)->GetName(); 1454} 1455 1456/* 1457 * Augment the access flags for synthetic methods and fields by setting 1458 * the (as described by the spec) "0xf0000000 bit". Also, strip out any 1459 * flags not specified by the Java programming language. 1460 */ 1461static uint32_t MangleAccessFlags(uint32_t accessFlags) { 1462 accessFlags &= kAccJavaFlagsMask; 1463 if ((accessFlags & kAccSynthetic) != 0) { 1464 accessFlags |= 0xf0000000; 1465 } 1466 return accessFlags; 1467} 1468 1469/* 1470 * Circularly shifts registers so that arguments come first. Debuggers 1471 * expect slots to begin with arguments, but dex code places them at 1472 * the end. 
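 * For example (with registers_size_ = 5 and ins_size_ = 2): argument registers v3..v4 map to
 * slots 0..1, and local registers v0..v2 map to slots 2..4.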
1473 */ 1474static uint16_t MangleSlot(uint16_t slot, mirror::ArtMethod* m) 1475 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1476 const DexFile::CodeItem* code_item = m->GetCodeItem(); 1477 if (code_item == nullptr) { 1478 // We should not get here for a method without code (native, proxy or abstract). Log it and 1479 // return the slot as is since all registers are arguments. 1480 LOG(WARNING) << "Trying to mangle slot for method without code " << PrettyMethod(m); 1481 return slot; 1482 } 1483 uint16_t ins_size = code_item->ins_size_; 1484 uint16_t locals_size = code_item->registers_size_ - ins_size; 1485 if (slot >= locals_size) { 1486 return slot - locals_size; 1487 } else { 1488 return slot + ins_size; 1489 } 1490} 1491 1492/* 1493 * Circularly shifts registers so that arguments come last. Reverts 1494 * slots to dex style argument placement. 1495 */ 1496static uint16_t DemangleSlot(uint16_t slot, mirror::ArtMethod* m) 1497 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1498 const DexFile::CodeItem* code_item = m->GetCodeItem(); 1499 if (code_item == nullptr) { 1500 // We should not get here for a method without code (native, proxy or abstract). Log it and 1501 // return the slot as is since all registers are arguments. 1502 LOG(WARNING) << "Trying to demangle slot for method without code " << PrettyMethod(m); 1503 return slot; 1504 } 1505 uint16_t ins_size = code_item->ins_size_; 1506 uint16_t locals_size = code_item->registers_size_ - ins_size; 1507 if (slot < ins_size) { 1508 return slot + locals_size; 1509 } else { 1510 return slot - ins_size; 1511 } 1512} 1513 1514JDWP::JdwpError Dbg::OutputDeclaredFields(JDWP::RefTypeId class_id, bool with_generic, JDWP::ExpandBuf* pReply) { 1515 JDWP::JdwpError status; 1516 mirror::Class* c = DecodeClass(class_id, status); 1517 if (c == NULL) { 1518 return status; 1519 } 1520 1521 size_t instance_field_count = c->NumInstanceFields(); 1522 size_t static_field_count = c->NumStaticFields(); 1523 1524 expandBufAdd4BE(pReply, instance_field_count + static_field_count); 1525 1526 for (size_t i = 0; i < instance_field_count + static_field_count; ++i) { 1527 mirror::ArtField* f = (i < instance_field_count) ? c->GetInstanceField(i) : c->GetStaticField(i - instance_field_count); 1528 expandBufAddFieldId(pReply, ToFieldId(f)); 1529 expandBufAddUtf8String(pReply, f->GetName()); 1530 expandBufAddUtf8String(pReply, f->GetTypeDescriptor()); 1531 if (with_generic) { 1532 static const char genericSignature[1] = ""; 1533 expandBufAddUtf8String(pReply, genericSignature); 1534 } 1535 expandBufAdd4BE(pReply, MangleAccessFlags(f->GetAccessFlags())); 1536 } 1537 return JDWP::ERR_NONE; 1538} 1539 1540JDWP::JdwpError Dbg::OutputDeclaredMethods(JDWP::RefTypeId class_id, bool with_generic, 1541 JDWP::ExpandBuf* pReply) { 1542 JDWP::JdwpError status; 1543 mirror::Class* c = DecodeClass(class_id, status); 1544 if (c == NULL) { 1545 return status; 1546 } 1547 1548 size_t direct_method_count = c->NumDirectMethods(); 1549 size_t virtual_method_count = c->NumVirtualMethods(); 1550 1551 expandBufAdd4BE(pReply, direct_method_count + virtual_method_count); 1552 1553 for (size_t i = 0; i < direct_method_count + virtual_method_count; ++i) { 1554 mirror::ArtMethod* m = (i < direct_method_count) ? 
c->GetDirectMethod(i) : c->GetVirtualMethod(i - direct_method_count); 1555 expandBufAddMethodId(pReply, ToMethodId(m)); 1556 expandBufAddUtf8String(pReply, m->GetName()); 1557 expandBufAddUtf8String(pReply, m->GetSignature().ToString()); 1558 if (with_generic) { 1559 static const char genericSignature[1] = ""; 1560 expandBufAddUtf8String(pReply, genericSignature); 1561 } 1562 expandBufAdd4BE(pReply, MangleAccessFlags(m->GetAccessFlags())); 1563 } 1564 return JDWP::ERR_NONE; 1565} 1566 1567JDWP::JdwpError Dbg::OutputDeclaredInterfaces(JDWP::RefTypeId class_id, JDWP::ExpandBuf* pReply) { 1568 JDWP::JdwpError status; 1569 Thread* self = Thread::Current(); 1570 StackHandleScope<1> hs(self); 1571 Handle<mirror::Class> c(hs.NewHandle(DecodeClass(class_id, status))); 1572 if (c.Get() == nullptr) { 1573 return status; 1574 } 1575 size_t interface_count = c->NumDirectInterfaces(); 1576 expandBufAdd4BE(pReply, interface_count); 1577 for (size_t i = 0; i < interface_count; ++i) { 1578 expandBufAddRefTypeId(pReply, 1579 gRegistry->AddRefType(mirror::Class::GetDirectInterface(self, c, i))); 1580 } 1581 return JDWP::ERR_NONE; 1582} 1583 1584void Dbg::OutputLineTable(JDWP::RefTypeId, JDWP::MethodId method_id, JDWP::ExpandBuf* pReply) 1585 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1586 struct DebugCallbackContext { 1587 int numItems; 1588 JDWP::ExpandBuf* pReply; 1589 1590 static bool Callback(void* context, uint32_t address, uint32_t line_number) { 1591 DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context); 1592 expandBufAdd8BE(pContext->pReply, address); 1593 expandBufAdd4BE(pContext->pReply, line_number); 1594 pContext->numItems++; 1595 return false; 1596 } 1597 }; 1598 mirror::ArtMethod* m = FromMethodId(method_id); 1599 const DexFile::CodeItem* code_item = m->GetCodeItem(); 1600 uint64_t start, end; 1601 if (code_item == nullptr) { 1602 DCHECK(m->IsNative() || m->IsProxyMethod()); 1603 start = -1; 1604 end = -1; 1605 } else { 1606 start = 0; 1607 // Return the index of the last instruction 1608 end = code_item->insns_size_in_code_units_ - 1; 1609 } 1610 1611 expandBufAdd8BE(pReply, start); 1612 expandBufAdd8BE(pReply, end); 1613 1614 // Add numLines later 1615 size_t numLinesOffset = expandBufGetLength(pReply); 1616 expandBufAdd4BE(pReply, 0); 1617 1618 DebugCallbackContext context; 1619 context.numItems = 0; 1620 context.pReply = pReply; 1621 1622 if (code_item != nullptr) { 1623 m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(), 1624 DebugCallbackContext::Callback, NULL, &context); 1625 } 1626 1627 JDWP::Set4BE(expandBufGetBuffer(pReply) + numLinesOffset, context.numItems); 1628} 1629 1630void Dbg::OutputVariableTable(JDWP::RefTypeId, JDWP::MethodId method_id, bool with_generic, 1631 JDWP::ExpandBuf* pReply) { 1632 struct DebugCallbackContext { 1633 mirror::ArtMethod* method; 1634 JDWP::ExpandBuf* pReply; 1635 size_t variable_count; 1636 bool with_generic; 1637 1638 static void Callback(void* context, uint16_t slot, uint32_t startAddress, uint32_t endAddress, 1639 const char* name, const char* descriptor, const char* signature) 1640 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1641 DebugCallbackContext* pContext = reinterpret_cast<DebugCallbackContext*>(context); 1642 1643 VLOG(jdwp) << StringPrintf(" %2zd: %d(%d) '%s' '%s' '%s' actual slot=%d mangled slot=%d", 1644 pContext->variable_count, startAddress, endAddress - startAddress, 1645 name, descriptor, signature, slot, 1646 MangleSlot(slot, pContext->method)); 1647 1648 slot = 
MangleSlot(slot, pContext->method); 1649 1650 expandBufAdd8BE(pContext->pReply, startAddress); 1651 expandBufAddUtf8String(pContext->pReply, name); 1652 expandBufAddUtf8String(pContext->pReply, descriptor); 1653 if (pContext->with_generic) { 1654 expandBufAddUtf8String(pContext->pReply, signature); 1655 } 1656 expandBufAdd4BE(pContext->pReply, endAddress - startAddress); 1657 expandBufAdd4BE(pContext->pReply, slot); 1658 1659 ++pContext->variable_count; 1660 } 1661 }; 1662 mirror::ArtMethod* m = FromMethodId(method_id); 1663 1664 // arg_count considers doubles and longs to take 2 units. 1665 // variable_count considers everything to take 1 unit. 1666 std::string shorty(m->GetShorty()); 1667 expandBufAdd4BE(pReply, mirror::ArtMethod::NumArgRegisters(shorty)); 1668 1669 // We don't know the total number of variables yet, so leave a blank and update it later. 1670 size_t variable_count_offset = expandBufGetLength(pReply); 1671 expandBufAdd4BE(pReply, 0); 1672 1673 DebugCallbackContext context; 1674 context.method = m; 1675 context.pReply = pReply; 1676 context.variable_count = 0; 1677 context.with_generic = with_generic; 1678 1679 const DexFile::CodeItem* code_item = m->GetCodeItem(); 1680 if (code_item != nullptr) { 1681 m->GetDexFile()->DecodeDebugInfo( 1682 code_item, m->IsStatic(), m->GetDexMethodIndex(), NULL, DebugCallbackContext::Callback, 1683 &context); 1684 } 1685 1686 JDWP::Set4BE(expandBufGetBuffer(pReply) + variable_count_offset, context.variable_count); 1687} 1688 1689void Dbg::OutputMethodReturnValue(JDWP::MethodId method_id, const JValue* return_value, 1690 JDWP::ExpandBuf* pReply) { 1691 mirror::ArtMethod* m = FromMethodId(method_id); 1692 JDWP::JdwpTag tag = BasicTagFromDescriptor(m->GetShorty()); 1693 OutputJValue(tag, return_value, pReply); 1694} 1695 1696void Dbg::OutputFieldValue(JDWP::FieldId field_id, const JValue* field_value, 1697 JDWP::ExpandBuf* pReply) { 1698 mirror::ArtField* f = FromFieldId(field_id); 1699 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor()); 1700 OutputJValue(tag, field_value, pReply); 1701} 1702 1703JDWP::JdwpError Dbg::GetBytecodes(JDWP::RefTypeId, JDWP::MethodId method_id, 1704 std::vector<uint8_t>& bytecodes) 1705 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1706 mirror::ArtMethod* m = FromMethodId(method_id); 1707 if (m == NULL) { 1708 return JDWP::ERR_INVALID_METHODID; 1709 } 1710 const DexFile::CodeItem* code_item = m->GetCodeItem(); 1711 size_t byte_count = code_item->insns_size_in_code_units_ * 2; 1712 const uint8_t* begin = reinterpret_cast<const uint8_t*>(code_item->insns_); 1713 const uint8_t* end = begin + byte_count; 1714 for (const uint8_t* p = begin; p != end; ++p) { 1715 bytecodes.push_back(*p); 1716 } 1717 return JDWP::ERR_NONE; 1718} 1719 1720JDWP::JdwpTag Dbg::GetFieldBasicTag(JDWP::FieldId field_id) { 1721 return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor()); 1722} 1723 1724JDWP::JdwpTag Dbg::GetStaticFieldBasicTag(JDWP::FieldId field_id) { 1725 return BasicTagFromDescriptor(FromFieldId(field_id)->GetTypeDescriptor()); 1726} 1727 1728static JDWP::JdwpError GetFieldValueImpl(JDWP::RefTypeId ref_type_id, JDWP::ObjectId object_id, 1729 JDWP::FieldId field_id, JDWP::ExpandBuf* pReply, 1730 bool is_static) 1731 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1732 JDWP::JdwpError status; 1733 mirror::Class* c = DecodeClass(ref_type_id, status); 1734 if (ref_type_id != 0 && c == NULL) { 1735 return status; 1736 } 1737 1738 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id); 1739 if 
((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) { 1740 return JDWP::ERR_INVALID_OBJECT; 1741 } 1742 mirror::ArtField* f = FromFieldId(field_id); 1743 1744 mirror::Class* receiver_class = c; 1745 if (receiver_class == NULL && o != NULL) { 1746 receiver_class = o->GetClass(); 1747 } 1748 // TODO: should we give up now if receiver_class is NULL? 1749 if (receiver_class != NULL && !f->GetDeclaringClass()->IsAssignableFrom(receiver_class)) { 1750 LOG(INFO) << "ERR_INVALID_FIELDID: " << PrettyField(f) << " " << PrettyClass(receiver_class); 1751 return JDWP::ERR_INVALID_FIELDID; 1752 } 1753 1754 // The RI only enforces the static/non-static mismatch in one direction. 1755 // TODO: should we change the tests and check both? 1756 if (is_static) { 1757 if (!f->IsStatic()) { 1758 return JDWP::ERR_INVALID_FIELDID; 1759 } 1760 } else { 1761 if (f->IsStatic()) { 1762 LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.GetValues on static field " << PrettyField(f); 1763 } 1764 } 1765 if (f->IsStatic()) { 1766 o = f->GetDeclaringClass(); 1767 } 1768 1769 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor()); 1770 JValue field_value; 1771 if (tag == JDWP::JT_VOID) { 1772 LOG(FATAL) << "Unknown tag: " << tag; 1773 } else if (!IsPrimitiveTag(tag)) { 1774 field_value.SetL(f->GetObject(o)); 1775 } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) { 1776 field_value.SetJ(f->Get64(o)); 1777 } else { 1778 field_value.SetI(f->Get32(o)); 1779 } 1780 Dbg::OutputJValue(tag, &field_value, pReply); 1781 1782 return JDWP::ERR_NONE; 1783} 1784 1785JDWP::JdwpError Dbg::GetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, 1786 JDWP::ExpandBuf* pReply) { 1787 return GetFieldValueImpl(0, object_id, field_id, pReply, false); 1788} 1789 1790JDWP::JdwpError Dbg::GetStaticFieldValue(JDWP::RefTypeId ref_type_id, JDWP::FieldId field_id, JDWP::ExpandBuf* pReply) { 1791 return GetFieldValueImpl(ref_type_id, 0, field_id, pReply, true); 1792} 1793 1794static JDWP::JdwpError SetFieldValueImpl(JDWP::ObjectId object_id, JDWP::FieldId field_id, 1795 uint64_t value, int width, bool is_static) 1796 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1797 mirror::Object* o = gRegistry->Get<mirror::Object*>(object_id); 1798 if ((!is_static && o == NULL) || o == ObjectRegistry::kInvalidObject) { 1799 return JDWP::ERR_INVALID_OBJECT; 1800 } 1801 mirror::ArtField* f = FromFieldId(field_id); 1802 1803 // The RI only enforces the static/non-static mismatch in one direction. 1804 // TODO: should we change the tests and check both? 1805 if (is_static) { 1806 if (!f->IsStatic()) { 1807 return JDWP::ERR_INVALID_FIELDID; 1808 } 1809 } else { 1810 if (f->IsStatic()) { 1811 LOG(WARNING) << "Ignoring non-NULL receiver for ObjectReference.SetValues on static field " << PrettyField(f); 1812 } 1813 } 1814 if (f->IsStatic()) { 1815 o = f->GetDeclaringClass(); 1816 } 1817 1818 JDWP::JdwpTag tag = BasicTagFromDescriptor(f->GetTypeDescriptor()); 1819 1820 if (IsPrimitiveTag(tag)) { 1821 if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) { 1822 CHECK_EQ(width, 8); 1823 // Debugging can't use transactional mode (runtime only). 1824 f->Set64<false>(o, value); 1825 } else { 1826 CHECK_LE(width, 4); 1827 // Debugging can't use transactional mode (runtime only). 
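      // Note: 'width' is at most 4 in this branch, so the low 32 bits of 'value' hold the
      // entire primitive (boolean, byte, char, short, int or float) and are stored as one word.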
1828 f->Set32<false>(o, value); 1829 } 1830 } else { 1831 mirror::Object* v = gRegistry->Get<mirror::Object*>(value); 1832 if (v == ObjectRegistry::kInvalidObject) { 1833 return JDWP::ERR_INVALID_OBJECT; 1834 } 1835 if (v != NULL) { 1836 mirror::Class* field_type; 1837 { 1838 StackHandleScope<3> hs(Thread::Current()); 1839 HandleWrapper<mirror::Object> h_v(hs.NewHandleWrapper(&v)); 1840 HandleWrapper<mirror::ArtField> h_f(hs.NewHandleWrapper(&f)); 1841 HandleWrapper<mirror::Object> h_o(hs.NewHandleWrapper(&o)); 1842 field_type = FieldHelper(h_f).GetType(); 1843 } 1844 if (!field_type->IsAssignableFrom(v->GetClass())) { 1845 return JDWP::ERR_INVALID_OBJECT; 1846 } 1847 } 1848 // Debugging can't use transactional mode (runtime only). 1849 f->SetObject<false>(o, v); 1850 } 1851 1852 return JDWP::ERR_NONE; 1853} 1854 1855JDWP::JdwpError Dbg::SetFieldValue(JDWP::ObjectId object_id, JDWP::FieldId field_id, uint64_t value, 1856 int width) { 1857 return SetFieldValueImpl(object_id, field_id, value, width, false); 1858} 1859 1860JDWP::JdwpError Dbg::SetStaticFieldValue(JDWP::FieldId field_id, uint64_t value, int width) { 1861 return SetFieldValueImpl(0, field_id, value, width, true); 1862} 1863 1864std::string Dbg::StringToUtf8(JDWP::ObjectId string_id) { 1865 mirror::String* s = gRegistry->Get<mirror::String*>(string_id); 1866 return s->ToModifiedUtf8(); 1867} 1868 1869void Dbg::OutputJValue(JDWP::JdwpTag tag, const JValue* return_value, JDWP::ExpandBuf* pReply) { 1870 if (IsPrimitiveTag(tag)) { 1871 expandBufAdd1(pReply, tag); 1872 if (tag == JDWP::JT_BOOLEAN || tag == JDWP::JT_BYTE) { 1873 expandBufAdd1(pReply, return_value->GetI()); 1874 } else if (tag == JDWP::JT_CHAR || tag == JDWP::JT_SHORT) { 1875 expandBufAdd2BE(pReply, return_value->GetI()); 1876 } else if (tag == JDWP::JT_FLOAT || tag == JDWP::JT_INT) { 1877 expandBufAdd4BE(pReply, return_value->GetI()); 1878 } else if (tag == JDWP::JT_DOUBLE || tag == JDWP::JT_LONG) { 1879 expandBufAdd8BE(pReply, return_value->GetJ()); 1880 } else { 1881 CHECK_EQ(tag, JDWP::JT_VOID); 1882 } 1883 } else { 1884 ScopedObjectAccessUnchecked soa(Thread::Current()); 1885 mirror::Object* value = return_value->GetL(); 1886 expandBufAdd1(pReply, TagFromObject(soa, value)); 1887 expandBufAddObjectId(pReply, gRegistry->Add(value)); 1888 } 1889} 1890 1891JDWP::JdwpError Dbg::GetThreadName(JDWP::ObjectId thread_id, std::string& name) { 1892 ScopedObjectAccessUnchecked soa(Thread::Current()); 1893 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 1894 Thread* thread; 1895 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 1896 if (error != JDWP::ERR_NONE && error != JDWP::ERR_THREAD_NOT_ALIVE) { 1897 return error; 1898 } 1899 1900 // We still need to report the zombie threads' names, so we can't just call Thread::GetThreadName. 
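  // Instead, read java.lang.Thread.name directly off the thread's peer object; the peer (and
  // hence the name) outlives the native thread.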
1901 mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id); 1902 mirror::ArtField* java_lang_Thread_name_field = 1903 soa.DecodeField(WellKnownClasses::java_lang_Thread_name); 1904 mirror::String* s = 1905 reinterpret_cast<mirror::String*>(java_lang_Thread_name_field->GetObject(thread_object)); 1906 if (s != NULL) { 1907 name = s->ToModifiedUtf8(); 1908 } 1909 return JDWP::ERR_NONE; 1910} 1911 1912JDWP::JdwpError Dbg::GetThreadGroup(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) { 1913 ScopedObjectAccess soa(Thread::Current()); 1914 mirror::Object* thread_object = gRegistry->Get<mirror::Object*>(thread_id); 1915 if (thread_object == ObjectRegistry::kInvalidObject) { 1916 return JDWP::ERR_INVALID_OBJECT; 1917 } 1918 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroup"); 1919 // Okay, so it's an object, but is it actually a thread? 1920 JDWP::JdwpError error; 1921 { 1922 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 1923 Thread* thread; 1924 error = DecodeThread(soa, thread_id, thread); 1925 } 1926 if (error == JDWP::ERR_THREAD_NOT_ALIVE) { 1927 // Zombie threads are in the null group. 1928 expandBufAddObjectId(pReply, JDWP::ObjectId(0)); 1929 error = JDWP::ERR_NONE; 1930 } else if (error == JDWP::ERR_NONE) { 1931 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread); 1932 CHECK(c != nullptr); 1933 mirror::ArtField* f = c->FindInstanceField("group", "Ljava/lang/ThreadGroup;"); 1934 CHECK(f != nullptr); 1935 mirror::Object* group = f->GetObject(thread_object); 1936 CHECK(group != nullptr); 1937 JDWP::ObjectId thread_group_id = gRegistry->Add(group); 1938 expandBufAddObjectId(pReply, thread_group_id); 1939 } 1940 soa.Self()->EndAssertNoThreadSuspension(old_cause); 1941 return error; 1942} 1943 1944std::string Dbg::GetThreadGroupName(JDWP::ObjectId thread_group_id) { 1945 ScopedObjectAccess soa(Thread::Current()); 1946 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id); 1947 CHECK(thread_group != nullptr); 1948 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupName"); 1949 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup); 1950 CHECK(c != nullptr); 1951 mirror::ArtField* f = c->FindInstanceField("name", "Ljava/lang/String;"); 1952 CHECK(f != NULL); 1953 mirror::String* s = reinterpret_cast<mirror::String*>(f->GetObject(thread_group)); 1954 soa.Self()->EndAssertNoThreadSuspension(old_cause); 1955 return s->ToModifiedUtf8(); 1956} 1957 1958JDWP::ObjectId Dbg::GetThreadGroupParent(JDWP::ObjectId thread_group_id) { 1959 ScopedObjectAccessUnchecked soa(Thread::Current()); 1960 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id); 1961 CHECK(thread_group != nullptr); 1962 const char* old_cause = soa.Self()->StartAssertNoThreadSuspension("Debugger: GetThreadGroupParent"); 1963 mirror::Class* c = soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ThreadGroup); 1964 CHECK(c != nullptr); 1965 mirror::ArtField* f = c->FindInstanceField("parent", "Ljava/lang/ThreadGroup;"); 1966 CHECK(f != NULL); 1967 mirror::Object* parent = f->GetObject(thread_group); 1968 soa.Self()->EndAssertNoThreadSuspension(old_cause); 1969 return gRegistry->Add(parent); 1970} 1971 1972JDWP::ObjectId Dbg::GetSystemThreadGroupId() { 1973 ScopedObjectAccessUnchecked soa(Thread::Current()); 1974 mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_systemThreadGroup); 1975 
mirror::Object* group = f->GetObject(f->GetDeclaringClass()); 1976 return gRegistry->Add(group); 1977} 1978 1979JDWP::ObjectId Dbg::GetMainThreadGroupId() { 1980 ScopedObjectAccess soa(Thread::Current()); 1981 mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_mainThreadGroup); 1982 mirror::Object* group = f->GetObject(f->GetDeclaringClass()); 1983 return gRegistry->Add(group); 1984} 1985 1986JDWP::JdwpThreadStatus Dbg::ToJdwpThreadStatus(ThreadState state) { 1987 switch (state) { 1988 case kBlocked: 1989 return JDWP::TS_MONITOR; 1990 case kNative: 1991 case kRunnable: 1992 case kSuspended: 1993 return JDWP::TS_RUNNING; 1994 case kSleeping: 1995 return JDWP::TS_SLEEPING; 1996 case kStarting: 1997 case kTerminated: 1998 return JDWP::TS_ZOMBIE; 1999 case kTimedWaiting: 2000 case kWaitingForCheckPointsToRun: 2001 case kWaitingForDebuggerSend: 2002 case kWaitingForDebuggerSuspension: 2003 case kWaitingForDebuggerToAttach: 2004 case kWaitingForDeoptimization: 2005 case kWaitingForGcToComplete: 2006 case kWaitingForJniOnLoad: 2007 case kWaitingForMethodTracingStart: 2008 case kWaitingForSignalCatcherOutput: 2009 case kWaitingInMainDebuggerLoop: 2010 case kWaitingInMainSignalCatcherLoop: 2011 case kWaitingPerformingGc: 2012 case kWaiting: 2013 return JDWP::TS_WAIT; 2014 // Don't add a 'default' here so the compiler can spot incompatible enum changes. 2015 } 2016 LOG(FATAL) << "Unknown thread state: " << state; 2017 return JDWP::TS_ZOMBIE; 2018} 2019 2020JDWP::JdwpError Dbg::GetThreadStatus(JDWP::ObjectId thread_id, JDWP::JdwpThreadStatus* pThreadStatus, 2021 JDWP::JdwpSuspendStatus* pSuspendStatus) { 2022 ScopedObjectAccess soa(Thread::Current()); 2023 2024 *pSuspendStatus = JDWP::SUSPEND_STATUS_NOT_SUSPENDED; 2025 2026 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2027 Thread* thread; 2028 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 2029 if (error != JDWP::ERR_NONE) { 2030 if (error == JDWP::ERR_THREAD_NOT_ALIVE) { 2031 *pThreadStatus = JDWP::TS_ZOMBIE; 2032 return JDWP::ERR_NONE; 2033 } 2034 return error; 2035 } 2036 2037 if (IsSuspendedForDebugger(soa, thread)) { 2038 *pSuspendStatus = JDWP::SUSPEND_STATUS_SUSPENDED; 2039 } 2040 2041 *pThreadStatus = ToJdwpThreadStatus(thread->GetState()); 2042 return JDWP::ERR_NONE; 2043} 2044 2045JDWP::JdwpError Dbg::GetThreadDebugSuspendCount(JDWP::ObjectId thread_id, JDWP::ExpandBuf* pReply) { 2046 ScopedObjectAccess soa(Thread::Current()); 2047 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2048 Thread* thread; 2049 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 2050 if (error != JDWP::ERR_NONE) { 2051 return error; 2052 } 2053 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_); 2054 expandBufAdd4BE(pReply, thread->GetDebugSuspendCount()); 2055 return JDWP::ERR_NONE; 2056} 2057 2058JDWP::JdwpError Dbg::Interrupt(JDWP::ObjectId thread_id) { 2059 ScopedObjectAccess soa(Thread::Current()); 2060 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2061 Thread* thread; 2062 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 2063 if (error != JDWP::ERR_NONE) { 2064 return error; 2065 } 2066 thread->Interrupt(soa.Self()); 2067 return JDWP::ERR_NONE; 2068} 2069 2070void Dbg::GetThreads(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& thread_ids) { 2071 class ThreadListVisitor { 2072 public: 2073 ThreadListVisitor(const ScopedObjectAccessUnchecked& soa, mirror::Object* desired_thread_group, 2074 std::vector<JDWP::ObjectId>& thread_ids) 2075 
SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 2076 : soa_(soa), desired_thread_group_(desired_thread_group), thread_ids_(thread_ids) {} 2077 2078 static void Visit(Thread* t, void* arg) { 2079 reinterpret_cast<ThreadListVisitor*>(arg)->Visit(t); 2080 } 2081 2082 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 2083 // annotalysis. 2084 void Visit(Thread* t) NO_THREAD_SAFETY_ANALYSIS { 2085 if (t == Dbg::GetDebugThread()) { 2086 // Skip the JDWP thread. Some debuggers get bent out of shape when they can't suspend and 2087 // query all threads, so it's easier if we just don't tell them about this thread. 2088 return; 2089 } 2090 mirror::Object* peer = t->GetPeer(); 2091 if (IsInDesiredThreadGroup(peer)) { 2092 thread_ids_.push_back(gRegistry->Add(peer)); 2093 } 2094 } 2095 2096 private: 2097 bool IsInDesiredThreadGroup(mirror::Object* peer) 2098 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 2099 // peer might be NULL if the thread is still starting up. 2100 if (peer == NULL) { 2101 // We can't tell the debugger about this thread yet. 2102 // TODO: if we identified threads to the debugger by their Thread* 2103 // rather than their peer's mirror::Object*, we could fix this. 2104 // Doing so might help us report ZOMBIE threads too. 2105 return false; 2106 } 2107 // Do we want threads from all thread groups? 2108 if (desired_thread_group_ == NULL) { 2109 return true; 2110 } 2111 mirror::Object* group = soa_.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(peer); 2112 return (group == desired_thread_group_); 2113 } 2114 2115 const ScopedObjectAccessUnchecked& soa_; 2116 mirror::Object* const desired_thread_group_; 2117 std::vector<JDWP::ObjectId>& thread_ids_; 2118 }; 2119 2120 ScopedObjectAccessUnchecked soa(Thread::Current()); 2121 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id); 2122 ThreadListVisitor tlv(soa, thread_group, thread_ids); 2123 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2124 Runtime::Current()->GetThreadList()->ForEach(ThreadListVisitor::Visit, &tlv); 2125} 2126 2127void Dbg::GetChildThreadGroups(JDWP::ObjectId thread_group_id, std::vector<JDWP::ObjectId>& child_thread_group_ids) { 2128 ScopedObjectAccess soa(Thread::Current()); 2129 mirror::Object* thread_group = gRegistry->Get<mirror::Object*>(thread_group_id); 2130 2131 // Get the ArrayList<ThreadGroup> "groups" out of this thread group... 2132 mirror::ArtField* groups_field = thread_group->GetClass()->FindInstanceField("groups", "Ljava/util/List;"); 2133 mirror::Object* groups_array_list = groups_field->GetObject(thread_group); 2134 2135 // Get the array and size out of the ArrayList<ThreadGroup>... 2136 mirror::ArtField* array_field = groups_array_list->GetClass()->FindInstanceField("array", "[Ljava/lang/Object;"); 2137 mirror::ArtField* size_field = groups_array_list->GetClass()->FindInstanceField("size", "I"); 2138 mirror::ObjectArray<mirror::Object>* groups_array = 2139 array_field->GetObject(groups_array_list)->AsObjectArray<mirror::Object>(); 2140 const int32_t size = size_field->GetInt(groups_array_list); 2141 2142 // Copy the first 'size' elements out of the array into the result. 
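  // Note: the backing array usually has spare capacity beyond 'size'; entries past 'size' are
  // unused (possibly null) and must not be reported to the debugger.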
2143 for (int32_t i = 0; i < size; ++i) { 2144 child_thread_group_ids.push_back(gRegistry->Add(groups_array->Get(i))); 2145 } 2146} 2147 2148static int GetStackDepth(Thread* thread) 2149 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 2150 struct CountStackDepthVisitor : public StackVisitor { 2151 explicit CountStackDepthVisitor(Thread* thread) 2152 : StackVisitor(thread, NULL), depth(0) {} 2153 2154 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 2155 // annotalysis. 2156 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 2157 if (!GetMethod()->IsRuntimeMethod()) { 2158 ++depth; 2159 } 2160 return true; 2161 } 2162 size_t depth; 2163 }; 2164 2165 CountStackDepthVisitor visitor(thread); 2166 visitor.WalkStack(); 2167 return visitor.depth; 2168} 2169 2170JDWP::JdwpError Dbg::GetThreadFrameCount(JDWP::ObjectId thread_id, size_t& result) { 2171 ScopedObjectAccess soa(Thread::Current()); 2172 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2173 Thread* thread; 2174 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 2175 if (error != JDWP::ERR_NONE) { 2176 return error; 2177 } 2178 if (!IsSuspendedForDebugger(soa, thread)) { 2179 return JDWP::ERR_THREAD_NOT_SUSPENDED; 2180 } 2181 result = GetStackDepth(thread); 2182 return JDWP::ERR_NONE; 2183} 2184 2185JDWP::JdwpError Dbg::GetThreadFrames(JDWP::ObjectId thread_id, size_t start_frame, 2186 size_t frame_count, JDWP::ExpandBuf* buf) { 2187 class GetFrameVisitor : public StackVisitor { 2188 public: 2189 GetFrameVisitor(Thread* thread, size_t start_frame, size_t frame_count, JDWP::ExpandBuf* buf) 2190 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 2191 : StackVisitor(thread, NULL), depth_(0), 2192 start_frame_(start_frame), frame_count_(frame_count), buf_(buf) { 2193 expandBufAdd4BE(buf_, frame_count_); 2194 } 2195 2196 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 2197 // annotalysis. 2198 virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 2199 if (GetMethod()->IsRuntimeMethod()) { 2200 return true; // The debugger can't do anything useful with a frame that has no Method*. 
2201 } 2202 if (depth_ >= start_frame_ + frame_count_) { 2203 return false; 2204 } 2205 if (depth_ >= start_frame_) { 2206 JDWP::FrameId frame_id(GetFrameId()); 2207 JDWP::JdwpLocation location; 2208 SetLocation(location, GetMethod(), GetDexPc()); 2209 VLOG(jdwp) << StringPrintf(" Frame %3zd: id=%3" PRIu64 " ", depth_, frame_id) << location; 2210 expandBufAdd8BE(buf_, frame_id); 2211 expandBufAddLocation(buf_, location); 2212 } 2213 ++depth_; 2214 return true; 2215 } 2216 2217 private: 2218 size_t depth_; 2219 const size_t start_frame_; 2220 const size_t frame_count_; 2221 JDWP::ExpandBuf* buf_; 2222 }; 2223 2224 ScopedObjectAccessUnchecked soa(Thread::Current()); 2225 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2226 Thread* thread; 2227 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 2228 if (error != JDWP::ERR_NONE) { 2229 return error; 2230 } 2231 if (!IsSuspendedForDebugger(soa, thread)) { 2232 return JDWP::ERR_THREAD_NOT_SUSPENDED; 2233 } 2234 GetFrameVisitor visitor(thread, start_frame, frame_count, buf); 2235 visitor.WalkStack(); 2236 return JDWP::ERR_NONE; 2237} 2238 2239JDWP::ObjectId Dbg::GetThreadSelfId() { 2240 ScopedObjectAccessUnchecked soa(Thread::Current()); 2241 return gRegistry->Add(soa.Self()->GetPeer()); 2242} 2243 2244void Dbg::SuspendVM() { 2245 Runtime::Current()->GetThreadList()->SuspendAllForDebugger(); 2246} 2247 2248void Dbg::ResumeVM() { 2249 Runtime::Current()->GetThreadList()->UndoDebuggerSuspensions(); 2250} 2251 2252JDWP::JdwpError Dbg::SuspendThread(JDWP::ObjectId thread_id, bool request_suspension) { 2253 Thread* self = Thread::Current(); 2254 ScopedLocalRef<jobject> peer(self->GetJniEnv(), NULL); 2255 { 2256 ScopedObjectAccess soa(self); 2257 peer.reset(soa.AddLocalReference<jobject>(gRegistry->Get<mirror::Object*>(thread_id))); 2258 } 2259 if (peer.get() == NULL) { 2260 return JDWP::ERR_THREAD_NOT_ALIVE; 2261 } 2262 // Suspend thread to build stack trace. Take suspend thread lock to avoid races with threads 2263 // trying to suspend this one. 2264 MutexLock mu(self, *Locks::thread_list_suspend_thread_lock_); 2265 bool timed_out; 2266 Thread* thread = ThreadList::SuspendThreadByPeer(peer.get(), request_suspension, true, 2267 &timed_out); 2268 if (thread != NULL) { 2269 return JDWP::ERR_NONE; 2270 } else if (timed_out) { 2271 return JDWP::ERR_INTERNAL; 2272 } else { 2273 return JDWP::ERR_THREAD_NOT_ALIVE; 2274 } 2275} 2276 2277void Dbg::ResumeThread(JDWP::ObjectId thread_id) { 2278 ScopedObjectAccessUnchecked soa(Thread::Current()); 2279 mirror::Object* peer = gRegistry->Get<mirror::Object*>(thread_id); 2280 Thread* thread; 2281 { 2282 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2283 thread = Thread::FromManagedThread(soa, peer); 2284 } 2285 if (thread == NULL) { 2286 LOG(WARNING) << "No such thread for resume: " << peer; 2287 return; 2288 } 2289 bool needs_resume; 2290 { 2291 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_); 2292 needs_resume = thread->GetSuspendCount() > 0; 2293 } 2294 if (needs_resume) { 2295 Runtime::Current()->GetThreadList()->Resume(thread, true); 2296 } 2297} 2298 2299void Dbg::SuspendSelf() { 2300 Runtime::Current()->GetThreadList()->SuspendSelfForDebugger(); 2301} 2302 2303struct GetThisVisitor : public StackVisitor { 2304 GetThisVisitor(Thread* thread, Context* context, JDWP::FrameId frame_id) 2305 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 2306 : StackVisitor(thread, context), this_object(NULL), frame_id(frame_id) {} 2307 2308 // TODO: Enable annotalysis. 
We know lock is held in constructor, but abstraction confuses 2309 // annotalysis. 2310 virtual bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 2311 if (frame_id != GetFrameId()) { 2312 return true; // continue 2313 } else { 2314 this_object = GetThisObject(); 2315 return false; 2316 } 2317 } 2318 2319 mirror::Object* this_object; 2320 JDWP::FrameId frame_id; 2321}; 2322 2323JDWP::JdwpError Dbg::GetThisObject(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, 2324 JDWP::ObjectId* result) { 2325 ScopedObjectAccessUnchecked soa(Thread::Current()); 2326 Thread* thread; 2327 { 2328 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2329 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 2330 if (error != JDWP::ERR_NONE) { 2331 return error; 2332 } 2333 if (!IsSuspendedForDebugger(soa, thread)) { 2334 return JDWP::ERR_THREAD_NOT_SUSPENDED; 2335 } 2336 } 2337 std::unique_ptr<Context> context(Context::Create()); 2338 GetThisVisitor visitor(thread, context.get(), frame_id); 2339 visitor.WalkStack(); 2340 *result = gRegistry->Add(visitor.this_object); 2341 return JDWP::ERR_NONE; 2342} 2343 2344JDWP::JdwpError Dbg::GetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot, 2345 JDWP::JdwpTag tag, uint8_t* buf, size_t width) { 2346 struct GetLocalVisitor : public StackVisitor { 2347 GetLocalVisitor(const ScopedObjectAccessUnchecked& soa, Thread* thread, Context* context, 2348 JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint8_t* buf, size_t width) 2349 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 2350 : StackVisitor(thread, context), soa_(soa), frame_id_(frame_id), slot_(slot), tag_(tag), 2351 buf_(buf), width_(width), error_(JDWP::ERR_NONE) {} 2352 2353 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 2354 // annotalysis. 2355 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 2356 if (GetFrameId() != frame_id_) { 2357 return true; // Not our frame, carry on. 2358 } 2359 // TODO: check that the tag is compatible with the actual type of the slot! 2360 // TODO: check slot is valid for this method or return INVALID_SLOT error. 2361 mirror::ArtMethod* m = GetMethod(); 2362 if (m->IsNative()) { 2363 // We can't read local value from native method. 
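        // JDWP defines ERR_OPAQUE_FRAME for frames whose state the VM cannot inspect, which is
        // how we report native frames here.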
2364 error_ = JDWP::ERR_OPAQUE_FRAME; 2365 return false; 2366 } 2367 uint16_t reg = DemangleSlot(slot_, m); 2368 constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION; 2369 switch (tag_) { 2370 case JDWP::JT_BOOLEAN: { 2371 CHECK_EQ(width_, 1U); 2372 uint32_t intVal; 2373 if (GetVReg(m, reg, kIntVReg, &intVal)) { 2374 VLOG(jdwp) << "get boolean local " << reg << " = " << intVal; 2375 JDWP::Set1(buf_+1, intVal != 0); 2376 } else { 2377 VLOG(jdwp) << "failed to get boolean local " << reg; 2378 error_ = kFailureErrorCode; 2379 } 2380 break; 2381 } 2382 case JDWP::JT_BYTE: { 2383 CHECK_EQ(width_, 1U); 2384 uint32_t intVal; 2385 if (GetVReg(m, reg, kIntVReg, &intVal)) { 2386 VLOG(jdwp) << "get byte local " << reg << " = " << intVal; 2387 JDWP::Set1(buf_+1, intVal); 2388 } else { 2389 VLOG(jdwp) << "failed to get byte local " << reg; 2390 error_ = kFailureErrorCode; 2391 } 2392 break; 2393 } 2394 case JDWP::JT_SHORT: 2395 case JDWP::JT_CHAR: { 2396 CHECK_EQ(width_, 2U); 2397 uint32_t intVal; 2398 if (GetVReg(m, reg, kIntVReg, &intVal)) { 2399 VLOG(jdwp) << "get short/char local " << reg << " = " << intVal; 2400 JDWP::Set2BE(buf_+1, intVal); 2401 } else { 2402 VLOG(jdwp) << "failed to get short/char local " << reg; 2403 error_ = kFailureErrorCode; 2404 } 2405 break; 2406 } 2407 case JDWP::JT_INT: { 2408 CHECK_EQ(width_, 4U); 2409 uint32_t intVal; 2410 if (GetVReg(m, reg, kIntVReg, &intVal)) { 2411 VLOG(jdwp) << "get int local " << reg << " = " << intVal; 2412 JDWP::Set4BE(buf_+1, intVal); 2413 } else { 2414 VLOG(jdwp) << "failed to get int local " << reg; 2415 error_ = kFailureErrorCode; 2416 } 2417 break; 2418 } 2419 case JDWP::JT_FLOAT: { 2420 CHECK_EQ(width_, 4U); 2421 uint32_t intVal; 2422 if (GetVReg(m, reg, kFloatVReg, &intVal)) { 2423 VLOG(jdwp) << "get float local " << reg << " = " << intVal; 2424 JDWP::Set4BE(buf_+1, intVal); 2425 } else { 2426 VLOG(jdwp) << "failed to get float local " << reg; 2427 error_ = kFailureErrorCode; 2428 } 2429 break; 2430 } 2431 case JDWP::JT_ARRAY: 2432 case JDWP::JT_CLASS_LOADER: 2433 case JDWP::JT_CLASS_OBJECT: 2434 case JDWP::JT_OBJECT: 2435 case JDWP::JT_STRING: 2436 case JDWP::JT_THREAD: 2437 case JDWP::JT_THREAD_GROUP: { 2438 CHECK_EQ(width_, sizeof(JDWP::ObjectId)); 2439 uint32_t intVal; 2440 if (GetVReg(m, reg, kReferenceVReg, &intVal)) { 2441 mirror::Object* o = reinterpret_cast<mirror::Object*>(intVal); 2442 VLOG(jdwp) << "get " << tag_ << " object local " << reg << " = " << o; 2443 if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) { 2444 LOG(FATAL) << "Register " << reg << " expected to hold " << tag_ << " object: " << o; 2445 } 2446 tag_ = TagFromObject(soa_, o); 2447 JDWP::SetObjectId(buf_+1, gRegistry->Add(o)); 2448 } else { 2449 VLOG(jdwp) << "failed to get " << tag_ << " object local " << reg; 2450 error_ = kFailureErrorCode; 2451 } 2452 break; 2453 } 2454 case JDWP::JT_DOUBLE: { 2455 CHECK_EQ(width_, 8U); 2456 uint32_t lo; 2457 uint32_t hi; 2458 if (GetVReg(m, reg, kDoubleLoVReg, &lo) && GetVReg(m, reg + 1, kDoubleHiVReg, &hi)) { 2459 uint64_t longVal = (static_cast<uint64_t>(hi) << 32) | lo; 2460 VLOG(jdwp) << "get double local " << reg << " = " 2461 << hi << ":" << lo << " = " << longVal; 2462 JDWP::Set8BE(buf_+1, longVal); 2463 } else { 2464 VLOG(jdwp) << "failed to get double local " << reg; 2465 error_ = kFailureErrorCode; 2466 } 2467 break; 2468 } 2469 case JDWP::JT_LONG: { 2470 CHECK_EQ(width_, 8U); 2471 uint32_t lo; 2472 uint32_t hi; 2473 if (GetVReg(m, reg, kLongLoVReg, &lo) && GetVReg(m, reg + 1, 
kLongHiVReg, &hi)) { 2474 uint64_t longVal = (static_cast<uint64_t>(hi) << 32) | lo; 2475 VLOG(jdwp) << "get long local " << reg << " = " 2476 << hi << ":" << lo << " = " << longVal; 2477 JDWP::Set8BE(buf_+1, longVal); 2478 } else { 2479 VLOG(jdwp) << "failed to get long local " << reg; 2480 error_ = kFailureErrorCode; 2481 } 2482 break; 2483 } 2484 default: 2485 LOG(FATAL) << "Unknown tag " << tag_; 2486 break; 2487 } 2488 2489 // Prepend tag, which may have been updated. 2490 JDWP::Set1(buf_, tag_); 2491 return false; 2492 } 2493 const ScopedObjectAccessUnchecked& soa_; 2494 const JDWP::FrameId frame_id_; 2495 const int slot_; 2496 JDWP::JdwpTag tag_; 2497 uint8_t* const buf_; 2498 const size_t width_; 2499 JDWP::JdwpError error_; 2500 }; 2501 2502 ScopedObjectAccessUnchecked soa(Thread::Current()); 2503 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2504 Thread* thread; 2505 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 2506 if (error != JDWP::ERR_NONE) { 2507 return error; 2508 } 2509 // TODO check thread is suspended by the debugger ? 2510 std::unique_ptr<Context> context(Context::Create()); 2511 GetLocalVisitor visitor(soa, thread, context.get(), frame_id, slot, tag, buf, width); 2512 visitor.WalkStack(); 2513 return visitor.error_; 2514} 2515 2516JDWP::JdwpError Dbg::SetLocalValue(JDWP::ObjectId thread_id, JDWP::FrameId frame_id, int slot, 2517 JDWP::JdwpTag tag, uint64_t value, size_t width) { 2518 struct SetLocalVisitor : public StackVisitor { 2519 SetLocalVisitor(Thread* thread, Context* context, 2520 JDWP::FrameId frame_id, int slot, JDWP::JdwpTag tag, uint64_t value, 2521 size_t width) 2522 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 2523 : StackVisitor(thread, context), 2524 frame_id_(frame_id), slot_(slot), tag_(tag), value_(value), width_(width), 2525 error_(JDWP::ERR_NONE) {} 2526 2527 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 2528 // annotalysis. 2529 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 2530 if (GetFrameId() != frame_id_) { 2531 return true; // Not our frame, carry on. 2532 } 2533 // TODO: check that the tag is compatible with the actual type of the slot! 2534 // TODO: check slot is valid for this method or return INVALID_SLOT error. 2535 mirror::ArtMethod* m = GetMethod(); 2536 if (m->IsNative()) { 2537 // We can't write local values into a native method's frame. 
2538 error_ = JDWP::ERR_OPAQUE_FRAME; 2539 return false; 2540 } 2541 uint16_t reg = DemangleSlot(slot_, m); 2542 constexpr JDWP::JdwpError kFailureErrorCode = JDWP::ERR_ABSENT_INFORMATION; 2543 switch (tag_) { 2544 case JDWP::JT_BOOLEAN: 2545 case JDWP::JT_BYTE: 2546 CHECK_EQ(width_, 1U); 2547 if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) { 2548 VLOG(jdwp) << "failed to set boolean/byte local " << reg << " = " 2549 << static_cast<uint32_t>(value_); 2550 error_ = kFailureErrorCode; 2551 } 2552 break; 2553 case JDWP::JT_SHORT: 2554 case JDWP::JT_CHAR: 2555 CHECK_EQ(width_, 2U); 2556 if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) { 2557 VLOG(jdwp) << "failed to set short/char local " << reg << " = " 2558 << static_cast<uint32_t>(value_); 2559 error_ = kFailureErrorCode; 2560 } 2561 break; 2562 case JDWP::JT_INT: 2563 CHECK_EQ(width_, 4U); 2564 if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kIntVReg)) { 2565 VLOG(jdwp) << "failed to set int local " << reg << " = " 2566 << static_cast<uint32_t>(value_); 2567 error_ = kFailureErrorCode; 2568 } 2569 break; 2570 case JDWP::JT_FLOAT: 2571 CHECK_EQ(width_, 4U); 2572 if (!SetVReg(m, reg, static_cast<uint32_t>(value_), kFloatVReg)) { 2573 VLOG(jdwp) << "failed to set float local " << reg << " = " 2574 << static_cast<uint32_t>(value_); 2575 error_ = kFailureErrorCode; 2576 } 2577 break; 2578 case JDWP::JT_ARRAY: 2579 case JDWP::JT_CLASS_LOADER: 2580 case JDWP::JT_CLASS_OBJECT: 2581 case JDWP::JT_OBJECT: 2582 case JDWP::JT_STRING: 2583 case JDWP::JT_THREAD: 2584 case JDWP::JT_THREAD_GROUP: { 2585 CHECK_EQ(width_, sizeof(JDWP::ObjectId)); 2586 mirror::Object* o = gRegistry->Get<mirror::Object*>(static_cast<JDWP::ObjectId>(value_)); 2587 if (o == ObjectRegistry::kInvalidObject) { 2588 VLOG(jdwp) << tag_ << " object " << o << " is an invalid object"; 2589 error_ = JDWP::ERR_INVALID_OBJECT; 2590 } else if (!SetVReg(m, reg, static_cast<uint32_t>(reinterpret_cast<uintptr_t>(o)), 2591 kReferenceVReg)) { 2592 VLOG(jdwp) << "failed to set " << tag_ << " object local " << reg << " = " << o; 2593 error_ = kFailureErrorCode; 2594 } 2595 break; 2596 } 2597 case JDWP::JT_DOUBLE: { 2598 CHECK_EQ(width_, 8U); 2599 const uint32_t lo = static_cast<uint32_t>(value_); 2600 const uint32_t hi = static_cast<uint32_t>(value_ >> 32); 2601 bool success = SetVReg(m, reg, lo, kDoubleLoVReg); 2602 success &= SetVReg(m, reg + 1, hi, kDoubleHiVReg); 2603 if (!success) { 2604 uint64_t longVal = (static_cast<uint64_t>(hi) << 32) | lo; 2605 VLOG(jdwp) << "failed to set double local " << reg << " = " 2606 << hi << ":" << lo << " = " << longVal; 2607 error_ = kFailureErrorCode; 2608 } 2609 break; 2610 } 2611 case JDWP::JT_LONG: { 2612 CHECK_EQ(width_, 8U); 2613 const uint32_t lo = static_cast<uint32_t>(value_); 2614 const uint32_t hi = static_cast<uint32_t>(value_ >> 32); 2615 bool success = SetVReg(m, reg, lo, kLongLoVReg); 2616 success &= SetVReg(m, reg + 1, hi, kLongHiVReg); 2617 if (!success) { 2618 uint64_t longVal = (static_cast<uint64_t>(hi) << 32) | lo; 2619 VLOG(jdwp) << "failed to set long local " << reg << " = " 2620 << hi << ":" << lo << " = " << longVal; 2621 error_ = kFailureErrorCode; 2622 } 2623 break; 2624 } 2625 default: 2626 LOG(FATAL) << "Unknown tag " << tag_; 2627 break; 2628 } 2629 return false; 2630 } 2631 2632 const JDWP::FrameId frame_id_; 2633 const int slot_; 2634 const JDWP::JdwpTag tag_; 2635 const uint64_t value_; 2636 const size_t width_; 2637 JDWP::JdwpError error_; 2638 }; 2639 2640 ScopedObjectAccessUnchecked 
soa(Thread::Current()); 2641 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 2642 Thread* thread; 2643 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 2644 if (error != JDWP::ERR_NONE) { 2645 return error; 2646 } 2647 // TODO check thread is suspended by the debugger ? 2648 std::unique_ptr<Context> context(Context::Create()); 2649 SetLocalVisitor visitor(thread, context.get(), frame_id, slot, tag, value, width); 2650 visitor.WalkStack(); 2651 return visitor.error_; 2652} 2653 2654JDWP::ObjectId Dbg::GetThisObjectIdForEvent(mirror::Object* this_object) { 2655 // If 'this_object' isn't already in the registry, we know that we're not looking for it, so 2656 // there's no point adding it to the registry and burning through ids. 2657 // When registering an event request with an instance filter, we've been given an existing object 2658 // id so it must already be present in the registry when the event fires. 2659 JDWP::ObjectId this_id = 0; 2660 if (this_object != nullptr && gRegistry->Contains(this_object)) { 2661 this_id = gRegistry->Add(this_object); 2662 } 2663 return this_id; 2664} 2665 2666void Dbg::PostLocationEvent(mirror::ArtMethod* m, int dex_pc, mirror::Object* this_object, 2667 int event_flags, const JValue* return_value) { 2668 if (!IsDebuggerActive()) { 2669 return; 2670 } 2671 DCHECK(m != nullptr); 2672 DCHECK_EQ(m->IsStatic(), this_object == nullptr); 2673 JDWP::JdwpLocation location; 2674 SetLocation(location, m, dex_pc); 2675 2676 // We need 'this' for InstanceOnly filters only. 2677 JDWP::ObjectId this_id = GetThisObjectIdForEvent(this_object); 2678 gJdwpState->PostLocationEvent(&location, this_id, event_flags, return_value); 2679} 2680 2681void Dbg::PostFieldAccessEvent(mirror::ArtMethod* m, int dex_pc, 2682 mirror::Object* this_object, mirror::ArtField* f) { 2683 if (!IsDebuggerActive()) { 2684 return; 2685 } 2686 DCHECK(m != nullptr); 2687 DCHECK(f != nullptr); 2688 JDWP::JdwpLocation location; 2689 SetLocation(location, m, dex_pc); 2690 2691 JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass()); 2692 JDWP::FieldId field_id = ToFieldId(f); 2693 JDWP::ObjectId this_id = gRegistry->Add(this_object); 2694 2695 gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, nullptr, false); 2696} 2697 2698void Dbg::PostFieldModificationEvent(mirror::ArtMethod* m, int dex_pc, 2699 mirror::Object* this_object, mirror::ArtField* f, 2700 const JValue* field_value) { 2701 if (!IsDebuggerActive()) { 2702 return; 2703 } 2704 DCHECK(m != nullptr); 2705 DCHECK(f != nullptr); 2706 DCHECK(field_value != nullptr); 2707 JDWP::JdwpLocation location; 2708 SetLocation(location, m, dex_pc); 2709 2710 JDWP::RefTypeId type_id = gRegistry->AddRefType(f->GetDeclaringClass()); 2711 JDWP::FieldId field_id = ToFieldId(f); 2712 JDWP::ObjectId this_id = gRegistry->Add(this_object); 2713 2714 gJdwpState->PostFieldEvent(&location, type_id, field_id, this_id, field_value, true); 2715} 2716 2717void Dbg::PostException(const ThrowLocation& throw_location, 2718 mirror::ArtMethod* catch_method, 2719 uint32_t catch_dex_pc, mirror::Throwable* exception_object) { 2720 if (!IsDebuggerActive()) { 2721 return; 2722 } 2723 2724 JDWP::JdwpLocation jdwp_throw_location; 2725 SetLocation(jdwp_throw_location, throw_location.GetMethod(), throw_location.GetDexPc()); 2726 JDWP::JdwpLocation catch_location; 2727 SetLocation(catch_location, catch_method, catch_dex_pc); 2728 2729 // We need 'this' for InstanceOnly filters only. 
2730 JDWP::ObjectId this_id = GetThisObjectIdForEvent(throw_location.GetThis()); 2731 JDWP::ObjectId exception_id = gRegistry->Add(exception_object); 2732 JDWP::RefTypeId exception_class_id = gRegistry->AddRefType(exception_object->GetClass()); 2733 2734 gJdwpState->PostException(&jdwp_throw_location, exception_id, exception_class_id, &catch_location, 2735 this_id); 2736} 2737 2738void Dbg::PostClassPrepare(mirror::Class* c) { 2739 if (!IsDebuggerActive()) { 2740 return; 2741 } 2742 2743 // OLD-TODO - we currently always send both "verified" and "prepared" since 2744 // debuggers seem to like that. There might be some advantage to honesty, 2745 // since the class may not yet be verified. 2746 int state = JDWP::CS_VERIFIED | JDWP::CS_PREPARED; 2747 JDWP::JdwpTypeTag tag = GetTypeTag(c); 2748 gJdwpState->PostClassPrepare(tag, gRegistry->Add(c), c->GetDescriptor(), state); 2749} 2750 2751void Dbg::UpdateDebugger(Thread* thread, mirror::Object* this_object, 2752 mirror::ArtMethod* m, uint32_t dex_pc, 2753 int event_flags, const JValue* return_value) { 2754 if (!IsDebuggerActive() || dex_pc == static_cast<uint32_t>(-2) /* fake method exit */) { 2755 return; 2756 } 2757 2758 if (IsBreakpoint(m, dex_pc)) { 2759 event_flags |= kBreakpoint; 2760 } 2761 2762 // If the debugger is single-stepping one of our threads, check to 2763 // see if we're that thread and we've reached a step point. 2764 const SingleStepControl* single_step_control = thread->GetSingleStepControl(); 2765 DCHECK(single_step_control != nullptr); 2766 if (single_step_control->is_active) { 2767 CHECK(!m->IsNative()); 2768 if (single_step_control->step_depth == JDWP::SD_INTO) { 2769 // Step into method calls. We break when the line number 2770 // or method pointer changes. If we're in SS_MIN mode, we 2771 // always stop. 2772 if (single_step_control->method != m) { 2773 event_flags |= kSingleStep; 2774 VLOG(jdwp) << "SS new method"; 2775 } else if (single_step_control->step_size == JDWP::SS_MIN) { 2776 event_flags |= kSingleStep; 2777 VLOG(jdwp) << "SS new instruction"; 2778 } else if (single_step_control->ContainsDexPc(dex_pc)) { 2779 event_flags |= kSingleStep; 2780 VLOG(jdwp) << "SS new line"; 2781 } 2782 } else if (single_step_control->step_depth == JDWP::SD_OVER) { 2783 // Step over method calls. We break when the line number is 2784 // different and the frame depth is <= the original frame 2785 // depth. (We can't just compare on the method, because we 2786 // might get unrolled past it by an exception, and it's tricky 2787 // to identify recursion.) 2788 2789 int stack_depth = GetStackDepth(thread); 2790 2791 if (stack_depth < single_step_control->stack_depth) { 2792 // Popped up one or more frames, always trigger. 2793 event_flags |= kSingleStep; 2794 VLOG(jdwp) << "SS method pop"; 2795 } else if (stack_depth == single_step_control->stack_depth) { 2796 // Same depth, see if we moved. 2797 if (single_step_control->step_size == JDWP::SS_MIN) { 2798 event_flags |= kSingleStep; 2799 VLOG(jdwp) << "SS new instruction"; 2800 } else if (single_step_control->ContainsDexPc(dex_pc)) { 2801 event_flags |= kSingleStep; 2802 VLOG(jdwp) << "SS new line"; 2803 } 2804 } 2805 } else { 2806 CHECK_EQ(single_step_control->step_depth, JDWP::SD_OUT); 2807 // Return from the current method. We break when the frame 2808 // depth pops up. 2809 2810 // This differs from the "method exit" break in that it stops 2811 // with the PC at the next instruction in the returned-to 2812 // function, rather than the end of the returning function. 
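      // For example, if the step was requested at stack depth 3, nothing is reported while we
      // remain at depth 3 or deeper; the event fires at the first instruction executed at depth
      // 2 or shallower.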
2813 2814 int stack_depth = GetStackDepth(thread); 2815 if (stack_depth < single_step_control->stack_depth) { 2816 event_flags |= kSingleStep; 2817 VLOG(jdwp) << "SS method pop"; 2818 } 2819 } 2820 } 2821 2822 // If there's something interesting going on, see if it matches one 2823 // of the debugger filters. 2824 if (event_flags != 0) { 2825 Dbg::PostLocationEvent(m, dex_pc, this_object, event_flags, return_value); 2826 } 2827} 2828 2829size_t* Dbg::GetReferenceCounterForEvent(uint32_t instrumentation_event) { 2830 switch (instrumentation_event) { 2831 case instrumentation::Instrumentation::kMethodEntered: 2832 return &method_enter_event_ref_count_; 2833 case instrumentation::Instrumentation::kMethodExited: 2834 return &method_exit_event_ref_count_; 2835 case instrumentation::Instrumentation::kDexPcMoved: 2836 return &dex_pc_change_event_ref_count_; 2837 case instrumentation::Instrumentation::kFieldRead: 2838 return &field_read_event_ref_count_; 2839 case instrumentation::Instrumentation::kFieldWritten: 2840 return &field_write_event_ref_count_; 2841 case instrumentation::Instrumentation::kExceptionCaught: 2842 return &exception_catch_event_ref_count_; 2843 default: 2844 return nullptr; 2845 } 2846} 2847 2848// Process request while all mutator threads are suspended. 2849void Dbg::ProcessDeoptimizationRequest(const DeoptimizationRequest& request) { 2850 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 2851 switch (request.GetKind()) { 2852 case DeoptimizationRequest::kNothing: 2853 LOG(WARNING) << "Ignoring empty deoptimization request."; 2854 break; 2855 case DeoptimizationRequest::kRegisterForEvent: 2856 VLOG(jdwp) << StringPrintf("Add debugger as listener for instrumentation event 0x%x", 2857 request.InstrumentationEvent()); 2858 instrumentation->AddListener(&gDebugInstrumentationListener, request.InstrumentationEvent()); 2859 instrumentation_events_ |= request.InstrumentationEvent(); 2860 break; 2861 case DeoptimizationRequest::kUnregisterForEvent: 2862 VLOG(jdwp) << StringPrintf("Remove debugger as listener for instrumentation event 0x%x", 2863 request.InstrumentationEvent()); 2864 instrumentation->RemoveListener(&gDebugInstrumentationListener, 2865 request.InstrumentationEvent()); 2866 instrumentation_events_ &= ~request.InstrumentationEvent(); 2867 break; 2868 case DeoptimizationRequest::kFullDeoptimization: 2869 VLOG(jdwp) << "Deoptimize the world ..."; 2870 instrumentation->DeoptimizeEverything(); 2871 VLOG(jdwp) << "Deoptimize the world DONE"; 2872 break; 2873 case DeoptimizationRequest::kFullUndeoptimization: 2874 VLOG(jdwp) << "Undeoptimize the world ..."; 2875 instrumentation->UndeoptimizeEverything(); 2876 VLOG(jdwp) << "Undeoptimize the world DONE"; 2877 break; 2878 case DeoptimizationRequest::kSelectiveDeoptimization: 2879 VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " ..."; 2880 instrumentation->Deoptimize(request.Method()); 2881 VLOG(jdwp) << "Deoptimize method " << PrettyMethod(request.Method()) << " DONE"; 2882 break; 2883 case DeoptimizationRequest::kSelectiveUndeoptimization: 2884 VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " ..."; 2885 instrumentation->Undeoptimize(request.Method()); 2886 VLOG(jdwp) << "Undeoptimize method " << PrettyMethod(request.Method()) << " DONE"; 2887 break; 2888 default: 2889 LOG(FATAL) << "Unsupported deoptimization request kind " << request.GetKind(); 2890 break; 2891 } 2892} 2893 2894void Dbg::DelayFullUndeoptimization() { 2895 MutexLock 
mu(Thread::Current(), *deoptimization_lock_); 2896 ++delayed_full_undeoptimization_count_; 2897 DCHECK_LE(delayed_full_undeoptimization_count_, full_deoptimization_event_count_); 2898} 2899 2900void Dbg::ProcessDelayedFullUndeoptimizations() { 2901 // TODO: avoid taking the lock twice (once here and once in ManageDeoptimization). 2902 { 2903 MutexLock mu(Thread::Current(), *deoptimization_lock_); 2904 while (delayed_full_undeoptimization_count_ > 0) { 2905 DeoptimizationRequest req; 2906 req.SetKind(DeoptimizationRequest::kFullUndeoptimization); 2907 req.SetMethod(nullptr); 2908 RequestDeoptimizationLocked(req); 2909 --delayed_full_undeoptimization_count_; 2910 } 2911 } 2912 ManageDeoptimization(); 2913} 2914 2915void Dbg::RequestDeoptimization(const DeoptimizationRequest& req) { 2916 if (req.GetKind() == DeoptimizationRequest::kNothing) { 2917 // Nothing to do. 2918 return; 2919 } 2920 MutexLock mu(Thread::Current(), *deoptimization_lock_); 2921 RequestDeoptimizationLocked(req); 2922} 2923 2924void Dbg::RequestDeoptimizationLocked(const DeoptimizationRequest& req) { 2925 switch (req.GetKind()) { 2926 case DeoptimizationRequest::kRegisterForEvent: { 2927 DCHECK_NE(req.InstrumentationEvent(), 0u); 2928 size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent()); 2929 CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x", 2930 req.InstrumentationEvent()); 2931 if (*counter == 0) { 2932 VLOG(jdwp) << StringPrintf("Queue request #%zd to start listening to instrumentation event 0x%x", 2933 deoptimization_requests_.size(), req.InstrumentationEvent()); 2934 deoptimization_requests_.push_back(req); 2935 } 2936 *counter = *counter + 1; 2937 break; 2938 } 2939 case DeoptimizationRequest::kUnregisterForEvent: { 2940 DCHECK_NE(req.InstrumentationEvent(), 0u); 2941 size_t* counter = GetReferenceCounterForEvent(req.InstrumentationEvent()); 2942 CHECK(counter != nullptr) << StringPrintf("No counter for instrumentation event 0x%x", 2943 req.InstrumentationEvent()); 2944 *counter = *counter - 1; 2945 if (*counter == 0) { 2946 VLOG(jdwp) << StringPrintf("Queue request #%zd to stop listening to instrumentation event 0x%x", 2947 deoptimization_requests_.size(), req.InstrumentationEvent()); 2948 deoptimization_requests_.push_back(req); 2949 } 2950 break; 2951 } 2952 case DeoptimizationRequest::kFullDeoptimization: { 2953 DCHECK(req.Method() == nullptr); 2954 if (full_deoptimization_event_count_ == 0) { 2955 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size() 2956 << " for full deoptimization"; 2957 deoptimization_requests_.push_back(req); 2958 } 2959 ++full_deoptimization_event_count_; 2960 break; 2961 } 2962 case DeoptimizationRequest::kFullUndeoptimization: { 2963 DCHECK(req.Method() == nullptr); 2964 DCHECK_GT(full_deoptimization_event_count_, 0U); 2965 --full_deoptimization_event_count_; 2966 if (full_deoptimization_event_count_ == 0) { 2967 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size() 2968 << " for full undeoptimization"; 2969 deoptimization_requests_.push_back(req); 2970 } 2971 break; 2972 } 2973 case DeoptimizationRequest::kSelectiveDeoptimization: { 2974 DCHECK(req.Method() != nullptr); 2975 VLOG(jdwp) << "Queue request #" << deoptimization_requests_.size() 2976 << " for deoptimization of " << PrettyMethod(req.Method()); 2977 deoptimization_requests_.push_back(req); 2978 break; 2979 } 2980 case DeoptimizationRequest::kSelectiveUndeoptimization: { 2981 DCHECK(req.Method() != nullptr); 2982 VLOG(jdwp) << "Queue request #" 
<< deoptimization_requests_.size() 2983 << " for undeoptimization of " << PrettyMethod(req.Method()); 2984 deoptimization_requests_.push_back(req); 2985 break; 2986 } 2987 default: { 2988 LOG(FATAL) << "Unknown deoptimization request kind " << req.GetKind(); 2989 break; 2990 } 2991 } 2992} 2993 2994void Dbg::ManageDeoptimization() { 2995 Thread* const self = Thread::Current(); 2996 { 2997 // Avoid suspend/resume if there is no pending request. 2998 MutexLock mu(self, *deoptimization_lock_); 2999 if (deoptimization_requests_.empty()) { 3000 return; 3001 } 3002 } 3003 CHECK_EQ(self->GetState(), kRunnable); 3004 self->TransitionFromRunnableToSuspended(kWaitingForDeoptimization); 3005 // We need to suspend mutator threads first. 3006 Runtime* const runtime = Runtime::Current(); 3007 runtime->GetThreadList()->SuspendAll(); 3008 const ThreadState old_state = self->SetStateUnsafe(kRunnable); 3009 { 3010 MutexLock mu(self, *deoptimization_lock_); 3011 size_t req_index = 0; 3012 for (DeoptimizationRequest& request : deoptimization_requests_) { 3013 VLOG(jdwp) << "Process deoptimization request #" << req_index++; 3014 ProcessDeoptimizationRequest(request); 3015 } 3016 deoptimization_requests_.clear(); 3017 } 3018 CHECK_EQ(self->SetStateUnsafe(old_state), kRunnable); 3019 runtime->GetThreadList()->ResumeAll(); 3020 self->TransitionFromSuspendedToRunnable(); 3021} 3022 3023static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m) 3024 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 3025 const DexFile::CodeItem* code_item = m->GetCodeItem(); 3026 if (code_item == nullptr) { 3027 // TODO We should not be asked to watch location in a native or abstract method so the code item 3028 // should never be null. We could just check we never encounter this case. 3029 return false; 3030 } 3031 StackHandleScope<2> hs(self); 3032 mirror::Class* declaring_class = m->GetDeclaringClass(); 3033 Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache())); 3034 Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader())); 3035 verifier::MethodVerifier verifier(dex_cache->GetDexFile(), &dex_cache, &class_loader, 3036 &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m, 3037 m->GetAccessFlags(), false, true, false); 3038 // Note: we don't need to verify the method. 3039 return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr); 3040} 3041 3042static const Breakpoint* FindFirstBreakpointForMethod(mirror::ArtMethod* m) 3043 EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 3044 for (Breakpoint& breakpoint : gBreakpoints) { 3045 if (breakpoint.Method() == m) { 3046 return &breakpoint; 3047 } 3048 } 3049 return nullptr; 3050} 3051 3052// Sanity checks all existing breakpoints on the same method. 3053static void SanityCheckExistingBreakpoints(mirror::ArtMethod* m, bool need_full_deoptimization) 3054 EXCLUSIVE_LOCKS_REQUIRED(Locks::breakpoint_lock_) { 3055 if (kIsDebugBuild) { 3056 for (const Breakpoint& breakpoint : gBreakpoints) { 3057 CHECK_EQ(need_full_deoptimization, breakpoint.NeedFullDeoptimization()); 3058 } 3059 if (need_full_deoptimization) { 3060 // We should have deoptimized everything but not "selectively" deoptimized this method. 3061 CHECK(Runtime::Current()->GetInstrumentation()->AreAllMethodsDeoptimized()); 3062 CHECK(!Runtime::Current()->GetInstrumentation()->IsDeoptimized(m)); 3063 } else { 3064 // We should have "selectively" deoptimized this method. 
3065 // Note: while we have not deoptimized everything for this method, we may have done it for 3066 // another event. 3067 CHECK(Runtime::Current()->GetInstrumentation()->IsDeoptimized(m)); 3068 } 3069 } 3070} 3071 3072// Installs a breakpoint at the specified location. Also indicates through the deoptimization 3073// request if we need to deoptimize. 3074void Dbg::WatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) { 3075 Thread* const self = Thread::Current(); 3076 mirror::ArtMethod* m = FromMethodId(location->method_id); 3077 DCHECK(m != nullptr) << "No method for method id " << location->method_id; 3078 3079 MutexLock mu(self, *Locks::breakpoint_lock_); 3080 const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m); 3081 bool need_full_deoptimization; 3082 if (existing_breakpoint == nullptr) { 3083 // There is no breakpoint on this method yet: we need to deoptimize. If this method may be 3084 // inlined, we deoptimize everything; otherwise we deoptimize only this method. 3085 need_full_deoptimization = IsMethodPossiblyInlined(self, m); 3086 if (need_full_deoptimization) { 3087 req->SetKind(DeoptimizationRequest::kFullDeoptimization); 3088 req->SetMethod(nullptr); 3089 } else { 3090 req->SetKind(DeoptimizationRequest::kSelectiveDeoptimization); 3091 req->SetMethod(m); 3092 } 3093 } else { 3094 // There is at least one breakpoint for this method: we don't need to deoptimize. 3095 req->SetKind(DeoptimizationRequest::kNothing); 3096 req->SetMethod(nullptr); 3097 3098 need_full_deoptimization = existing_breakpoint->NeedFullDeoptimization(); 3099 SanityCheckExistingBreakpoints(m, need_full_deoptimization); 3100 } 3101 3102 gBreakpoints.push_back(Breakpoint(m, location->dex_pc, need_full_deoptimization)); 3103 VLOG(jdwp) << "Set breakpoint #" << (gBreakpoints.size() - 1) << ": " 3104 << gBreakpoints[gBreakpoints.size() - 1]; 3105} 3106 3107// Uninstalls a breakpoint at the specified location. Also indicates through the deoptimization 3108// request if we need to undeoptimize. 3109void Dbg::UnwatchLocation(const JDWP::JdwpLocation* location, DeoptimizationRequest* req) { 3110 MutexLock mu(Thread::Current(), *Locks::breakpoint_lock_); 3111 mirror::ArtMethod* m = FromMethodId(location->method_id); 3112 DCHECK(m != nullptr) << "No method for method id " << location->method_id; 3113 bool need_full_deoptimization = false; 3114 for (size_t i = 0, e = gBreakpoints.size(); i < e; ++i) { 3115 if (gBreakpoints[i].DexPc() == location->dex_pc && gBreakpoints[i].Method() == m) { 3116 VLOG(jdwp) << "Removed breakpoint #" << i << ": " << gBreakpoints[i]; 3117 need_full_deoptimization = gBreakpoints[i].NeedFullDeoptimization(); 3118 DCHECK_NE(need_full_deoptimization, Runtime::Current()->GetInstrumentation()->IsDeoptimized(m)); 3119 gBreakpoints.erase(gBreakpoints.begin() + i); 3120 break; 3121 } 3122 } 3123 const Breakpoint* const existing_breakpoint = FindFirstBreakpointForMethod(m); 3124 if (existing_breakpoint == nullptr) { 3125 // There is no more breakpoint on this method: we need to undeoptimize. 3126 if (need_full_deoptimization) { 3127 // This method required full deoptimization: we need to undeoptimize everything. 3128 req->SetKind(DeoptimizationRequest::kFullUndeoptimization); 3129 req->SetMethod(nullptr); 3130 } else { 3131 // This method required selective deoptimization: we need to undeoptimize only that method. 
3132 req->SetKind(DeoptimizationRequest::kSelectiveUndeoptimization); 3133 req->SetMethod(m); 3134 } 3135 } else { 3136 // There is at least one breakpoint for this method: we don't need to undeoptimize. 3137 req->SetKind(DeoptimizationRequest::kNothing); 3138 req->SetMethod(nullptr); 3139 SanityCheckExistingBreakpoints(m, need_full_deoptimization); 3140 } 3141} 3142 3143// Scoped utility class to suspend a thread so that we may do tasks such as walk its stack. Doesn't 3144// cause suspension if the thread is the current thread. 3145class ScopedThreadSuspension { 3146 public: 3147 ScopedThreadSuspension(Thread* self, JDWP::ObjectId thread_id) 3148 LOCKS_EXCLUDED(Locks::thread_list_lock_) 3149 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) : 3150 thread_(nullptr), 3151 error_(JDWP::ERR_NONE), 3152 self_suspend_(false), 3153 other_suspend_(false) { 3154 ScopedObjectAccessUnchecked soa(self); 3155 { 3156 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 3157 error_ = DecodeThread(soa, thread_id, thread_); 3158 } 3159 if (error_ == JDWP::ERR_NONE) { 3160 if (thread_ == soa.Self()) { 3161 self_suspend_ = true; 3162 } else { 3163 soa.Self()->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension); 3164 jobject thread_peer = gRegistry->GetJObject(thread_id); 3165 bool timed_out; 3166 Thread* suspended_thread; 3167 { 3168 // Take suspend thread lock to avoid races with threads trying to suspend this one. 3169 MutexLock mu(soa.Self(), *Locks::thread_list_suspend_thread_lock_); 3170 suspended_thread = ThreadList::SuspendThreadByPeer(thread_peer, true, true, 3171 &timed_out); 3172 } 3173 CHECK_EQ(soa.Self()->TransitionFromSuspendedToRunnable(), kWaitingForDebuggerSuspension); 3174 if (suspended_thread == nullptr) { 3175 // Thread terminated from under us while suspending. 3176 error_ = JDWP::ERR_INVALID_THREAD; 3177 } else { 3178 CHECK_EQ(suspended_thread, thread_); 3179 other_suspend_ = true; 3180 } 3181 } 3182 } 3183 } 3184 3185 Thread* GetThread() const { 3186 return thread_; 3187 } 3188 3189 JDWP::JdwpError GetError() const { 3190 return error_; 3191 } 3192 3193 ~ScopedThreadSuspension() { 3194 if (other_suspend_) { 3195 Runtime::Current()->GetThreadList()->Resume(thread_, true); 3196 } 3197 } 3198 3199 private: 3200 Thread* thread_; 3201 JDWP::JdwpError error_; 3202 bool self_suspend_; 3203 bool other_suspend_; 3204}; 3205 3206JDWP::JdwpError Dbg::ConfigureStep(JDWP::ObjectId thread_id, JDWP::JdwpStepSize step_size, 3207 JDWP::JdwpStepDepth step_depth) { 3208 Thread* self = Thread::Current(); 3209 ScopedThreadSuspension sts(self, thread_id); 3210 if (sts.GetError() != JDWP::ERR_NONE) { 3211 return sts.GetError(); 3212 } 3213 3214 // 3215 // Work out what Method* we're in, the current line number, and how deep the stack currently 3216 // is for step-out. 3217 // 3218 3219 struct SingleStepStackVisitor : public StackVisitor { 3220 explicit SingleStepStackVisitor(Thread* thread, SingleStepControl* single_step_control, 3221 int32_t* line_number) 3222 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 3223 : StackVisitor(thread, NULL), single_step_control_(single_step_control), 3224 line_number_(line_number) { 3225 DCHECK_EQ(single_step_control_, thread->GetSingleStepControl()); 3226 single_step_control_->method = NULL; 3227 single_step_control_->stack_depth = 0; 3228 } 3229 3230 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 3231 // annotalysis. 
3232 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 3233 mirror::ArtMethod* m = GetMethod(); 3234 if (!m->IsRuntimeMethod()) { 3235 ++single_step_control_->stack_depth; 3236 if (single_step_control_->method == NULL) { 3237 mirror::DexCache* dex_cache = m->GetDeclaringClass()->GetDexCache(); 3238 single_step_control_->method = m; 3239 *line_number_ = -1; 3240 if (dex_cache != NULL) { 3241 const DexFile& dex_file = *dex_cache->GetDexFile(); 3242 *line_number_ = dex_file.GetLineNumFromPC(m, GetDexPc()); 3243 } 3244 } 3245 } 3246 return true; 3247 } 3248 3249 SingleStepControl* const single_step_control_; 3250 int32_t* const line_number_; 3251 }; 3252 3253 Thread* const thread = sts.GetThread(); 3254 SingleStepControl* const single_step_control = thread->GetSingleStepControl(); 3255 DCHECK(single_step_control != nullptr); 3256 int32_t line_number = -1; 3257 SingleStepStackVisitor visitor(thread, single_step_control, &line_number); 3258 visitor.WalkStack(); 3259 3260 // 3261 // Find the dex_pc values that correspond to the current line, for line-based single-stepping. 3262 // 3263 3264 struct DebugCallbackContext { 3265 explicit DebugCallbackContext(SingleStepControl* single_step_control, int32_t line_number, 3266 const DexFile::CodeItem* code_item) 3267 : single_step_control_(single_step_control), line_number_(line_number), code_item_(code_item), 3268 last_pc_valid(false), last_pc(0) { 3269 } 3270 3271 static bool Callback(void* raw_context, uint32_t address, uint32_t line_number) { 3272 DebugCallbackContext* context = reinterpret_cast<DebugCallbackContext*>(raw_context); 3273 if (static_cast<int32_t>(line_number) == context->line_number_) { 3274 if (!context->last_pc_valid) { 3275 // Everything from this address until the next line change is ours. 3276 context->last_pc = address; 3277 context->last_pc_valid = true; 3278 } 3279 // Otherwise, if we're already in a valid range for this line, 3280 // just keep going (shouldn't really happen)... 3281 } else if (context->last_pc_valid) { // and the line number is new 3282 // Add everything from the last entry up until here to the set 3283 for (uint32_t dex_pc = context->last_pc; dex_pc < address; ++dex_pc) { 3284 context->single_step_control_->dex_pcs.insert(dex_pc); 3285 } 3286 context->last_pc_valid = false; 3287 } 3288 return false; // There may be multiple entries for any given line. 3289 } 3290 3291 ~DebugCallbackContext() { 3292 // If the line number was the last in the position table... 3293 if (last_pc_valid) { 3294 size_t end = code_item_->insns_size_in_code_units_; 3295 for (uint32_t dex_pc = last_pc; dex_pc < end; ++dex_pc) { 3296 single_step_control_->dex_pcs.insert(dex_pc); 3297 } 3298 } 3299 } 3300 3301 SingleStepControl* const single_step_control_; 3302 const int32_t line_number_; 3303 const DexFile::CodeItem* const code_item_; 3304 bool last_pc_valid; 3305 uint32_t last_pc; 3306 }; 3307 single_step_control->dex_pcs.clear(); 3308 mirror::ArtMethod* m = single_step_control->method; 3309 if (!m->IsNative()) { 3310 const DexFile::CodeItem* const code_item = m->GetCodeItem(); 3311 DebugCallbackContext context(single_step_control, line_number, code_item); 3312 m->GetDexFile()->DecodeDebugInfo(code_item, m->IsStatic(), m->GetDexMethodIndex(), 3313 DebugCallbackContext::Callback, NULL, &context); 3314 } 3315 3316 // 3317 // Everything else... 
3318 // 3319 3320 single_step_control->step_size = step_size; 3321 single_step_control->step_depth = step_depth; 3322 single_step_control->is_active = true; 3323 3324 if (VLOG_IS_ON(jdwp)) { 3325 VLOG(jdwp) << "Single-step thread: " << *thread; 3326 VLOG(jdwp) << "Single-step step size: " << single_step_control->step_size; 3327 VLOG(jdwp) << "Single-step step depth: " << single_step_control->step_depth; 3328 VLOG(jdwp) << "Single-step current method: " << PrettyMethod(single_step_control->method); 3329 VLOG(jdwp) << "Single-step current line: " << line_number; 3330 VLOG(jdwp) << "Single-step current stack depth: " << single_step_control->stack_depth; 3331 VLOG(jdwp) << "Single-step dex_pc values:"; 3332 for (uint32_t dex_pc : single_step_control->dex_pcs) { 3333 VLOG(jdwp) << StringPrintf(" %#x", dex_pc); 3334 } 3335 } 3336 3337 return JDWP::ERR_NONE; 3338} 3339 3340void Dbg::UnconfigureStep(JDWP::ObjectId thread_id) { 3341 ScopedObjectAccessUnchecked soa(Thread::Current()); 3342 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 3343 Thread* thread; 3344 JDWP::JdwpError error = DecodeThread(soa, thread_id, thread); 3345 if (error == JDWP::ERR_NONE) { 3346 SingleStepControl* single_step_control = thread->GetSingleStepControl(); 3347 DCHECK(single_step_control != nullptr); 3348 single_step_control->Clear(); 3349 } 3350} 3351 3352static char JdwpTagToShortyChar(JDWP::JdwpTag tag) { 3353 switch (tag) { 3354 default: 3355 LOG(FATAL) << "unknown JDWP tag: " << PrintableChar(tag); 3356 3357 // Primitives. 3358 case JDWP::JT_BYTE: return 'B'; 3359 case JDWP::JT_CHAR: return 'C'; 3360 case JDWP::JT_FLOAT: return 'F'; 3361 case JDWP::JT_DOUBLE: return 'D'; 3362 case JDWP::JT_INT: return 'I'; 3363 case JDWP::JT_LONG: return 'J'; 3364 case JDWP::JT_SHORT: return 'S'; 3365 case JDWP::JT_VOID: return 'V'; 3366 case JDWP::JT_BOOLEAN: return 'Z'; 3367 3368 // Reference types. 3369 case JDWP::JT_ARRAY: 3370 case JDWP::JT_OBJECT: 3371 case JDWP::JT_STRING: 3372 case JDWP::JT_THREAD: 3373 case JDWP::JT_THREAD_GROUP: 3374 case JDWP::JT_CLASS_LOADER: 3375 case JDWP::JT_CLASS_OBJECT: 3376 return 'L'; 3377 } 3378} 3379 3380JDWP::JdwpError Dbg::InvokeMethod(JDWP::ObjectId thread_id, JDWP::ObjectId object_id, 3381 JDWP::RefTypeId class_id, JDWP::MethodId method_id, 3382 uint32_t arg_count, uint64_t* arg_values, 3383 JDWP::JdwpTag* arg_types, uint32_t options, 3384 JDWP::JdwpTag* pResultTag, uint64_t* pResultValue, 3385 JDWP::ObjectId* pExceptionId) { 3386 ThreadList* thread_list = Runtime::Current()->GetThreadList(); 3387 3388 Thread* targetThread = NULL; 3389 DebugInvokeReq* req = NULL; 3390 Thread* self = Thread::Current(); 3391 { 3392 ScopedObjectAccessUnchecked soa(self); 3393 MutexLock mu(soa.Self(), *Locks::thread_list_lock_); 3394 JDWP::JdwpError error = DecodeThread(soa, thread_id, targetThread); 3395 if (error != JDWP::ERR_NONE) { 3396 LOG(ERROR) << "InvokeMethod request for invalid thread id " << thread_id; 3397 return error; 3398 } 3399 req = targetThread->GetInvokeReq(); 3400 if (!req->ready) { 3401 LOG(ERROR) << "InvokeMethod request for thread not stopped by event: " << *targetThread; 3402 return JDWP::ERR_INVALID_THREAD; 3403 } 3404 3405 /* 3406 * We currently have a bug where we don't successfully resume the 3407 * target thread if the suspend count is too deep. We're expected to 3408 * require one "resume" for each "suspend", but when asked to execute 3409 * a method we have to resume fully and then re-suspend it back to the 3410 * same level. 
(The easiest way to cause this is to type "suspend" 3411 * multiple times in jdb.) 3412 * 3413 * It's unclear what this means when the event specifies "resume all" 3414 * and some threads are suspended more deeply than others. This is 3415 * a rare problem, so for now we just prevent it from hanging forever 3416 * by rejecting the method invocation request. Without this, we will 3417 * be stuck waiting on a suspended thread. 3418 */ 3419 int suspend_count; 3420 { 3421 MutexLock mu2(soa.Self(), *Locks::thread_suspend_count_lock_); 3422 suspend_count = targetThread->GetSuspendCount(); 3423 } 3424 if (suspend_count > 1) { 3425 LOG(ERROR) << *targetThread << " suspend count too deep for method invocation: " << suspend_count; 3426 return JDWP::ERR_THREAD_SUSPENDED; // Probably not expected here. 3427 } 3428 3429 JDWP::JdwpError status; 3430 mirror::Object* receiver = gRegistry->Get<mirror::Object*>(object_id); 3431 if (receiver == ObjectRegistry::kInvalidObject) { 3432 return JDWP::ERR_INVALID_OBJECT; 3433 } 3434 3435 mirror::Object* thread = gRegistry->Get<mirror::Object*>(thread_id); 3436 if (thread == ObjectRegistry::kInvalidObject) { 3437 return JDWP::ERR_INVALID_OBJECT; 3438 } 3439 // TODO: check that 'thread' is actually a java.lang.Thread! 3440 3441 mirror::Class* c = DecodeClass(class_id, status); 3442 if (c == NULL) { 3443 return status; 3444 } 3445 3446 mirror::ArtMethod* m = FromMethodId(method_id); 3447 if (m->IsStatic() != (receiver == NULL)) { 3448 return JDWP::ERR_INVALID_METHODID; 3449 } 3450 if (m->IsStatic()) { 3451 if (m->GetDeclaringClass() != c) { 3452 return JDWP::ERR_INVALID_METHODID; 3453 } 3454 } else { 3455 if (!m->GetDeclaringClass()->IsAssignableFrom(c)) { 3456 return JDWP::ERR_INVALID_METHODID; 3457 } 3458 } 3459 3460 // Check the argument list matches the method. 3461 uint32_t shorty_len = 0; 3462 const char* shorty = m->GetShorty(&shorty_len); 3463 if (shorty_len - 1 != arg_count) { 3464 return JDWP::ERR_ILLEGAL_ARGUMENT; 3465 } 3466 3467 { 3468 StackHandleScope<3> hs(soa.Self()); 3469 MethodHelper mh(hs.NewHandle(m)); 3470 HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&receiver)); 3471 HandleWrapper<mirror::Class> h_klass(hs.NewHandleWrapper(&c)); 3472 const DexFile::TypeList* types = m->GetParameterTypeList(); 3473 for (size_t i = 0; i < arg_count; ++i) { 3474 if (shorty[i + 1] != JdwpTagToShortyChar(arg_types[i])) { 3475 return JDWP::ERR_ILLEGAL_ARGUMENT; 3476 } 3477 3478 if (shorty[i + 1] == 'L') { 3479 // Did we really get an argument of an appropriate reference type? 3480 mirror::Class* parameter_type = mh.GetClassFromTypeIdx(types->GetTypeItem(i).type_idx_); 3481 mirror::Object* argument = gRegistry->Get<mirror::Object*>(arg_values[i]); 3482 if (argument == ObjectRegistry::kInvalidObject) { 3483 return JDWP::ERR_INVALID_OBJECT; 3484 } 3485 if (argument != NULL && !argument->InstanceOf(parameter_type)) { 3486 return JDWP::ERR_ILLEGAL_ARGUMENT; 3487 } 3488 3489 // Turn the on-the-wire ObjectId into a jobject. 3490 jvalue& v = reinterpret_cast<jvalue&>(arg_values[i]); 3491 v.l = gRegistry->GetJObject(arg_values[i]); 3492 } 3493 } 3494 // Update in case it moved. 
3495 m = mh.GetMethod(); 3496 } 3497 3498 req->receiver = receiver; 3499 req->thread = thread; 3500 req->klass = c; 3501 req->method = m; 3502 req->arg_count = arg_count; 3503 req->arg_values = arg_values; 3504 req->options = options; 3505 req->invoke_needed = true; 3506 } 3507 3508 // The fact that we've released the thread list lock is a bit risky --- if the thread goes 3509 // away we're sitting high and dry -- but we must release this before the ResumeAllThreads 3510 // call, and it's unwise to hold it during WaitForSuspend. 3511 3512 { 3513 /* 3514 * We change our (JDWP thread) status, which should be THREAD_RUNNING, 3515 * so we can suspend for a GC if the invoke request causes us to 3516 * run out of memory. It's also a good idea to change it before locking 3517 * the invokeReq mutex, although that should never be held for long. 3518 */ 3519 self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSend); 3520 3521 VLOG(jdwp) << " Transferring control to event thread"; 3522 { 3523 MutexLock mu(self, req->lock); 3524 3525 if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) { 3526 VLOG(jdwp) << " Resuming all threads"; 3527 thread_list->UndoDebuggerSuspensions(); 3528 } else { 3529 VLOG(jdwp) << " Resuming event thread only"; 3530 thread_list->Resume(targetThread, true); 3531 } 3532 3533 // Wait for the request to finish executing. 3534 while (req->invoke_needed) { 3535 req->cond.Wait(self); 3536 } 3537 } 3538 VLOG(jdwp) << " Control has returned from event thread"; 3539 3540 /* wait for thread to re-suspend itself */ 3541 SuspendThread(thread_id, false /* request_suspension */); 3542 self->TransitionFromSuspendedToRunnable(); 3543 } 3544 3545 /* 3546 * Suspend the threads. We waited for the target thread to suspend 3547 * itself, so all we need to do is suspend the others. 3548 * 3549 * The suspendAllThreads() call will double-suspend the event thread, 3550 * so we want to resume the target thread once to keep the books straight. 3551 */ 3552 if ((options & JDWP::INVOKE_SINGLE_THREADED) == 0) { 3553 self->TransitionFromRunnableToSuspended(kWaitingForDebuggerSuspension); 3554 VLOG(jdwp) << " Suspending all threads"; 3555 thread_list->SuspendAllForDebugger(); 3556 self->TransitionFromSuspendedToRunnable(); 3557 VLOG(jdwp) << " Resuming event thread to balance the count"; 3558 thread_list->Resume(targetThread, true); 3559 } 3560 3561 // Copy the result. 3562 *pResultTag = req->result_tag; 3563 if (IsPrimitiveTag(req->result_tag)) { 3564 *pResultValue = req->result_value.GetJ(); 3565 } else { 3566 *pResultValue = gRegistry->Add(req->result_value.GetL()); 3567 } 3568 *pExceptionId = req->exception; 3569 return req->error; 3570} 3571 3572void Dbg::ExecuteMethod(DebugInvokeReq* pReq) { 3573 ScopedObjectAccess soa(Thread::Current()); 3574 3575 // We can be called while an exception is pending. We need 3576 // to preserve that across the method invocation. 
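  // (The saved throw location and exception are placed in Handles below so they
  // remain valid even if a moving GC runs during the invoke; the restore at the
  // end goes through the aptly named gc_safe_throw_location for the same reason.)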
3577 StackHandleScope<4> hs(soa.Self()); 3578 auto old_throw_this_object = hs.NewHandle<mirror::Object>(nullptr); 3579 auto old_throw_method = hs.NewHandle<mirror::ArtMethod>(nullptr); 3580 auto old_exception = hs.NewHandle<mirror::Throwable>(nullptr); 3581 uint32_t old_throw_dex_pc; 3582 bool old_exception_report_flag; 3583 { 3584 ThrowLocation old_throw_location; 3585 mirror::Throwable* old_exception_obj = soa.Self()->GetException(&old_throw_location); 3586 old_throw_this_object.Assign(old_throw_location.GetThis()); 3587 old_throw_method.Assign(old_throw_location.GetMethod()); 3588 old_exception.Assign(old_exception_obj); 3589 old_throw_dex_pc = old_throw_location.GetDexPc(); 3590 old_exception_report_flag = soa.Self()->IsExceptionReportedToInstrumentation(); 3591 soa.Self()->ClearException(); 3592 } 3593 3594 // Translate the method through the vtable, unless the debugger wants to suppress it. 3595 Handle<mirror::ArtMethod> m(hs.NewHandle(pReq->method)); 3596 if ((pReq->options & JDWP::INVOKE_NONVIRTUAL) == 0 && pReq->receiver != NULL) { 3597 mirror::ArtMethod* actual_method = pReq->klass->FindVirtualMethodForVirtualOrInterface(m.Get()); 3598 if (actual_method != m.Get()) { 3599 VLOG(jdwp) << "ExecuteMethod translated " << PrettyMethod(m.Get()) << " to " << PrettyMethod(actual_method); 3600 m.Assign(actual_method); 3601 } 3602 } 3603 VLOG(jdwp) << "ExecuteMethod " << PrettyMethod(m.Get()) 3604 << " receiver=" << pReq->receiver 3605 << " arg_count=" << pReq->arg_count; 3606 CHECK(m.Get() != nullptr); 3607 3608 CHECK_EQ(sizeof(jvalue), sizeof(uint64_t)); 3609 3610 pReq->result_value = InvokeWithJValues(soa, pReq->receiver, soa.EncodeMethod(m.Get()), 3611 reinterpret_cast<jvalue*>(pReq->arg_values)); 3612 3613 mirror::Throwable* exception = soa.Self()->GetException(NULL); 3614 soa.Self()->ClearException(); 3615 pReq->exception = gRegistry->Add(exception); 3616 pReq->result_tag = BasicTagFromDescriptor(m.Get()->GetShorty()); 3617 if (pReq->exception != 0) { 3618 VLOG(jdwp) << " JDWP invocation returning with exception=" << exception 3619 << " " << exception->Dump(); 3620 pReq->result_value.SetJ(0); 3621 } else if (pReq->result_tag == JDWP::JT_OBJECT) { 3622 /* if no exception thrown, examine object result more closely */ 3623 JDWP::JdwpTag new_tag = TagFromObject(soa, pReq->result_value.GetL()); 3624 if (new_tag != pReq->result_tag) { 3625 VLOG(jdwp) << " JDWP promoted result from " << pReq->result_tag << " to " << new_tag; 3626 pReq->result_tag = new_tag; 3627 } 3628 3629 /* 3630 * Register the object. We don't actually need an ObjectId yet, 3631 * but we do need to be sure that the GC won't move or discard the 3632 * object when we switch out of RUNNING. The ObjectId conversion 3633 * will add the object to the "do not touch" list. 3634 * 3635 * We can't use the "tracked allocation" mechanism here because 3636 * the object is going to be handed off to a different thread. 3637 */ 3638 gRegistry->Add(pReq->result_value.GetL()); 3639 } 3640 3641 if (old_exception.Get() != NULL) { 3642 ThrowLocation gc_safe_throw_location(old_throw_this_object.Get(), old_throw_method.Get(), 3643 old_throw_dex_pc); 3644 soa.Self()->SetException(gc_safe_throw_location, old_exception.Get()); 3645 soa.Self()->SetExceptionReportedToInstrumentation(old_exception_report_flag); 3646 } 3647} 3648 3649/* 3650 * "request" contains a full JDWP packet, possibly with multiple chunks. We 3651 * need to process each, accumulate the replies, and ship the whole thing 3652 * back. 
3653 * 3654 * Returns "true" if we have a reply. The reply buffer is newly allocated, 3655 * and includes the chunk type/length, followed by the data. 3656 * 3657 * OLD-TODO: we currently assume that the request and reply include a single 3658 * chunk. If this becomes inconvenient we will need to adapt. 3659 */ 3660bool Dbg::DdmHandlePacket(JDWP::Request& request, uint8_t** pReplyBuf, int* pReplyLen) { 3661 Thread* self = Thread::Current(); 3662 JNIEnv* env = self->GetJniEnv(); 3663 3664 uint32_t type = request.ReadUnsigned32("type"); 3665 uint32_t length = request.ReadUnsigned32("length"); 3666 3667 // Create a byte[] corresponding to 'request'. 3668 size_t request_length = request.size(); 3669 ScopedLocalRef<jbyteArray> dataArray(env, env->NewByteArray(request_length)); 3670 if (dataArray.get() == NULL) { 3671 LOG(WARNING) << "byte[] allocation failed: " << request_length; 3672 env->ExceptionClear(); 3673 return false; 3674 } 3675 env->SetByteArrayRegion(dataArray.get(), 0, request_length, reinterpret_cast<const jbyte*>(request.data())); 3676 request.Skip(request_length); 3677 3678 // Run through and find all chunks. [Currently just find the first.] 3679 ScopedByteArrayRO contents(env, dataArray.get()); 3680 if (length != request_length) { 3681 LOG(WARNING) << StringPrintf("bad chunk found (len=%u pktLen=%zd)", length, request_length); 3682 return false; 3683 } 3684 3685 // Call "private static Chunk dispatch(int type, byte[] data, int offset, int length)". 3686 ScopedLocalRef<jobject> chunk(env, env->CallStaticObjectMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer, 3687 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_dispatch, 3688 type, dataArray.get(), 0, length)); 3689 if (env->ExceptionCheck()) { 3690 LOG(INFO) << StringPrintf("Exception thrown by dispatcher for 0x%08x", type); 3691 env->ExceptionDescribe(); 3692 env->ExceptionClear(); 3693 return false; 3694 } 3695 3696 if (chunk.get() == NULL) { 3697 return false; 3698 } 3699 3700 /* 3701 * Pull the pieces out of the chunk. We copy the results into a 3702 * newly-allocated buffer that the caller can free. We don't want to 3703 * continue using the Chunk object because nothing has a reference to it. 3704 * 3705 * We could avoid this by returning type/data/offset/length and having 3706 * the caller be aware of the object lifetime issues, but that 3707 * integrates the JDWP code more tightly into the rest of the runtime, and doesn't work 3708 * if we have responses for multiple chunks. 3709 * 3710 * So we're pretty much stuck with copying data around multiple times. 
3711 */ 3712 ScopedLocalRef<jbyteArray> replyData(env, reinterpret_cast<jbyteArray>(env->GetObjectField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_data))); 3713 jint offset = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_offset); 3714 length = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_length); 3715 type = env->GetIntField(chunk.get(), WellKnownClasses::org_apache_harmony_dalvik_ddmc_Chunk_type); 3716 3717 VLOG(jdwp) << StringPrintf("DDM reply: type=0x%08x data=%p offset=%d length=%d", type, replyData.get(), offset, length); 3718 if (length == 0 || replyData.get() == NULL) { 3719 return false; 3720 } 3721 3722 const int kChunkHdrLen = 8; 3723 uint8_t* reply = new uint8_t[length + kChunkHdrLen]; 3724 if (reply == NULL) { 3725 LOG(WARNING) << "malloc failed: " << (length + kChunkHdrLen); 3726 return false; 3727 } 3728 JDWP::Set4BE(reply + 0, type); 3729 JDWP::Set4BE(reply + 4, length); 3730 env->GetByteArrayRegion(replyData.get(), offset, length, reinterpret_cast<jbyte*>(reply + kChunkHdrLen)); 3731 3732 *pReplyBuf = reply; 3733 *pReplyLen = length + kChunkHdrLen; 3734 3735 VLOG(jdwp) << StringPrintf("dvmHandleDdm returning type=%.4s %p len=%d", reinterpret_cast<char*>(reply), reply, length); 3736 return true; 3737} 3738 3739void Dbg::DdmBroadcast(bool connect) { 3740 VLOG(jdwp) << "Broadcasting DDM " << (connect ? "connect" : "disconnect") << "..."; 3741 3742 Thread* self = Thread::Current(); 3743 if (self->GetState() != kRunnable) { 3744 LOG(ERROR) << "DDM broadcast in thread state " << self->GetState(); 3745 /* try anyway? */ 3746 } 3747 3748 JNIEnv* env = self->GetJniEnv(); 3749 jint event = connect ? 1 /*DdmServer.CONNECTED*/ : 2 /*DdmServer.DISCONNECTED*/; 3750 env->CallStaticVoidMethod(WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer, 3751 WellKnownClasses::org_apache_harmony_dalvik_ddmc_DdmServer_broadcast, 3752 event); 3753 if (env->ExceptionCheck()) { 3754 LOG(ERROR) << "DdmServer.broadcast " << event << " failed"; 3755 env->ExceptionDescribe(); 3756 env->ExceptionClear(); 3757 } 3758} 3759 3760void Dbg::DdmConnected() { 3761 Dbg::DdmBroadcast(true); 3762} 3763 3764void Dbg::DdmDisconnected() { 3765 Dbg::DdmBroadcast(false); 3766 gDdmThreadNotification = false; 3767} 3768 3769/* 3770 * Send a notification when a thread starts, stops, or changes its name. 3771 * 3772 * Because we broadcast the full set of threads when the notifications are 3773 * first enabled, it's possible for "thread" to be actively executing. 3774 */ 3775void Dbg::DdmSendThreadNotification(Thread* t, uint32_t type) { 3776 if (!gDdmThreadNotification) { 3777 return; 3778 } 3779 3780 if (type == CHUNK_TYPE("THDE")) { 3781 uint8_t buf[4]; 3782 JDWP::Set4BE(&buf[0], t->GetThreadId()); 3783 Dbg::DdmSendChunk(CHUNK_TYPE("THDE"), 4, buf); 3784 } else { 3785 CHECK(type == CHUNK_TYPE("THCR") || type == CHUNK_TYPE("THNM")) << type; 3786 ScopedObjectAccessUnchecked soa(Thread::Current()); 3787 StackHandleScope<1> hs(soa.Self()); 3788 Handle<mirror::String> name(hs.NewHandle(t->GetThreadName(soa))); 3789 size_t char_count = (name.Get() != NULL) ? name->GetLength() : 0; 3790 const jchar* chars = (name.Get() != NULL) ? 
name->GetCharArray()->GetData() : NULL; 3791 3792 std::vector<uint8_t> bytes; 3793 JDWP::Append4BE(bytes, t->GetThreadId()); 3794 JDWP::AppendUtf16BE(bytes, chars, char_count); 3795 CHECK_EQ(bytes.size(), char_count*2 + sizeof(uint32_t)*2); 3796 Dbg::DdmSendChunk(type, bytes); 3797 } 3798} 3799 3800void Dbg::DdmSetThreadNotification(bool enable) { 3801 // Enable/disable thread notifications. 3802 gDdmThreadNotification = enable; 3803 if (enable) { 3804 // Suspend the VM then post thread start notifications for all threads. Threads attaching will 3805 // see a suspension in progress and block until that ends. They then post their own start 3806 // notification. 3807 SuspendVM(); 3808 std::list<Thread*> threads; 3809 Thread* self = Thread::Current(); 3810 { 3811 MutexLock mu(self, *Locks::thread_list_lock_); 3812 threads = Runtime::Current()->GetThreadList()->GetList(); 3813 } 3814 { 3815 ScopedObjectAccess soa(self); 3816 for (Thread* thread : threads) { 3817 Dbg::DdmSendThreadNotification(thread, CHUNK_TYPE("THCR")); 3818 } 3819 } 3820 ResumeVM(); 3821 } 3822} 3823 3824void Dbg::PostThreadStartOrStop(Thread* t, uint32_t type) { 3825 if (IsDebuggerActive()) { 3826 ScopedObjectAccessUnchecked soa(Thread::Current()); 3827 JDWP::ObjectId id = gRegistry->Add(t->GetPeer()); 3828 gJdwpState->PostThreadChange(id, type == CHUNK_TYPE("THCR")); 3829 } 3830 Dbg::DdmSendThreadNotification(t, type); 3831} 3832 3833void Dbg::PostThreadStart(Thread* t) { 3834 Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THCR")); 3835} 3836 3837void Dbg::PostThreadDeath(Thread* t) { 3838 Dbg::PostThreadStartOrStop(t, CHUNK_TYPE("THDE")); 3839} 3840 3841void Dbg::DdmSendChunk(uint32_t type, size_t byte_count, const uint8_t* buf) { 3842 CHECK(buf != NULL); 3843 iovec vec[1]; 3844 vec[0].iov_base = reinterpret_cast<void*>(const_cast<uint8_t*>(buf)); 3845 vec[0].iov_len = byte_count; 3846 Dbg::DdmSendChunkV(type, vec, 1); 3847} 3848 3849void Dbg::DdmSendChunk(uint32_t type, const std::vector<uint8_t>& bytes) { 3850 DdmSendChunk(type, bytes.size(), &bytes[0]); 3851} 3852 3853void Dbg::DdmSendChunkV(uint32_t type, const iovec* iov, int iov_count) { 3854 if (gJdwpState == NULL) { 3855 VLOG(jdwp) << "Debugger thread not active, ignoring DDM send: " << type; 3856 } else { 3857 gJdwpState->DdmSendChunkV(type, iov, iov_count); 3858 } 3859} 3860 3861int Dbg::DdmHandleHpifChunk(HpifWhen when) { 3862 if (when == HPIF_WHEN_NOW) { 3863 DdmSendHeapInfo(when); 3864 return true; 3865 } 3866 3867 if (when != HPIF_WHEN_NEVER && when != HPIF_WHEN_NEXT_GC && when != HPIF_WHEN_EVERY_GC) { 3868 LOG(ERROR) << "invalid HpifWhen value: " << static_cast<int>(when); 3869 return false; 3870 } 3871 3872 gDdmHpifWhen = when; 3873 return true; 3874} 3875 3876bool Dbg::DdmHandleHpsgNhsgChunk(Dbg::HpsgWhen when, Dbg::HpsgWhat what, bool native) { 3877 if (when != HPSG_WHEN_NEVER && when != HPSG_WHEN_EVERY_GC) { 3878 LOG(ERROR) << "invalid HpsgWhen value: " << static_cast<int>(when); 3879 return false; 3880 } 3881 3882 if (what != HPSG_WHAT_MERGED_OBJECTS && what != HPSG_WHAT_DISTINCT_OBJECTS) { 3883 LOG(ERROR) << "invalid HpsgWhat value: " << static_cast<int>(what); 3884 return false; 3885 } 3886 3887 if (native) { 3888 gDdmNhsgWhen = when; 3889 gDdmNhsgWhat = what; 3890 } else { 3891 gDdmHpsgWhen = when; 3892 gDdmHpsgWhat = what; 3893 } 3894 return true; 3895} 3896 3897void Dbg::DdmSendHeapInfo(HpifWhen reason) { 3898 // If there's a one-shot 'when', reset it. 
3899 if (reason == gDdmHpifWhen) { 3900 if (gDdmHpifWhen == HPIF_WHEN_NEXT_GC) { 3901 gDdmHpifWhen = HPIF_WHEN_NEVER; 3902 } 3903 } 3904 3905 /* 3906 * Chunk HPIF (client --> server) 3907 * 3908 * Heap Info. General information about the heap, 3909 * suitable for a summary display. 3910 * 3911 * [u4]: number of heaps 3912 * 3913 * For each heap: 3914 * [u4]: heap ID 3915 * [u8]: timestamp in ms since Unix epoch 3916 * [u1]: capture reason (same as 'when' value from server) 3917 * [u4]: max heap size in bytes (-Xmx) 3918 * [u4]: current heap size in bytes 3919 * [u4]: current number of bytes allocated 3920 * [u4]: current number of objects allocated 3921 */ 3922 uint8_t heap_count = 1; 3923 gc::Heap* heap = Runtime::Current()->GetHeap(); 3924 std::vector<uint8_t> bytes; 3925 JDWP::Append4BE(bytes, heap_count); 3926 JDWP::Append4BE(bytes, 1); // Heap id (bogus; we only have one heap). 3927 JDWP::Append8BE(bytes, MilliTime()); 3928 JDWP::Append1BE(bytes, reason); 3929 JDWP::Append4BE(bytes, heap->GetMaxMemory()); // Max allowed heap size in bytes. 3930 JDWP::Append4BE(bytes, heap->GetTotalMemory()); // Current heap size in bytes. 3931 JDWP::Append4BE(bytes, heap->GetBytesAllocated()); 3932 JDWP::Append4BE(bytes, heap->GetObjectsAllocated()); 3933 CHECK_EQ(bytes.size(), 4U + (heap_count * (4 + 8 + 1 + 4 + 4 + 4 + 4))); 3934 Dbg::DdmSendChunk(CHUNK_TYPE("HPIF"), bytes); 3935} 3936 3937enum HpsgSolidity { 3938 SOLIDITY_FREE = 0, 3939 SOLIDITY_HARD = 1, 3940 SOLIDITY_SOFT = 2, 3941 SOLIDITY_WEAK = 3, 3942 SOLIDITY_PHANTOM = 4, 3943 SOLIDITY_FINALIZABLE = 5, 3944 SOLIDITY_SWEEP = 6, 3945}; 3946 3947enum HpsgKind { 3948 KIND_OBJECT = 0, 3949 KIND_CLASS_OBJECT = 1, 3950 KIND_ARRAY_1 = 2, 3951 KIND_ARRAY_2 = 3, 3952 KIND_ARRAY_4 = 4, 3953 KIND_ARRAY_8 = 5, 3954 KIND_UNKNOWN = 6, 3955 KIND_NATIVE = 7, 3956}; 3957 3958#define HPSG_PARTIAL (1<<7) 3959#define HPSG_STATE(solidity, kind) ((uint8_t)((((kind) & 0x7) << 3) | ((solidity) & 0x7))) 3960 3961class HeapChunkContext { 3962 public: 3963 // Maximum chunk size. Obtain this from the formula: 3964 // (((maximum_heap_size / ALLOCATION_UNIT_SIZE) + 255) / 256) * 2 3965 HeapChunkContext(bool merge, bool native) 3966 : buf_(16384 - 16), 3967 type_(0), 3968 merge_(merge) { 3969 Reset(); 3970 if (native) { 3971 type_ = CHUNK_TYPE("NHSG"); 3972 } else { 3973 type_ = merge ? CHUNK_TYPE("HPSG") : CHUNK_TYPE("HPSO"); 3974 } 3975 } 3976 3977 ~HeapChunkContext() { 3978 if (p_ > &buf_[0]) { 3979 Flush(); 3980 } 3981 } 3982 3983 void EnsureHeader(const void* chunk_ptr) { 3984 if (!needHeader_) { 3985 return; 3986 } 3987 3988 // Start a new HPSx chunk. 3989 JDWP::Write4BE(&p_, 1); // Heap id (bogus; we only have one heap). 3990 JDWP::Write1BE(&p_, 8); // Size of allocation unit, in bytes. 3991 3992 JDWP::Write4BE(&p_, reinterpret_cast<uintptr_t>(chunk_ptr)); // virtual address of segment start. 3993 JDWP::Write4BE(&p_, 0); // offset of this piece (relative to the virtual address). 3994 // [u4]: length of piece, in allocation units 3995 // We won't know this until we're done, so save the offset and stuff in a dummy value. 3996 pieceLenField_ = p_; 3997 JDWP::Write4BE(&p_, 0x55555555); 3998 needHeader_ = false; 3999 } 4000 4001 void Flush() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 4002 if (pieceLenField_ == NULL) { 4003 // Flush immediately post Reset (maybe back-to-back Flush). Ignore. 4004 CHECK(needHeader_); 4005 return; 4006 } 4007 // Patch the "length of piece" field. 
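    // (This overwrites the 0x55555555 placeholder that EnsureHeader() wrote at
    // pieceLenField_ with the actual number of allocation units in this piece.)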
4008 CHECK_LE(&buf_[0], pieceLenField_); 4009 CHECK_LE(pieceLenField_, p_); 4010 JDWP::Set4BE(pieceLenField_, totalAllocationUnits_); 4011 4012 Dbg::DdmSendChunk(type_, p_ - &buf_[0], &buf_[0]); 4013 Reset(); 4014 } 4015 4016 static void HeapChunkCallback(void* start, void* end, size_t used_bytes, void* arg) 4017 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, 4018 Locks::mutator_lock_) { 4019 reinterpret_cast<HeapChunkContext*>(arg)->HeapChunkCallback(start, end, used_bytes); 4020 } 4021 4022 private: 4023 enum { ALLOCATION_UNIT_SIZE = 8 }; 4024 4025 void Reset() { 4026 p_ = &buf_[0]; 4027 startOfNextMemoryChunk_ = NULL; 4028 totalAllocationUnits_ = 0; 4029 needHeader_ = true; 4030 pieceLenField_ = NULL; 4031 } 4032 4033 void HeapChunkCallback(void* start, void* /*end*/, size_t used_bytes) 4034 SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_, 4035 Locks::mutator_lock_) { 4036 // Note: heap call backs cannot manipulate the heap upon which they are crawling, care is taken 4037 // in the following code not to allocate memory, by ensuring buf_ is of the correct size 4038 if (used_bytes == 0) { 4039 if (start == NULL) { 4040 // Reset for start of new heap. 4041 startOfNextMemoryChunk_ = NULL; 4042 Flush(); 4043 } 4044 // Only process in use memory so that free region information 4045 // also includes dlmalloc book keeping. 4046 return; 4047 } 4048 4049 /* If we're looking at the native heap, we'll just return 4050 * (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks 4051 */ 4052 bool native = type_ == CHUNK_TYPE("NHSG"); 4053 4054 if (startOfNextMemoryChunk_ != NULL) { 4055 // Transmit any pending free memory. Native free memory of 4056 // over kMaxFreeLen could be because of the use of mmaps, so 4057 // don't report. If not free memory then start a new segment. 4058 bool flush = true; 4059 if (start > startOfNextMemoryChunk_) { 4060 const size_t kMaxFreeLen = 2 * kPageSize; 4061 void* freeStart = startOfNextMemoryChunk_; 4062 void* freeEnd = start; 4063 size_t freeLen = reinterpret_cast<char*>(freeEnd) - reinterpret_cast<char*>(freeStart); 4064 if (!native || freeLen < kMaxFreeLen) { 4065 AppendChunk(HPSG_STATE(SOLIDITY_FREE, 0), freeStart, freeLen); 4066 flush = false; 4067 } 4068 } 4069 if (flush) { 4070 startOfNextMemoryChunk_ = NULL; 4071 Flush(); 4072 } 4073 } 4074 mirror::Object* obj = reinterpret_cast<mirror::Object*>(start); 4075 4076 // Determine the type of this chunk. 4077 // OLD-TODO: if context.merge, see if this chunk is different from the last chunk. 4078 // If it's the same, we should combine them. 4079 uint8_t state = ExamineObject(obj, native); 4080 // dlmalloc's chunk header is 2 * sizeof(size_t), but if the previous chunk is in use for an 4081 // allocation then the first sizeof(size_t) may belong to it. 4082 const size_t dlMallocOverhead = sizeof(size_t); 4083 AppendChunk(state, start, used_bytes + dlMallocOverhead); 4084 startOfNextMemoryChunk_ = reinterpret_cast<char*>(start) + used_bytes + dlMallocOverhead; 4085 } 4086 4087 void AppendChunk(uint8_t state, void* ptr, size_t length) 4088 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 4089 // Make sure there's enough room left in the buffer. 4090 // We need to use two bytes for every fractional 256 allocation units used by the chunk plus 4091 // 17 bytes for any header. 
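    // (Illustrative numbers: a 4096-byte chunk is 4096/8 = 512 allocation units,
    // needing ceil(512/256) = 2 two-byte run descriptors plus the 17-byte header,
    // i.e. needed = 2*2 + 17 = 21 bytes.)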
4092 size_t needed = (((length/ALLOCATION_UNIT_SIZE + 255) / 256) * 2) + 17; 4093 size_t bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]); 4094 if (bytesLeft < needed) { 4095 Flush(); 4096 } 4097 4098 bytesLeft = buf_.size() - (size_t)(p_ - &buf_[0]); 4099 if (bytesLeft < needed) { 4100 LOG(WARNING) << "Chunk is too big to transmit (chunk_len=" << length << ", " 4101 << needed << " bytes)"; 4102 return; 4103 } 4104 EnsureHeader(ptr); 4105 // Write out the chunk description. 4106 length /= ALLOCATION_UNIT_SIZE; // Convert to allocation units. 4107 totalAllocationUnits_ += length; 4108 while (length > 256) { 4109 *p_++ = state | HPSG_PARTIAL; 4110 *p_++ = 255; // length - 1 4111 length -= 256; 4112 } 4113 *p_++ = state; 4114 *p_++ = length - 1; 4115 } 4116 4117 uint8_t ExamineObject(mirror::Object* o, bool is_native_heap) 4118 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_, Locks::heap_bitmap_lock_) { 4119 if (o == NULL) { 4120 return HPSG_STATE(SOLIDITY_FREE, 0); 4121 } 4122 4123 // It's an allocated chunk. Figure out what it is. 4124 4125 // If we're looking at the native heap, we'll just return 4126 // (SOLIDITY_HARD, KIND_NATIVE) for all allocated chunks. 4127 if (is_native_heap) { 4128 return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE); 4129 } 4130 4131 if (!Runtime::Current()->GetHeap()->IsLiveObjectLocked(o)) { 4132 return HPSG_STATE(SOLIDITY_HARD, KIND_NATIVE); 4133 } 4134 4135 mirror::Class* c = o->GetClass(); 4136 if (c == NULL) { 4137 // The object was probably just created but hasn't been initialized yet. 4138 return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT); 4139 } 4140 4141 if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) { 4142 LOG(ERROR) << "Invalid class for managed heap object: " << o << " " << c; 4143 return HPSG_STATE(SOLIDITY_HARD, KIND_UNKNOWN); 4144 } 4145 4146 if (c->IsClassClass()) { 4147 return HPSG_STATE(SOLIDITY_HARD, KIND_CLASS_OBJECT); 4148 } 4149 4150 if (c->IsArrayClass()) { 4151 if (o->IsObjectArray()) { 4152 return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4); 4153 } 4154 switch (c->GetComponentSize()) { 4155 case 1: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_1); 4156 case 2: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_2); 4157 case 4: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_4); 4158 case 8: return HPSG_STATE(SOLIDITY_HARD, KIND_ARRAY_8); 4159 } 4160 } 4161 4162 return HPSG_STATE(SOLIDITY_HARD, KIND_OBJECT); 4163 } 4164 4165 std::vector<uint8_t> buf_; 4166 uint8_t* p_; 4167 uint8_t* pieceLenField_; 4168 void* startOfNextMemoryChunk_; 4169 size_t totalAllocationUnits_; 4170 uint32_t type_; 4171 bool merge_; 4172 bool needHeader_; 4173 4174 DISALLOW_COPY_AND_ASSIGN(HeapChunkContext); 4175}; 4176 4177void Dbg::DdmSendHeapSegments(bool native) { 4178 Dbg::HpsgWhen when; 4179 Dbg::HpsgWhat what; 4180 if (!native) { 4181 when = gDdmHpsgWhen; 4182 what = gDdmHpsgWhat; 4183 } else { 4184 when = gDdmNhsgWhen; 4185 what = gDdmNhsgWhat; 4186 } 4187 if (when == HPSG_WHEN_NEVER) { 4188 return; 4189 } 4190 4191 // Figure out what kind of chunks we'll be sending. 4192 CHECK(what == HPSG_WHAT_MERGED_OBJECTS || what == HPSG_WHAT_DISTINCT_OBJECTS) << static_cast<int>(what); 4193 4194 // First, send a heap start chunk. 4195 uint8_t heap_id[4]; 4196 JDWP::Set4BE(&heap_id[0], 1); // Heap id (bogus; we only have one heap). 4197 Dbg::DdmSendChunk(native ? 
CHUNK_TYPE("NHST") : CHUNK_TYPE("HPST"), sizeof(heap_id), heap_id); 4198 4199 Thread* self = Thread::Current(); 4200 4201 // To allow the Walk/InspectAll() below to exclusively-lock the 4202 // mutator lock, temporarily release the shared access to the 4203 // mutator lock here by transitioning to the suspended state. 4204 Locks::mutator_lock_->AssertSharedHeld(self); 4205 self->TransitionFromRunnableToSuspended(kSuspended); 4206 4207 // Send a series of heap segment chunks. 4208 HeapChunkContext context((what == HPSG_WHAT_MERGED_OBJECTS), native); 4209 if (native) { 4210#ifdef USE_DLMALLOC 4211 dlmalloc_inspect_all(HeapChunkContext::HeapChunkCallback, &context); 4212#else 4213 UNIMPLEMENTED(WARNING) << "Native heap inspection is only supported with dlmalloc"; 4214#endif 4215 } else { 4216 gc::Heap* heap = Runtime::Current()->GetHeap(); 4217 const std::vector<gc::space::ContinuousSpace*>& spaces = heap->GetContinuousSpaces(); 4218 typedef std::vector<gc::space::ContinuousSpace*>::const_iterator It; 4219 for (It cur = spaces.begin(), end = spaces.end(); cur != end; ++cur) { 4220 if ((*cur)->IsMallocSpace()) { 4221 (*cur)->AsMallocSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context); 4222 } 4223 } 4224 // Walk the large objects, these are not in the AllocSpace. 4225 heap->GetLargeObjectsSpace()->Walk(HeapChunkContext::HeapChunkCallback, &context); 4226 } 4227 4228 // Shared-lock the mutator lock back. 4229 self->TransitionFromSuspendedToRunnable(); 4230 Locks::mutator_lock_->AssertSharedHeld(self); 4231 4232 // Finally, send a heap end chunk. 4233 Dbg::DdmSendChunk(native ? CHUNK_TYPE("NHEN") : CHUNK_TYPE("HPEN"), sizeof(heap_id), heap_id); 4234} 4235 4236static size_t GetAllocTrackerMax() { 4237#ifdef HAVE_ANDROID_OS 4238 // Check whether there's a system property overriding the number of records. 
4239 const char* propertyName = "dalvik.vm.allocTrackerMax"; 4240 char allocRecordMaxString[PROPERTY_VALUE_MAX]; 4241 if (property_get(propertyName, allocRecordMaxString, "") > 0) { 4242 char* end; 4243 size_t value = strtoul(allocRecordMaxString, &end, 10); 4244 if (*end != '\0') { 4245 LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString 4246 << "' --- invalid"; 4247 return kDefaultNumAllocRecords; 4248 } 4249 if (!IsPowerOfTwo(value)) { 4250 LOG(ERROR) << "Ignoring " << propertyName << " '" << allocRecordMaxString 4251 << "' --- not power of two"; 4252 return kDefaultNumAllocRecords; 4253 } 4254 return value; 4255 } 4256#endif 4257 return kDefaultNumAllocRecords; 4258} 4259 4260void Dbg::SetAllocTrackingEnabled(bool enabled) { 4261 if (enabled) { 4262 { 4263 MutexLock mu(Thread::Current(), *alloc_tracker_lock_); 4264 if (recent_allocation_records_ == NULL) { 4265 alloc_record_max_ = GetAllocTrackerMax(); 4266 LOG(INFO) << "Enabling alloc tracker (" << alloc_record_max_ << " entries of " 4267 << kMaxAllocRecordStackDepth << " frames, taking " 4268 << PrettySize(sizeof(AllocRecord) * alloc_record_max_) << ")"; 4269 alloc_record_head_ = alloc_record_count_ = 0; 4270 recent_allocation_records_ = new AllocRecord[alloc_record_max_]; 4271 CHECK(recent_allocation_records_ != NULL); 4272 } 4273 } 4274 Runtime::Current()->GetInstrumentation()->InstrumentQuickAllocEntryPoints(); 4275 } else { 4276 Runtime::Current()->GetInstrumentation()->UninstrumentQuickAllocEntryPoints(); 4277 { 4278 MutexLock mu(Thread::Current(), *alloc_tracker_lock_); 4279 LOG(INFO) << "Disabling alloc tracker"; 4280 delete[] recent_allocation_records_; 4281 recent_allocation_records_ = NULL; 4282 type_cache_.Clear(); 4283 } 4284 } 4285} 4286 4287struct AllocRecordStackVisitor : public StackVisitor { 4288 AllocRecordStackVisitor(Thread* thread, AllocRecord* record) 4289 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 4290 : StackVisitor(thread, NULL), record(record), depth(0) {} 4291 4292 // TODO: Enable annotalysis. We know lock is held in constructor, but abstraction confuses 4293 // annotalysis. 4294 bool VisitFrame() NO_THREAD_SAFETY_ANALYSIS { 4295 if (depth >= kMaxAllocRecordStackDepth) { 4296 return false; 4297 } 4298 mirror::ArtMethod* m = GetMethod(); 4299 if (!m->IsRuntimeMethod()) { 4300 record->StackElement(depth)->SetMethod(m); 4301 record->StackElement(depth)->SetDexPc(GetDexPc()); 4302 ++depth; 4303 } 4304 return true; 4305 } 4306 4307 ~AllocRecordStackVisitor() { 4308 // Clear out any unused stack trace elements. 4309 for (; depth < kMaxAllocRecordStackDepth; ++depth) { 4310 record->StackElement(depth)->SetMethod(nullptr); 4311 record->StackElement(depth)->SetDexPc(0); 4312 } 4313 } 4314 4315 AllocRecord* record; 4316 size_t depth; 4317}; 4318 4319void Dbg::RecordAllocation(mirror::Class* type, size_t byte_count) { 4320 Thread* self = Thread::Current(); 4321 CHECK(self != NULL); 4322 4323 MutexLock mu(self, *alloc_tracker_lock_); 4324 if (recent_allocation_records_ == NULL) { 4325 return; 4326 } 4327 4328 // Advance and clip. 4329 if (++alloc_record_head_ == alloc_record_max_) { 4330 alloc_record_head_ = 0; 4331 } 4332 4333 // Fill in the basics. 4334 AllocRecord* record = &recent_allocation_records_[alloc_record_head_]; 4335 record->SetType(type); 4336 record->SetByteCount(byte_count); 4337 record->SetThinLockId(self->GetThreadId()); 4338 4339 // Fill in the stack trace. 
4340 AllocRecordStackVisitor visitor(self, record); 4341 visitor.WalkStack(); 4342 4343 if (alloc_record_count_ < alloc_record_max_) { 4344 ++alloc_record_count_; 4345 } 4346} 4347 4348// Returns the index of the head element. 4349// 4350// We point at the most-recently-written record, so if gAllocRecordCount is 1 4351// we want to use the current element. Take "head+1" and subtract count 4352// from it. 4353// 4354// We need to handle underflow in our circular buffer, so we add 4355// gAllocRecordMax and then mask it back down. 4356size_t Dbg::HeadIndex() { 4357 return (Dbg::alloc_record_head_ + 1 + Dbg::alloc_record_max_ - Dbg::alloc_record_count_) & 4358 (Dbg::alloc_record_max_ - 1); 4359} 4360 4361void Dbg::DumpRecentAllocations() { 4362 ScopedObjectAccess soa(Thread::Current()); 4363 MutexLock mu(soa.Self(), *alloc_tracker_lock_); 4364 if (recent_allocation_records_ == NULL) { 4365 LOG(INFO) << "Not recording tracked allocations"; 4366 return; 4367 } 4368 4369 // "i" is the head of the list. We want to start at the end of the 4370 // list and move forward to the tail. 4371 size_t i = HeadIndex(); 4372 size_t count = alloc_record_count_; 4373 4374 LOG(INFO) << "Tracked allocations, (head=" << alloc_record_head_ << " count=" << count << ")"; 4375 while (count--) { 4376 AllocRecord* record = &recent_allocation_records_[i]; 4377 4378 LOG(INFO) << StringPrintf(" Thread %-2d %6zd bytes ", record->ThinLockId(), record->ByteCount()) 4379 << PrettyClass(record->Type()); 4380 4381 for (size_t stack_frame = 0; stack_frame < kMaxAllocRecordStackDepth; ++stack_frame) { 4382 AllocRecordStackTraceElement* stack_element = record->StackElement(stack_frame); 4383 mirror::ArtMethod* m = stack_element->Method(); 4384 if (m == NULL) { 4385 break; 4386 } 4387 LOG(INFO) << " " << PrettyMethod(m) << " line " << stack_element->LineNumber(); 4388 } 4389 4390 // pause periodically to help logcat catch up 4391 if ((count % 5) == 0) { 4392 usleep(40000); 4393 } 4394 4395 i = (i + 1) & (alloc_record_max_ - 1); 4396 } 4397} 4398 4399class StringTable { 4400 public: 4401 StringTable() { 4402 } 4403 4404 void Add(const std::string& str) { 4405 table_.insert(str); 4406 } 4407 4408 void Add(const char* str) { 4409 table_.insert(str); 4410 } 4411 4412 size_t IndexOf(const char* s) const { 4413 auto it = table_.find(s); 4414 if (it == table_.end()) { 4415 LOG(FATAL) << "IndexOf(\"" << s << "\") failed"; 4416 } 4417 return std::distance(table_.begin(), it); 4418 } 4419 4420 size_t Size() const { 4421 return table_.size(); 4422 } 4423 4424 void WriteTo(std::vector<uint8_t>& bytes) const { 4425 for (const std::string& str : table_) { 4426 const char* s = str.c_str(); 4427 size_t s_len = CountModifiedUtf8Chars(s); 4428 std::unique_ptr<uint16_t> s_utf16(new uint16_t[s_len]); 4429 ConvertModifiedUtf8ToUtf16(s_utf16.get(), s); 4430 JDWP::AppendUtf16BE(bytes, s_utf16.get(), s_len); 4431 } 4432 } 4433 4434 private: 4435 std::set<std::string> table_; 4436 DISALLOW_COPY_AND_ASSIGN(StringTable); 4437}; 4438 4439static const char* GetMethodSourceFile(mirror::ArtMethod* method) 4440 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 4441 DCHECK(method != nullptr); 4442 const char* source_file = method->GetDeclaringClassSourceFile(); 4443 return (source_file != nullptr) ? source_file : ""; 4444} 4445 4446/* 4447 * The data we send to DDMS contains everything we have recorded. 
4448 * 4449 * Message header (all values big-endian): 4450 * (1b) message header len (to allow future expansion); includes itself 4451 * (1b) entry header len 4452 * (1b) stack frame len 4453 * (2b) number of entries 4454 * (4b) offset to string table from start of message 4455 * (2b) number of class name strings 4456 * (2b) number of method name strings 4457 * (2b) number of source file name strings 4458 * For each entry: 4459 * (4b) total allocation size 4460 * (2b) thread id 4461 * (2b) allocated object's class name index 4462 * (1b) stack depth 4463 * For each stack frame: 4464 * (2b) method's class name 4465 * (2b) method name 4466 * (2b) method source file 4467 * (2b) line number, clipped to 32767; -2 if native; -1 if no source 4468 * (xb) class name strings 4469 * (xb) method name strings 4470 * (xb) source file strings 4471 * 4472 * As with other DDM traffic, strings are sent as a 4-byte length 4473 * followed by UTF-16 data. 4474 * 4475 * We send up 16-bit unsigned indexes into string tables. In theory there 4476 * can be (kMaxAllocRecordStackDepth * gAllocRecordMax) unique strings in 4477 * each table, but in practice there should be far fewer. 4478 * 4479 * The chief reason for using a string table here is to keep the size of 4480 * the DDMS message to a minimum. This is partly to make the protocol 4481 * efficient, but also because we have to form the whole thing up all at 4482 * once in a memory buffer. 4483 * 4484 * We use separate string tables for class names, method names, and source 4485 * files to keep the indexes small. There will generally be no overlap 4486 * between the contents of these tables. 4487 */ 4488jbyteArray Dbg::GetRecentAllocations() { 4489 if (false) { 4490 DumpRecentAllocations(); 4491 } 4492 4493 Thread* self = Thread::Current(); 4494 std::vector<uint8_t> bytes; 4495 { 4496 MutexLock mu(self, *alloc_tracker_lock_); 4497 // 4498 // Part 1: generate string tables. 4499 // 4500 StringTable class_names; 4501 StringTable method_names; 4502 StringTable filenames; 4503 4504 int count = alloc_record_count_; 4505 int idx = HeadIndex(); 4506 while (count--) { 4507 AllocRecord* record = &recent_allocation_records_[idx]; 4508 class_names.Add(record->Type()->GetDescriptor()); 4509 for (size_t i = 0; i < kMaxAllocRecordStackDepth; i++) { 4510 mirror::ArtMethod* m = record->StackElement(i)->Method(); 4511 if (m != NULL) { 4512 class_names.Add(m->GetDeclaringClassDescriptor()); 4513 method_names.Add(m->GetName()); 4514 filenames.Add(GetMethodSourceFile(m)); 4515 } 4516 } 4517 4518 idx = (idx + 1) & (alloc_record_max_ - 1); 4519 } 4520 4521 LOG(INFO) << "allocation records: " << alloc_record_count_; 4522 4523 // 4524 // Part 2: Generate the output and store it in the buffer. 4525 // 4526 4527 // (1b) message header len (to allow future expansion); includes itself 4528 // (1b) entry header len 4529 // (1b) stack frame len 4530 const int kMessageHeaderLen = 15; 4531 const int kEntryHeaderLen = 9; 4532 const int kStackFrameLen = 8; 4533 JDWP::Append1BE(bytes, kMessageHeaderLen); 4534 JDWP::Append1BE(bytes, kEntryHeaderLen); 4535 JDWP::Append1BE(bytes, kStackFrameLen); 4536 4537 // (2b) number of entries 4538 // (4b) offset to string table from start of message 4539 // (2b) number of class name strings 4540 // (2b) number of method name strings 4541 // (2b) number of source file name strings 4542 JDWP::Append2BE(bytes, alloc_record_count_); 4543 size_t string_table_offset = bytes.size(); 4544 JDWP::Append4BE(bytes, 0); // We'll patch this later... 
4545 JDWP::Append2BE(bytes, class_names.Size()); 4546 JDWP::Append2BE(bytes, method_names.Size()); 4547 JDWP::Append2BE(bytes, filenames.Size()); 4548 4549 count = alloc_record_count_; 4550 idx = HeadIndex(); 4551 while (count--) { 4552 // For each entry: 4553 // (4b) total allocation size 4554 // (2b) thread id 4555 // (2b) allocated object's class name index 4556 // (1b) stack depth 4557 AllocRecord* record = &recent_allocation_records_[idx]; 4558 size_t stack_depth = record->GetDepth(); 4559 size_t allocated_object_class_name_index = 4560 class_names.IndexOf(record->Type()->GetDescriptor().c_str()); 4561 JDWP::Append4BE(bytes, record->ByteCount()); 4562 JDWP::Append2BE(bytes, record->ThinLockId()); 4563 JDWP::Append2BE(bytes, allocated_object_class_name_index); 4564 JDWP::Append1BE(bytes, stack_depth); 4565 4566 for (size_t stack_frame = 0; stack_frame < stack_depth; ++stack_frame) { 4567 // For each stack frame: 4568 // (2b) method's class name 4569 // (2b) method name 4570 // (2b) method source file 4571 // (2b) line number, clipped to 32767; -2 if native; -1 if no source 4572 mirror::ArtMethod* m = record->StackElement(stack_frame)->Method(); 4573 size_t class_name_index = class_names.IndexOf(m->GetDeclaringClassDescriptor()); 4574 size_t method_name_index = method_names.IndexOf(m->GetName()); 4575 size_t file_name_index = filenames.IndexOf(GetMethodSourceFile(m)); 4576 JDWP::Append2BE(bytes, class_name_index); 4577 JDWP::Append2BE(bytes, method_name_index); 4578 JDWP::Append2BE(bytes, file_name_index); 4579 JDWP::Append2BE(bytes, record->StackElement(stack_frame)->LineNumber()); 4580 } 4581 4582 idx = (idx + 1) & (alloc_record_max_ - 1); 4583 } 4584 4585 // (xb) class name strings 4586 // (xb) method name strings 4587 // (xb) source file strings 4588 JDWP::Set4BE(&bytes[string_table_offset], bytes.size()); 4589 class_names.WriteTo(bytes); 4590 method_names.WriteTo(bytes); 4591 filenames.WriteTo(bytes); 4592 } 4593 JNIEnv* env = self->GetJniEnv(); 4594 jbyteArray result = env->NewByteArray(bytes.size()); 4595 if (result != NULL) { 4596 env->SetByteArrayRegion(result, 0, bytes.size(), reinterpret_cast<const jbyte*>(&bytes[0])); 4597 } 4598 return result; 4599} 4600 4601mirror::ArtMethod* DeoptimizationRequest::Method() const { 4602 ScopedObjectAccessUnchecked soa(Thread::Current()); 4603 return soa.DecodeMethod(method_); 4604} 4605 4606void DeoptimizationRequest::SetMethod(mirror::ArtMethod* m) { 4607 ScopedObjectAccessUnchecked soa(Thread::Current()); 4608 method_ = soa.EncodeMethod(m); 4609} 4610 4611} // namespace art 4612
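// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original debugger.cc: a minimal,
// self-contained model of the power-of-two circular buffer arithmetic used by
// Dbg::HeadIndex() and DumpRecentAllocations() above. All names here are
// hypothetical.
//
//   #include <cstddef>
//
//   // 'head' is the most recently written slot, 'count' slots are in use and
//   // 'max' is a power of two. Returns the index of the oldest live record.
//   static size_t OldestRecordIndex(size_t head, size_t count, size_t max) {
//     return (head + 1 + max - count) & (max - 1);
//   }
//
//   // Example: with max = 8, head = 2 and count = 5, the oldest record is at
//   // (2 + 1 + 8 - 5) & 7 == 6; walking 6, 7, 0, 1, 2 visits the five records
//   // from oldest to newest, finishing at the head slot.
// ---------------------------------------------------------------------------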