thread.cc revision 7dc5166ea740359d381097a7ab382c1dd404055f
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "thread.h" 18 19#include <dynamic_annotations.h> 20#include <pthread.h> 21#include <signal.h> 22#include <sys/mman.h> 23#include <sys/resource.h> 24#include <sys/time.h> 25 26#include <algorithm> 27#include <bitset> 28#include <cerrno> 29#include <iostream> 30#include <list> 31 32#include "debugger.h" 33#include "class_linker.h" 34#include "class_loader.h" 35#include "heap.h" 36#include "jni_internal.h" 37#include "monitor.h" 38#include "oat/runtime/context.h" 39#include "object.h" 40#include "object_utils.h" 41#include "reflection.h" 42#include "runtime.h" 43#include "runtime_support.h" 44#include "ScopedLocalRef.h" 45#include "scoped_jni_thread_state.h" 46#include "shadow_frame.h" 47#include "space.h" 48#include "stack.h" 49#include "stack_indirect_reference_table.h" 50#include "thread_list.h" 51#include "utils.h" 52#include "verifier/gc_map.h" 53 54namespace art { 55 56pthread_key_t Thread::pthread_key_self_; 57 58static Class* gThreadGroup = NULL; 59static Class* gThreadLock = NULL; 60static Field* gThread_daemon = NULL; 61static Field* gThread_group = NULL; 62static Field* gThread_lock = NULL; 63static Field* gThread_name = NULL; 64static Field* gThread_priority = NULL; 65static Field* gThread_uncaughtHandler = NULL; 66static Field* gThread_vmData = NULL; 67static Field* gThreadGroup_mMain = NULL; 68static Field* gThreadGroup_mSystem = NULL; 69static Field* gThreadGroup_name = NULL; 70static Field* gThreadLock_thread = NULL; 71static Method* gThread_run = NULL; 72static Method* gThreadGroup_removeThread = NULL; 73static Method* gUncaughtExceptionHandler_uncaughtException = NULL; 74 75static const char* kThreadNameDuringStartup = "<native thread without managed peer>"; 76 77void Thread::InitCardTable() { 78 card_table_ = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin(); 79} 80 81#if !defined(__APPLE__) 82static void UnimplementedEntryPoint() { 83 UNIMPLEMENTED(FATAL); 84} 85#endif 86 87void Thread::InitFunctionPointers() { 88#if !defined(__APPLE__) // The Mac GCC is too old to accept this code. 89 // Insert a placeholder so we can easily tell if we call an unimplemented entry point. 90 uintptr_t* begin = reinterpret_cast<uintptr_t*>(&entrypoints_); 91 uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(entrypoints_)); 92 for (uintptr_t* it = begin; it != end; ++it) { 93 *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint); 94 } 95#endif 96 InitEntryPoints(&entrypoints_); 97} 98 99void Thread::SetDebuggerUpdatesEnabled(bool enabled) { 100 LOG(INFO) << "Turning debugger updates " << (enabled ? "on" : "off") << " for " << *this; 101#if !defined(ART_USE_LLVM_COMPILER) 102 ChangeDebuggerEntryPoint(&entrypoints_, enabled); 103#else 104 UNIMPLEMENTED(FATAL); 105#endif 106} 107 108void Thread::InitTid() { 109 tid_ = ::art::GetTid(); 110} 111 112void Thread::InitAfterFork() { 113 InitTid(); 114 115#if defined(__BIONIC__) 116 // Work around a bionic bug. 117 struct bionic_pthread_internal_t { 118 void* next; 119 void** pref; 120 pthread_attr_t attr; 121 pid_t kernel_id; 122 // et cetera. we just need 'kernel_id' so we can stop here. 123 }; 124 bionic_pthread_internal_t* self = reinterpret_cast<bionic_pthread_internal_t*>(pthread_self()); 125 if (self->kernel_id == tid_) { 126 // TODO: if you see this logging, you can remove this code! 127 LOG(INFO) << "*** this tree doesn't have the bionic pthread kernel_id bug"; 128 } 129 self->kernel_id = tid_; 130#endif 131} 132 133void* Thread::CreateCallback(void* arg) { 134 Thread* self = reinterpret_cast<Thread*>(arg); 135 self->Init(); 136 137 // Wait until it's safe to start running code. (There may have been a suspend-all 138 // in progress while we were starting up.) 139 Runtime* runtime = Runtime::Current(); 140 runtime->GetThreadList()->WaitForGo(); 141 142 { 143 CHECK_EQ(self->GetState(), kRunnable); 144 SirtRef<String> thread_name(self->GetThreadName()); 145 self->SetThreadName(thread_name->ToModifiedUtf8().c_str()); 146 } 147 148 Dbg::PostThreadStart(self); 149 150 // Invoke the 'run' method of our java.lang.Thread. 151 CHECK(self->peer_ != NULL); 152 Object* receiver = self->peer_; 153 Method* m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(gThread_run); 154 m->Invoke(self, receiver, NULL, NULL); 155 156 // Detach. 157 runtime->GetThreadList()->Unregister(); 158 159 return NULL; 160} 161 162static void SetVmData(Object* managed_thread, Thread* native_thread) { 163 gThread_vmData->SetInt(managed_thread, reinterpret_cast<uintptr_t>(native_thread)); 164} 165 166Thread* Thread::FromManagedThread(Object* thread_peer) { 167 return reinterpret_cast<Thread*>(static_cast<uintptr_t>(gThread_vmData->GetInt(thread_peer))); 168} 169 170Thread* Thread::FromManagedThread(JNIEnv* env, jobject java_thread) { 171 return FromManagedThread(Decode<Object*>(env, java_thread)); 172} 173 174static size_t FixStackSize(size_t stack_size) { 175 // A stack size of zero means "use the default". 176 if (stack_size == 0) { 177 stack_size = Runtime::Current()->GetDefaultStackSize(); 178 } 179 180 // Dalvik used the bionic pthread default stack size for native threads, 181 // so include that here to support apps that expect large native stacks. 182 stack_size += 1 * MB; 183 184 185 // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN. 186 if (stack_size < PTHREAD_STACK_MIN) { 187 stack_size = PTHREAD_STACK_MIN; 188 } 189 190 // It's likely that callers are trying to ensure they have at least a certain amount of 191 // stack space, so we should add our reserved space on top of what they requested, rather 192 // than implicitly take it away from them. 193 stack_size += Thread::kStackOverflowReservedBytes; 194 195 // Some systems require the stack size to be a multiple of the system page size, so round up. 196 stack_size = RoundUp(stack_size, kPageSize); 197 198 return stack_size; 199} 200 201static void SigAltStack(stack_t* new_stack, stack_t* old_stack) { 202 if (sigaltstack(new_stack, old_stack) == -1) { 203 PLOG(FATAL) << "sigaltstack failed"; 204 } 205} 206 207static void SetUpAlternateSignalStack() { 208 // Create and set an alternate signal stack. 209 stack_t ss; 210 ss.ss_sp = new uint8_t[SIGSTKSZ]; 211 ss.ss_size = SIGSTKSZ; 212 ss.ss_flags = 0; 213 CHECK(ss.ss_sp != NULL); 214 SigAltStack(&ss, NULL); 215 216 // Double-check that it worked. 217 ss.ss_sp = NULL; 218 SigAltStack(NULL, &ss); 219 VLOG(threads) << "Alternate signal stack is " << PrettySize(ss.ss_size) << " at " << ss.ss_sp; 220} 221 222static void TearDownAlternateSignalStack() { 223 // Get the pointer so we can free the memory. 224 stack_t ss; 225 SigAltStack(NULL, &ss); 226 uint8_t* allocated_signal_stack = reinterpret_cast<uint8_t*>(ss.ss_sp); 227 228 // Tell the kernel to stop using it. 229 ss.ss_sp = NULL; 230 ss.ss_flags = SS_DISABLE; 231 ss.ss_size = SIGSTKSZ; // Avoid ENOMEM failure with Mac OS' buggy libc. 232 SigAltStack(&ss, NULL); 233 234 // Free it. 235 delete[] allocated_signal_stack; 236} 237 238void Thread::Create(Object* peer, size_t stack_size) { 239 CHECK(peer != NULL); 240 241 stack_size = FixStackSize(stack_size); 242 243 Thread* native_thread = new Thread; 244 native_thread->peer_ = peer; 245 246 // Thread.start is synchronized, so we know that vmData is 0, 247 // and know that we're not racing to assign it. 248 SetVmData(peer, native_thread); 249 250 { 251 ScopedThreadStateChange tsc(Thread::Current(), kVmWait); 252 pthread_t new_pthread; 253 pthread_attr_t attr; 254 CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread"); 255 CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED"); 256 CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size); 257 CHECK_PTHREAD_CALL(pthread_create, (&new_pthread, &attr, Thread::CreateCallback, native_thread), "new thread"); 258 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread"); 259 } 260 261 // Let the child know when it's safe to start running. 262 Runtime::Current()->GetThreadList()->SignalGo(native_thread); 263} 264 265void Thread::Init() { 266 // This function does all the initialization that must be run by the native thread it applies to. 267 // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so 268 // we can handshake with the corresponding native thread when it's ready.) Check this native 269 // thread hasn't been through here already... 270 CHECK(Thread::Current() == NULL); 271 272 SetUpAlternateSignalStack(); 273 InitCpu(); 274 InitFunctionPointers(); 275 InitCardTable(); 276 277 Runtime* runtime = Runtime::Current(); 278 CHECK(runtime != NULL); 279 280 thin_lock_id_ = runtime->GetThreadList()->AllocThreadId(); 281 282 InitTid(); 283 InitStackHwm(); 284 285 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach"); 286 287 jni_env_ = new JNIEnvExt(this, runtime->GetJavaVM()); 288 289 runtime->GetThreadList()->Register(); 290} 291 292Thread* Thread::Attach(const char* thread_name, bool as_daemon, Object* thread_group) { 293 Thread* self = new Thread; 294 self->Init(); 295 296 self->SetState(kNative); 297 298 // If we're the main thread, ClassLinker won't be created until after we're attached, 299 // so that thread needs a two-stage attach. Regular threads don't need this hack. 300 // In the compiler, all threads need this hack, because no-one's going to be getting 301 // a native peer! 302 if (self->thin_lock_id_ != ThreadList::kMainId && !Runtime::Current()->IsCompiler()) { 303 self->CreatePeer(thread_name, as_daemon, thread_group); 304 } else { 305 // These aren't necessary, but they improve diagnostics for unit tests & command-line tools. 306 if (thread_name != NULL) { 307 self->name_->assign(thread_name); 308 ::art::SetThreadName(thread_name); 309 } 310 } 311 312 self->GetJniEnv()->locals.AssertEmpty(); 313 return self; 314} 315 316Object* Thread::GetMainThreadGroup() { 317 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(gThreadGroup, true, true)) { 318 return NULL; 319 } 320 return gThreadGroup_mMain->GetObject(NULL); 321} 322 323Object* Thread::GetSystemThreadGroup() { 324 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(gThreadGroup, true, true)) { 325 return NULL; 326 } 327 return gThreadGroup_mSystem->GetObject(NULL); 328} 329 330void Thread::CreatePeer(const char* name, bool as_daemon, Object* thread_group) { 331 CHECK(Runtime::Current()->IsStarted()); 332 JNIEnv* env = jni_env_; 333 334 if (thread_group == NULL) { 335 thread_group = Thread::GetMainThreadGroup(); 336 } 337 ScopedLocalRef<jobject> java_thread_group(env, AddLocalReference<jobject>(env, thread_group)); 338 ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name)); 339 jint thread_priority = GetNativePriority(); 340 jboolean thread_is_daemon = as_daemon; 341 342 ScopedLocalRef<jclass> c(env, env->FindClass("java/lang/Thread")); 343 ScopedLocalRef<jobject> peer(env, env->AllocObject(c.get())); 344 peer_ = DecodeJObject(peer.get()); 345 if (peer_ == NULL) { 346 CHECK(IsExceptionPending()); 347 return; 348 } 349 jmethodID mid = env->GetMethodID(c.get(), "<init>", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V"); 350 env->CallNonvirtualVoidMethod(peer.get(), c.get(), mid, java_thread_group.get(), thread_name.get(), thread_priority, thread_is_daemon); 351 CHECK(!IsExceptionPending()) << " " << PrettyTypeOf(GetException()); 352 SetVmData(peer_, Thread::Current()); 353 354 SirtRef<String> peer_thread_name(GetThreadName()); 355 if (peer_thread_name.get() == NULL) { 356 // The Thread constructor should have set the Thread.name to a 357 // non-null value. However, because we can run without code 358 // available (in the compiler, in tests), we manually assign the 359 // fields the constructor should have set. 360 gThread_daemon->SetBoolean(peer_, thread_is_daemon); 361 gThread_group->SetObject(peer_, thread_group); 362 gThread_name->SetObject(peer_, Decode<Object*>(env, thread_name.get())); 363 gThread_priority->SetInt(peer_, thread_priority); 364 peer_thread_name.reset(GetThreadName()); 365 } 366 // thread_name may have been null, so don't trust this to be non-null 367 if (peer_thread_name.get() != NULL) { 368 SetThreadName(peer_thread_name->ToModifiedUtf8().c_str()); 369 } 370 371 // Pre-allocate an OutOfMemoryError for the double-OOME case. 372 ThrowNewException("Ljava/lang/OutOfMemoryError;", 373 "OutOfMemoryError thrown while trying to throw OutOfMemoryError; no stack available"); 374 ScopedLocalRef<jthrowable> exception(env, env->ExceptionOccurred()); 375 env->ExceptionClear(); 376 pre_allocated_OutOfMemoryError_ = Decode<Throwable*>(env, exception.get()); 377} 378 379void Thread::SetThreadName(const char* name) { 380 name_->assign(name); 381 ::art::SetThreadName(name); 382 Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM")); 383} 384 385void Thread::InitStackHwm() { 386 void* stack_base; 387 size_t stack_size; 388 GetThreadStack(stack_base, stack_size); 389 390 // TODO: include this in the thread dumps; potentially useful in SIGQUIT output? 391 VLOG(threads) << StringPrintf("Native stack is at %p (%s)", stack_base, PrettySize(stack_size).c_str()); 392 393 stack_begin_ = reinterpret_cast<byte*>(stack_base); 394 stack_size_ = stack_size; 395 396 if (stack_size_ <= kStackOverflowReservedBytes) { 397 LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << stack_size_ << " bytes)"; 398 } 399 400 // TODO: move this into the Linux GetThreadStack implementation. 401#if !defined(__APPLE__) 402 // If we're the main thread, check whether we were run with an unlimited stack. In that case, 403 // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection 404 // will be broken because we'll die long before we get close to 2GB. 405 if (thin_lock_id_ == 1) { 406 rlimit stack_limit; 407 if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) { 408 PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed"; 409 } 410 if (stack_limit.rlim_cur == RLIM_INFINITY) { 411 // Find the default stack size for new threads... 412 pthread_attr_t default_attributes; 413 size_t default_stack_size; 414 CHECK_PTHREAD_CALL(pthread_attr_init, (&default_attributes), "default stack size query"); 415 CHECK_PTHREAD_CALL(pthread_attr_getstacksize, (&default_attributes, &default_stack_size), 416 "default stack size query"); 417 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&default_attributes), "default stack size query"); 418 419 // ...and use that as our limit. 420 size_t old_stack_size = stack_size_; 421 stack_size_ = default_stack_size; 422 stack_begin_ += (old_stack_size - stack_size_); 423 VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")" 424 << " to " << PrettySize(stack_size_) 425 << " with base " << reinterpret_cast<void*>(stack_begin_); 426 } 427 } 428#endif 429 430 // Set stack_end_ to the bottom of the stack saving space of stack overflows 431 ResetDefaultStackEnd(); 432 433 // Sanity check. 434 int stack_variable; 435 CHECK_GT(&stack_variable, reinterpret_cast<void*>(stack_end_)); 436} 437 438void Thread::Dump(std::ostream& os, bool full) const { 439 if (full) { 440 DumpState(os); 441 DumpStack(os); 442 } else { 443 os << "Thread["; 444 if (GetThinLockId() != 0) { 445 // If we're in kStarting, we won't have a thin lock id or tid yet. 446 os << GetThinLockId() 447 << ",tid=" << GetTid() << ','; 448 } 449 os << GetState() 450 << ",Thread*=" << this 451 << ",peer=" << peer_ 452 << ",\"" << *name_ << "\"" 453 << "]"; 454 } 455} 456 457String* Thread::GetThreadName() const { 458 return (peer_ != NULL) ? reinterpret_cast<String*>(gThread_name->GetObject(peer_)) : NULL; 459} 460 461void Thread::GetThreadName(std::string& name) const { 462 name.assign(*name_); 463} 464 465void Thread::DumpState(std::ostream& os) const { 466 std::string group_name; 467 int priority; 468 bool is_daemon = false; 469 470 if (peer_ != NULL) { 471 priority = gThread_priority->GetInt(peer_); 472 is_daemon = gThread_daemon->GetBoolean(peer_); 473 474 Object* thread_group = GetThreadGroup(); 475 if (thread_group != NULL) { 476 String* group_name_string = reinterpret_cast<String*>(gThreadGroup_name->GetObject(thread_group)); 477 group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : "<null>"; 478 } 479 } else { 480 priority = GetNativePriority(); 481 } 482 483 int policy; 484 sched_param sp; 485 CHECK_PTHREAD_CALL(pthread_getschedparam, (pthread_self(), &policy, &sp), __FUNCTION__); 486 487 std::string scheduler_group_name(GetSchedulerGroupName(GetTid())); 488 if (scheduler_group_name.empty()) { 489 scheduler_group_name = "default"; 490 } 491 492 os << '"' << *name_ << '"'; 493 if (is_daemon) { 494 os << " daemon"; 495 } 496 os << " prio=" << priority 497 << " tid=" << GetThinLockId() 498 << " " << GetState() << "\n"; 499 500 os << " | group=\"" << group_name << "\"" 501 << " sCount=" << suspend_count_ 502 << " dsCount=" << debug_suspend_count_ 503 << " obj=" << reinterpret_cast<void*>(peer_) 504 << " self=" << reinterpret_cast<const void*>(this) << "\n"; 505 os << " | sysTid=" << GetTid() 506 << " nice=" << getpriority(PRIO_PROCESS, GetTid()) 507 << " sched=" << policy << "/" << sp.sched_priority 508 << " cgrp=" << scheduler_group_name 509 << " handle=" << pthread_self() << "\n"; 510 511 // Grab the scheduler stats for this thread. 512 std::string scheduler_stats; 513 if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", GetTid()), &scheduler_stats)) { 514 scheduler_stats.resize(scheduler_stats.size() - 1); // Lose the trailing '\n'. 515 } else { 516 scheduler_stats = "0 0 0"; 517 } 518 519 int utime = 0; 520 int stime = 0; 521 int task_cpu = 0; 522 GetTaskStats(GetTid(), utime, stime, task_cpu); 523 524 os << " | schedstat=( " << scheduler_stats << " )" 525 << " utm=" << utime 526 << " stm=" << stime 527 << " core=" << task_cpu 528 << " HZ=" << sysconf(_SC_CLK_TCK) << "\n"; 529 530 os << " | stackSize=" << PrettySize(stack_size_) 531 << " stack=" << reinterpret_cast<void*>(stack_begin_) << "-" << reinterpret_cast<void*>(stack_end_) << "\n"; 532} 533 534#if !defined(ART_USE_LLVM_COMPILER) 535void Thread::PushNativeToManagedRecord(NativeToManagedRecord* record) { 536 Method **sp = top_of_managed_stack_.GetSP(); 537#ifndef NDEBUG 538 if (sp != NULL) { 539 Method* m = *sp; 540 Runtime::Current()->GetHeap()->VerifyObject(m); 541 DCHECK((m == NULL) || m->IsMethod()); 542 } 543#endif 544 record->last_top_of_managed_stack_ = reinterpret_cast<void*>(sp); 545 record->last_top_of_managed_stack_pc_ = top_of_managed_stack_pc_; 546 record->link_ = native_to_managed_record_; 547 native_to_managed_record_ = record; 548 top_of_managed_stack_.SetSP(NULL); 549} 550#else 551void Thread::PushNativeToManagedRecord(NativeToManagedRecord*) { 552 LOG(FATAL) << "Called non-LLVM method with LLVM"; 553} 554#endif 555 556#if !defined(ART_USE_LLVM_COMPILER) 557void Thread::PopNativeToManagedRecord(const NativeToManagedRecord& record) { 558 native_to_managed_record_ = record.link_; 559 top_of_managed_stack_.SetSP(reinterpret_cast<Method**>(record.last_top_of_managed_stack_)); 560 top_of_managed_stack_pc_ = record.last_top_of_managed_stack_pc_; 561} 562#else 563void Thread::PopNativeToManagedRecord(const NativeToManagedRecord&) { 564 LOG(FATAL) << "Called non-LLVM method with LLVM"; 565} 566#endif 567 568struct StackDumpVisitor : public Thread::StackVisitor { 569 StackDumpVisitor(std::ostream& os, const Thread* thread) 570 : last_method(NULL), last_line_number(0), repetition_count(0), os(os), thread(thread), 571 frame_count(0) { 572 } 573 574 virtual ~StackDumpVisitor() { 575 if (frame_count == 0) { 576 os << " (no managed stack frames)\n"; 577 } 578 } 579 580 bool VisitFrame(const Frame& frame, uintptr_t pc) { 581 if (!frame.HasMethod()) { 582 return true; 583 } 584 const int kMaxRepetition = 3; 585 Method* m = frame.GetMethod(); 586 Class* c = m->GetDeclaringClass(); 587 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 588 const DexCache* dex_cache = c->GetDexCache(); 589 int line_number = -1; 590 if (dex_cache != NULL) { // be tolerant of bad input 591 const DexFile& dex_file = class_linker->FindDexFile(dex_cache); 592 line_number = dex_file.GetLineNumFromPC(m, m->ToDexPC(pc)); 593 } 594 if (line_number == last_line_number && last_method == m) { 595 repetition_count++; 596 } else { 597 if (repetition_count >= kMaxRepetition) { 598 os << " ... repeated " << (repetition_count - kMaxRepetition) << " times\n"; 599 } 600 repetition_count = 0; 601 last_line_number = line_number; 602 last_method = m; 603 } 604 if (repetition_count < kMaxRepetition) { 605 os << " at " << PrettyMethod(m, false); 606 if (m->IsNative()) { 607 os << "(Native method)"; 608 } else { 609 mh.ChangeMethod(m); 610 const char* source_file(mh.GetDeclaringClassSourceFile()); 611 os << "(" << (source_file != NULL ? source_file : "unavailable") 612 << ":" << line_number << ")"; 613 } 614 os << "\n"; 615 } 616 617 if (frame_count++ == 0) { 618 Monitor::DescribeWait(os, thread); 619 } 620 return true; 621 } 622 MethodHelper mh; 623 Method* last_method; 624 int last_line_number; 625 int repetition_count; 626 std::ostream& os; 627 const Thread* thread; 628 int frame_count; 629}; 630 631void Thread::DumpStack(std::ostream& os) const { 632 // If we're currently in native code, dump that stack before dumping the managed stack. 633 if (GetState() == kNative || GetState() == kVmWait) { 634 DumpKernelStack(os); 635 DumpNativeStack(os); 636 } 637 StackDumpVisitor dumper(os, this); 638 WalkStack(&dumper); 639} 640 641#if !defined(__APPLE__) 642void Thread::DumpKernelStack(std::ostream& os) const { 643 std::string kernel_stack_filename(StringPrintf("/proc/self/task/%d/stack", GetTid())); 644 std::string kernel_stack; 645 if (!ReadFileToString(kernel_stack_filename, &kernel_stack)) { 646 os << " (couldn't read " << kernel_stack_filename << ")"; 647 } 648 649 std::vector<std::string> kernel_stack_frames; 650 Split(kernel_stack, '\n', kernel_stack_frames); 651 // We skip the last stack frame because it's always equivalent to "[<ffffffff>] 0xffffffff", 652 // which looking at the source appears to be the kernel's way of saying "that's all, folks!". 653 kernel_stack_frames.pop_back(); 654 for (size_t i = 0; i < kernel_stack_frames.size(); ++i) { 655 os << " kernel: " << kernel_stack_frames[i] << "\n"; 656 } 657} 658#else 659// TODO: can we get the kernel stack on Mac OS? 660void Thread::DumpKernelStack(std::ostream&) const {} 661#endif 662 663void Thread::SetStateWithoutSuspendCheck(ThreadState new_state) { 664 volatile void* raw = reinterpret_cast<volatile void*>(&state_); 665 volatile int32_t* addr = reinterpret_cast<volatile int32_t*>(raw); 666 android_atomic_release_store(new_state, addr); 667} 668 669ThreadState Thread::SetState(ThreadState new_state) { 670 ThreadState old_state = state_; 671 if (old_state == new_state) { 672 return old_state; 673 } 674 675 volatile void* raw = reinterpret_cast<volatile void*>(&state_); 676 volatile int32_t* addr = reinterpret_cast<volatile int32_t*>(raw); 677 678 if (new_state == kRunnable) { 679 /* 680 * Change our status to kRunnable. The transition requires 681 * that we check for pending suspension, because the runtime considers 682 * us to be "asleep" in all other states, and another thread could 683 * be performing a GC now. 684 * 685 * The order of operations is very significant here. One way to 686 * do this wrong is: 687 * 688 * GCing thread Our thread (in kNative) 689 * ------------ ---------------------- 690 * check suspend count (== 0) 691 * SuspendAllThreads() 692 * grab suspend-count lock 693 * increment all suspend counts 694 * release suspend-count lock 695 * check thread state (== kNative) 696 * all are suspended, begin GC 697 * set state to kRunnable 698 * (continue executing) 699 * 700 * We can correct this by grabbing the suspend-count lock and 701 * performing both of our operations (check suspend count, set 702 * state) while holding it, now we need to grab a mutex on every 703 * transition to kRunnable. 704 * 705 * What we do instead is change the order of operations so that 706 * the transition to kRunnable happens first. If we then detect 707 * that the suspend count is nonzero, we switch to kSuspended. 708 * 709 * Appropriate compiler and memory barriers are required to ensure 710 * that the operations are observed in the expected order. 711 * 712 * This does create a small window of opportunity where a GC in 713 * progress could observe what appears to be a running thread (if 714 * it happens to look between when we set to kRunnable and when we 715 * switch to kSuspended). At worst this only affects assertions 716 * and thread logging. (We could work around it with some sort 717 * of intermediate "pre-running" state that is generally treated 718 * as equivalent to running, but that doesn't seem worthwhile.) 719 * 720 * We can also solve this by combining the "status" and "suspend 721 * count" fields into a single 32-bit value. This trades the 722 * store/load barrier on transition to kRunnable for an atomic RMW 723 * op on all transitions and all suspend count updates (also, all 724 * accesses to status or the thread count require bit-fiddling). 725 * It also eliminates the brief transition through kRunnable when 726 * the thread is supposed to be suspended. This is possibly faster 727 * on SMP and slightly more correct, but less convenient. 728 */ 729 android_atomic_acquire_store(new_state, addr); 730 ANNOTATE_IGNORE_READS_BEGIN(); 731 int suspend_count = suspend_count_; 732 ANNOTATE_IGNORE_READS_END(); 733 if (suspend_count != 0) { 734 Runtime::Current()->GetThreadList()->FullSuspendCheck(this); 735 } 736 } else { 737 /* 738 * Not changing to kRunnable. No additional work required. 739 * 740 * We use a releasing store to ensure that, if we were runnable, 741 * any updates we previously made to objects on the managed heap 742 * will be observed before the state change. 743 */ 744 android_atomic_release_store(new_state, addr); 745 } 746 747 return old_state; 748} 749 750bool Thread::IsSuspended() { 751 ANNOTATE_IGNORE_READS_BEGIN(); 752 int suspend_count = suspend_count_; 753 ANNOTATE_IGNORE_READS_END(); 754 return suspend_count != 0 && GetState() != kRunnable; 755} 756 757static void ReportThreadSuspendTimeout(Thread* waiting_thread) { 758 Runtime* runtime = Runtime::Current(); 759 std::ostringstream ss; 760 ss << "Thread suspend timeout waiting for thread " << *waiting_thread << "\n"; 761 runtime->DumpLockHolders(ss); 762 ss << "\n"; 763 runtime->GetThreadList()->DumpLocked(ss); 764 LOG(FATAL) << ss.str(); 765} 766 767void Thread::WaitUntilSuspended() { 768 static const useconds_t kTimeoutUs = 30 * 1000000; // 30s. 769 770 useconds_t total_delay = 0; 771 useconds_t delay = 0; 772 while (GetState() == kRunnable) { 773 if (total_delay >= kTimeoutUs) { 774 ReportThreadSuspendTimeout(this); 775 } 776 useconds_t new_delay = delay * 2; 777 CHECK_GE(new_delay, delay); 778 delay = new_delay; 779 if (delay == 0) { 780 sched_yield(); 781 delay = 10000; 782 } else { 783 usleep(delay); 784 total_delay += delay; 785 } 786 } 787} 788 789void Thread::ThreadExitCallback(void* arg) { 790 Thread* self = reinterpret_cast<Thread*>(arg); 791 LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self; 792} 793 794void Thread::Startup() { 795 // Allocate a TLS slot. 796 CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key"); 797 798 // Double-check the TLS slot allocation. 799 if (pthread_getspecific(pthread_key_self_) != NULL) { 800 LOG(FATAL) << "Newly-created pthread TLS slot is not NULL"; 801 } 802} 803 804// TODO: make more accessible? 805static Class* FindClassOrDie(ClassLinker* class_linker, const char* descriptor) { 806 Class* c = class_linker->FindSystemClass(descriptor); 807 CHECK(c != NULL) << descriptor; 808 return c; 809} 810 811// TODO: make more accessible? 812static Field* FindFieldOrDie(Class* c, const char* name, const char* descriptor) { 813 Field* f = c->FindDeclaredInstanceField(name, descriptor); 814 CHECK(f != NULL) << PrettyClass(c) << " " << name << " " << descriptor; 815 return f; 816} 817 818// TODO: make more accessible? 819static Method* FindMethodOrDie(Class* c, const char* name, const char* signature) { 820 Method* m = c->FindVirtualMethod(name, signature); 821 CHECK(m != NULL) << PrettyClass(c) << " " << name << " " << signature; 822 return m; 823} 824 825// TODO: make more accessible? 826static Field* FindStaticFieldOrDie(Class* c, const char* name, const char* descriptor) { 827 Field* f = c->FindDeclaredStaticField(name, descriptor); 828 CHECK(f != NULL) << PrettyClass(c) << " " << name << " " << descriptor; 829 return f; 830} 831 832void Thread::FinishStartup() { 833 CHECK(Runtime::Current()->IsStarted()); 834 Thread* self = Thread::Current(); 835 836 // Need to be kRunnable for FindClass 837 ScopedThreadStateChange tsc(self, kRunnable); 838 839 // Now the ClassLinker is ready, we can find the various Class*, Field*, and Method*s we need. 840 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 841 842 Class* Thread_class = FindClassOrDie(class_linker, "Ljava/lang/Thread;"); 843 Class* UncaughtExceptionHandler_class = FindClassOrDie(class_linker, "Ljava/lang/Thread$UncaughtExceptionHandler;"); 844 gThreadGroup = FindClassOrDie(class_linker, "Ljava/lang/ThreadGroup;"); 845 gThreadLock = FindClassOrDie(class_linker, "Ljava/lang/ThreadLock;"); 846 847 gThread_daemon = FindFieldOrDie(Thread_class, "daemon", "Z"); 848 gThread_group = FindFieldOrDie(Thread_class, "group", "Ljava/lang/ThreadGroup;"); 849 gThread_lock = FindFieldOrDie(Thread_class, "lock", "Ljava/lang/ThreadLock;"); 850 gThread_name = FindFieldOrDie(Thread_class, "name", "Ljava/lang/String;"); 851 gThread_priority = FindFieldOrDie(Thread_class, "priority", "I"); 852 gThread_uncaughtHandler = FindFieldOrDie(Thread_class, "uncaughtHandler", "Ljava/lang/Thread$UncaughtExceptionHandler;"); 853 gThread_vmData = FindFieldOrDie(Thread_class, "vmData", "I"); 854 gThreadGroup_name = FindFieldOrDie(gThreadGroup, "name", "Ljava/lang/String;"); 855 gThreadGroup_mMain = FindStaticFieldOrDie(gThreadGroup, "mMain", "Ljava/lang/ThreadGroup;"); 856 gThreadGroup_mSystem = FindStaticFieldOrDie(gThreadGroup, "mSystem", "Ljava/lang/ThreadGroup;"); 857 gThreadLock_thread = FindFieldOrDie(gThreadLock, "thread", "Ljava/lang/Thread;"); 858 859 gThread_run = FindMethodOrDie(Thread_class, "run", "()V"); 860 gThreadGroup_removeThread = FindMethodOrDie(gThreadGroup, "removeThread", "(Ljava/lang/Thread;)V"); 861 gUncaughtExceptionHandler_uncaughtException = FindMethodOrDie(UncaughtExceptionHandler_class, 862 "uncaughtException", "(Ljava/lang/Thread;Ljava/lang/Throwable;)V"); 863 864 // Finish attaching the main thread. 865 Thread::Current()->CreatePeer("main", false, Thread::GetMainThreadGroup()); 866 867 InitBoxingMethods(); 868 class_linker->RunRootClinits(); 869} 870 871void Thread::Shutdown() { 872 CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key"); 873} 874 875uint32_t Thread::LockOwnerFromThreadLock(Object* thread_lock) { 876 if (thread_lock == NULL || thread_lock->GetClass() != gThreadLock) { 877 return ThreadList::kInvalidId; 878 } 879 Object* managed_thread = gThreadLock_thread->GetObject(thread_lock); 880 if (managed_thread == NULL) { 881 return ThreadList::kInvalidId; 882 } 883 uintptr_t vmData = static_cast<uintptr_t>(gThread_vmData->GetInt(managed_thread)); 884 Thread* thread = reinterpret_cast<Thread*>(vmData); 885 if (thread == NULL) { 886 return ThreadList::kInvalidId; 887 } 888 return thread->GetThinLockId(); 889} 890 891Thread::Thread() 892 : thin_lock_id_(0), 893 tid_(0), 894 peer_(NULL), 895 top_of_managed_stack_(), 896 top_of_managed_stack_pc_(0), 897 wait_mutex_(new Mutex("a thread wait mutex")), 898 wait_cond_(new ConditionVariable("a thread wait condition variable")), 899 wait_monitor_(NULL), 900 interrupted_(false), 901 wait_next_(NULL), 902 monitor_enter_object_(NULL), 903 card_table_(0), 904 stack_end_(NULL), 905 native_to_managed_record_(NULL), 906 top_sirt_(NULL), 907 top_shadow_frame_(NULL), 908 jni_env_(NULL), 909 state_(kNative), 910 self_(NULL), 911 runtime_(NULL), 912 exception_(NULL), 913 suspend_count_(0), 914 debug_suspend_count_(0), 915 class_loader_override_(NULL), 916 long_jump_context_(NULL), 917 throwing_OutOfMemoryError_(false), 918 pre_allocated_OutOfMemoryError_(NULL), 919 debug_invoke_req_(new DebugInvokeReq), 920 trace_stack_(new std::vector<TraceStackFrame>), 921 name_(new std::string(kThreadNameDuringStartup)) { 922 CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread); 923 memset(&held_mutexes_[0], 0, sizeof(held_mutexes_)); 924} 925 926bool Thread::IsStillStarting() const { 927 // You might think you can check whether the state is kStarting, but for much of thread startup, 928 // the thread might also be in kVmWait. 929 // You might think you can check whether the peer is NULL, but the peer is actually created and 930 // assigned fairly early on, and needs to be. 931 // It turns out that the last thing to change is the thread name; that's a good proxy for "has 932 // this thread _ever_ entered kRunnable". 933 return (*name_ == kThreadNameDuringStartup); 934} 935 936static void MonitorExitVisitor(const Object* object, void*) { 937 Object* entered_monitor = const_cast<Object*>(object); 938 LOG(WARNING) << "Calling MonitorExit on object " << object << " (" << PrettyTypeOf(object) << ")" 939 << " left locked by native thread " << *Thread::Current() << " which is detaching"; 940 entered_monitor->MonitorExit(Thread::Current()); 941} 942 943void Thread::Destroy() { 944 // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited. 945 if (jni_env_ != NULL) { 946 jni_env_->monitors.VisitRoots(MonitorExitVisitor, NULL); 947 } 948 949 if (peer_ != NULL) { 950 Thread* self = this; 951 952 // We may need to call user-supplied managed code. 953 SetState(kRunnable); 954 955 HandleUncaughtExceptions(); 956 RemoveFromThreadGroup(); 957 958 // this.vmData = 0; 959 SetVmData(peer_, NULL); 960 961 Dbg::PostThreadDeath(self); 962 963 // Thread.join() is implemented as an Object.wait() on the Thread.lock 964 // object. Signal anyone who is waiting. 965 Object* lock = gThread_lock->GetObject(peer_); 966 // (This conditional is only needed for tests, where Thread.lock won't have been set.) 967 if (lock != NULL) { 968 lock->MonitorEnter(self); 969 lock->NotifyAll(); 970 lock->MonitorExit(self); 971 } 972 } 973} 974 975Thread::~Thread() { 976 delete jni_env_; 977 jni_env_ = NULL; 978 979 SetState(kTerminated); 980 981 delete wait_cond_; 982 delete wait_mutex_; 983 984#if !defined(ART_USE_LLVM_COMPILER) 985 delete long_jump_context_; 986#endif 987 988 delete debug_invoke_req_; 989 delete trace_stack_; 990 delete name_; 991 992 TearDownAlternateSignalStack(); 993} 994 995void Thread::HandleUncaughtExceptions() { 996 if (!IsExceptionPending()) { 997 return; 998 } 999 1000 // Get and clear the exception. 1001 Object* exception = GetException(); 1002 ClearException(); 1003 1004 // If the thread has its own handler, use that. 1005 Object* handler = gThread_uncaughtHandler->GetObject(peer_); 1006 if (handler == NULL) { 1007 // Otherwise use the thread group's default handler. 1008 handler = GetThreadGroup(); 1009 } 1010 1011 // Call the handler. 1012 Method* m = handler->GetClass()->FindVirtualMethodForVirtualOrInterface(gUncaughtExceptionHandler_uncaughtException); 1013 JValue args[2]; 1014 args[0].SetL(peer_); 1015 args[1].SetL(exception); 1016 m->Invoke(this, handler, args, NULL); 1017 1018 // If the handler threw, clear that exception too. 1019 ClearException(); 1020} 1021 1022Object* Thread::GetThreadGroup() const { 1023 return gThread_group->GetObject(peer_); 1024} 1025 1026void Thread::RemoveFromThreadGroup() { 1027 // this.group.removeThread(this); 1028 // group can be null if we're in the compiler or a test. 1029 Object* group = GetThreadGroup(); 1030 if (group != NULL) { 1031 Method* m = group->GetClass()->FindVirtualMethodForVirtualOrInterface(gThreadGroup_removeThread); 1032 JValue args[1]; 1033 args[0].SetL(peer_); 1034 m->Invoke(this, group, args, NULL); 1035 } 1036} 1037 1038size_t Thread::NumSirtReferences() { 1039 size_t count = 0; 1040 for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) { 1041 count += cur->NumberOfReferences(); 1042 } 1043 return count; 1044} 1045 1046size_t Thread::NumShadowFrameReferences() { 1047 size_t count = 0; 1048 for (ShadowFrame* cur = top_shadow_frame_; cur; cur = cur->GetLink()) { 1049 count += cur->NumberOfReferences(); 1050 } 1051 return count; 1052} 1053 1054bool Thread::SirtContains(jobject obj) { 1055 Object** sirt_entry = reinterpret_cast<Object**>(obj); 1056 for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) { 1057 if (cur->Contains(sirt_entry)) { 1058 return true; 1059 } 1060 } 1061 return false; 1062} 1063 1064bool Thread::ShadowFrameContains(jobject obj) { 1065 Object** shadow_frame_entry = reinterpret_cast<Object**>(obj); 1066 for (ShadowFrame* cur = top_shadow_frame_; cur; cur = cur->GetLink()) { 1067 if (cur->Contains(shadow_frame_entry)) { 1068 return true; 1069 } 1070 } 1071 return false; 1072} 1073 1074bool Thread::StackReferencesContain(jobject obj) { 1075 return SirtContains(obj) || ShadowFrameContains(obj); 1076} 1077 1078void Thread::SirtVisitRoots(Heap::RootVisitor* visitor, void* arg) { 1079 for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) { 1080 size_t num_refs = cur->NumberOfReferences(); 1081 for (size_t j = 0; j < num_refs; j++) { 1082 Object* object = cur->GetReference(j); 1083 if (object != NULL) { 1084 visitor(object, arg); 1085 } 1086 } 1087 } 1088} 1089 1090void Thread::ShadowFrameVisitRoots(Heap::RootVisitor* visitor, void* arg) { 1091 for (ShadowFrame* cur = top_shadow_frame_; cur; cur = cur->GetLink()) { 1092 size_t num_refs = cur->NumberOfReferences(); 1093 for (size_t j = 0; j < num_refs; j++) { 1094 Object* object = cur->GetReference(j); 1095 if (object != NULL) { 1096 visitor(object, arg); 1097 } 1098 } 1099 } 1100} 1101 1102Object* Thread::DecodeJObject(jobject obj) { 1103 DCHECK(CanAccessDirectReferences()); 1104 if (obj == NULL) { 1105 return NULL; 1106 } 1107 IndirectRef ref = reinterpret_cast<IndirectRef>(obj); 1108 IndirectRefKind kind = GetIndirectRefKind(ref); 1109 Object* result; 1110 switch (kind) { 1111 case kLocal: 1112 { 1113 IndirectReferenceTable& locals = jni_env_->locals; 1114 result = const_cast<Object*>(locals.Get(ref)); 1115 break; 1116 } 1117 case kGlobal: 1118 { 1119 JavaVMExt* vm = Runtime::Current()->GetJavaVM(); 1120 IndirectReferenceTable& globals = vm->globals; 1121 MutexLock mu(vm->globals_lock); 1122 result = const_cast<Object*>(globals.Get(ref)); 1123 break; 1124 } 1125 case kWeakGlobal: 1126 { 1127 JavaVMExt* vm = Runtime::Current()->GetJavaVM(); 1128 IndirectReferenceTable& weak_globals = vm->weak_globals; 1129 MutexLock mu(vm->weak_globals_lock); 1130 result = const_cast<Object*>(weak_globals.Get(ref)); 1131 if (result == kClearedJniWeakGlobal) { 1132 // This is a special case where it's okay to return NULL. 1133 return NULL; 1134 } 1135 break; 1136 } 1137 case kSirtOrInvalid: 1138 default: 1139 // TODO: make stack indirect reference table lookup more efficient 1140 // Check if this is a local reference in the SIRT 1141 if (StackReferencesContain(obj)) { 1142 result = *reinterpret_cast<Object**>(obj); // Read from SIRT 1143 } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) { 1144 // Assume an invalid local reference is actually a direct pointer. 1145 result = reinterpret_cast<Object*>(obj); 1146 } else { 1147 result = kInvalidIndirectRefObject; 1148 } 1149 } 1150 1151 if (result == NULL) { 1152 LOG(ERROR) << "JNI ERROR (app bug): use of deleted " << kind << ": " << obj; 1153 JniAbort(NULL); 1154 } else { 1155 if (result != kInvalidIndirectRefObject) { 1156 Runtime::Current()->GetHeap()->VerifyObject(result); 1157 } 1158 } 1159 return result; 1160} 1161 1162class CountStackDepthVisitor : public Thread::StackVisitor { 1163 public: 1164 CountStackDepthVisitor() : depth_(0), skip_depth_(0), skipping_(true) {} 1165 1166 bool VisitFrame(const Frame& frame, uintptr_t /*pc*/) { 1167 // We want to skip frames up to and including the exception's constructor. 1168 // Note we also skip the frame if it doesn't have a method (namely the callee 1169 // save frame) 1170 if (skipping_ && frame.HasMethod() && 1171 !Throwable::GetJavaLangThrowable()->IsAssignableFrom(frame.GetMethod()->GetDeclaringClass())) { 1172 skipping_ = false; 1173 } 1174 if (!skipping_) { 1175 if (frame.HasMethod()) { // ignore callee save frames 1176 ++depth_; 1177 } 1178 } else { 1179 ++skip_depth_; 1180 } 1181 return true; 1182 } 1183 1184 int GetDepth() const { 1185 return depth_; 1186 } 1187 1188 int GetSkipDepth() const { 1189 return skip_depth_; 1190 } 1191 1192 private: 1193 uint32_t depth_; 1194 uint32_t skip_depth_; 1195 bool skipping_; 1196}; 1197 1198class BuildInternalStackTraceVisitor : public Thread::StackVisitor { 1199 public: 1200 explicit BuildInternalStackTraceVisitor(int skip_depth) 1201 : skip_depth_(skip_depth), count_(0), pc_trace_(NULL), method_trace_(NULL), local_ref_(NULL) { 1202 } 1203 1204 bool Init(int depth, ScopedJniThreadState& ts) { 1205 // Allocate method trace with an extra slot that will hold the PC trace 1206 method_trace_ = Runtime::Current()->GetClassLinker()->AllocObjectArray<Object>(depth + 1); 1207 if (method_trace_ == NULL) { 1208 return false; 1209 } 1210 // Register a local reference as IntArray::Alloc may trigger GC 1211 local_ref_ = AddLocalReference<jobject>(ts.Env(), method_trace_); 1212 pc_trace_ = IntArray::Alloc(depth); 1213 if (pc_trace_ == NULL) { 1214 return false; 1215 } 1216#ifdef MOVING_GARBAGE_COLLECTOR 1217 // Re-read after potential GC 1218 method_trace_ = Decode<ObjectArray<Object>*>(ts.Env(), local_ref_); 1219#endif 1220 // Save PC trace in last element of method trace, also places it into the 1221 // object graph. 1222 method_trace_->Set(depth, pc_trace_); 1223 return true; 1224 } 1225 1226 virtual ~BuildInternalStackTraceVisitor() {} 1227 1228 bool VisitFrame(const Frame& frame, uintptr_t pc) { 1229 if (method_trace_ == NULL || pc_trace_ == NULL) { 1230 return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError. 1231 } 1232 if (skip_depth_ > 0) { 1233 skip_depth_--; 1234 return true; 1235 } 1236 if (!frame.HasMethod()) { 1237 return true; // ignore callee save frames 1238 } 1239 method_trace_->Set(count_, frame.GetMethod()); 1240 pc_trace_->Set(count_, pc); 1241 ++count_; 1242 return true; 1243 } 1244 1245 jobject GetInternalStackTrace() const { 1246 return local_ref_; 1247 } 1248 1249 private: 1250 // How many more frames to skip. 1251 int32_t skip_depth_; 1252 // Current position down stack trace 1253 uint32_t count_; 1254 // Array of return PC values 1255 IntArray* pc_trace_; 1256 // An array of the methods on the stack, the last entry is a reference to the 1257 // PC trace 1258 ObjectArray<Object>* method_trace_; 1259 // Local indirect reference table entry for method trace 1260 jobject local_ref_; 1261}; 1262 1263#if !defined(ART_USE_LLVM_COMPILER) 1264// TODO: remove this. 1265static uintptr_t ManglePc(uintptr_t pc) { 1266 // Move the PC back 2 bytes as a call will frequently terminate the 1267 // decoding of a particular instruction and we want to make sure we 1268 // get the Dex PC of the instruction with the call and not the 1269 // instruction following. 1270 if (pc > 0) { pc -= 2; } 1271 return pc; 1272} 1273#endif 1274 1275// TODO: remove this. 1276static uintptr_t DemanglePc(uintptr_t pc) { 1277 // Revert mangling for the case where we need the PC to return to the upcall 1278 if (pc > 0) { pc += 2; } 1279 return pc; 1280} 1281 1282void Thread::PushShadowFrame(ShadowFrame* frame) { 1283 frame->SetLink(top_shadow_frame_); 1284 top_shadow_frame_ = frame; 1285} 1286 1287ShadowFrame* Thread::PopShadowFrame() { 1288 CHECK(top_shadow_frame_ != NULL); 1289 ShadowFrame* frame = top_shadow_frame_; 1290 top_shadow_frame_ = frame->GetLink(); 1291 return frame; 1292} 1293 1294void Thread::PushSirt(StackIndirectReferenceTable* sirt) { 1295 sirt->SetLink(top_sirt_); 1296 top_sirt_ = sirt; 1297} 1298 1299StackIndirectReferenceTable* Thread::PopSirt() { 1300 CHECK(top_sirt_ != NULL); 1301 StackIndirectReferenceTable* sirt = top_sirt_; 1302 top_sirt_ = top_sirt_->GetLink(); 1303 return sirt; 1304} 1305 1306#if !defined(ART_USE_LLVM_COMPILER) // LLVM use ShadowFrame 1307 1308void Thread::WalkStack(StackVisitor* visitor, bool include_upcalls) const { 1309 Frame frame = GetTopOfStack(); 1310 uintptr_t pc = ManglePc(top_of_managed_stack_pc_); 1311 uint32_t trace_stack_depth = 0; 1312 // TODO: enable this CHECK after native_to_managed_record_ is initialized during startup. 1313 // CHECK(native_to_managed_record_ != NULL); 1314 NativeToManagedRecord* record = native_to_managed_record_; 1315 bool method_tracing_active = Runtime::Current()->IsMethodTracingActive(); 1316 while (frame.GetSP() != NULL) { 1317 for ( ; frame.GetMethod() != NULL; frame.Next()) { 1318 frame.GetMethod()->AssertPcIsWithinCode(pc); 1319 bool should_continue = visitor->VisitFrame(frame, pc); 1320 if (UNLIKELY(!should_continue)) { 1321 return; 1322 } 1323 uintptr_t return_pc = frame.GetReturnPC(); 1324 if (LIKELY(!method_tracing_active)) { 1325 pc = ManglePc(return_pc); 1326 } else { 1327 // While profiling, the return pc is restored from the side stack, except when walking 1328 // the stack for an exception where the side stack will be unwound in VisitFrame. 1329 if (IsTraceExitPc(return_pc) && !include_upcalls) { 1330 TraceStackFrame trace_frame = GetTraceStackFrame(trace_stack_depth++); 1331 CHECK(trace_frame.method_ == frame.GetMethod()); 1332 pc = ManglePc(trace_frame.return_pc_); 1333 } else { 1334 pc = ManglePc(return_pc); 1335 } 1336 } 1337 } 1338 if (include_upcalls) { 1339 bool should_continue = visitor->VisitFrame(frame, pc); 1340 if (!should_continue) { 1341 return; 1342 } 1343 } 1344 if (record == NULL) { 1345 return; 1346 } 1347 // last_tos should return Frame instead of sp? 1348 frame.SetSP(reinterpret_cast<Method**>(record->last_top_of_managed_stack_)); 1349 pc = ManglePc(record->last_top_of_managed_stack_pc_); 1350 record = record->link_; 1351 } 1352} 1353 1354#else // defined(ART_USE_LLVM_COMPILER) // LLVM uses ShadowFrame 1355 1356void Thread::WalkStack(StackVisitor* visitor, bool /*include_upcalls*/) const { 1357 for (ShadowFrame* cur = top_shadow_frame_; cur; cur = cur->GetLink()) { 1358 Frame frame; 1359 frame.SetSP(reinterpret_cast<Method**>(reinterpret_cast<byte*>(cur) + 1360 ShadowFrame::MethodOffset())); 1361 bool should_continue = visitor->VisitFrame(frame, cur->GetDexPC()); 1362 if (!should_continue) { 1363 return; 1364 } 1365 } 1366} 1367 1368/* 1369 * | | 1370 * | | 1371 * | | 1372 * | . | 1373 * | . | 1374 * | . | 1375 * | . | 1376 * | Method* | 1377 * | . | 1378 * | . | <-- top_shadow_frame_ (ShadowFrame*) 1379 * / +------------------------+ 1380 * ->| . | 1381 * . | . | 1382 * . | . | 1383 * /+------------------------+ 1384 * / | . | 1385 * / | . | 1386 * --- | | . | 1387 * | | | . | 1388 * | | Method* | <-- frame.GetSP() (Method**) 1389 * ShadowFrame \ | . | 1390 * | ->| . | <-- cur (ShadowFrame*) 1391 * --- /+------------------------+ 1392 * / | . | 1393 * / | . | 1394 * --- | | . | 1395 * | cur->GetLink() | | . | 1396 * | | Method* | 1397 * ShadowFrame \ | . | 1398 * | ->| . | 1399 * --- +------------------------+ 1400 * | . | 1401 * | . | 1402 * | . | 1403 * +========================+ 1404 */ 1405 1406#endif 1407 1408jobject Thread::CreateInternalStackTrace(JNIEnv* env) const { 1409 // Compute depth of stack 1410 CountStackDepthVisitor count_visitor; 1411 WalkStack(&count_visitor); 1412 int32_t depth = count_visitor.GetDepth(); 1413 int32_t skip_depth = count_visitor.GetSkipDepth(); 1414 1415 // Transition into runnable state to work on Object*/Array* 1416 ScopedJniThreadState ts(env); 1417 1418 // Build internal stack trace 1419 BuildInternalStackTraceVisitor build_trace_visitor(skip_depth); 1420 if (!build_trace_visitor.Init(depth, ts)) { 1421 return NULL; // Allocation failed 1422 } 1423 WalkStack(&build_trace_visitor); 1424 return build_trace_visitor.GetInternalStackTrace(); 1425} 1426 1427jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal, 1428 jobjectArray output_array, int* stack_depth) { 1429 // Transition into runnable state to work on Object*/Array* 1430 ScopedJniThreadState ts(env); 1431 // Decode the internal stack trace into the depth, method trace and PC trace 1432 ObjectArray<Object>* method_trace = 1433 down_cast<ObjectArray<Object>*>(Decode<Object*>(ts.Env(), internal)); 1434 int32_t depth = method_trace->GetLength() - 1; 1435 IntArray* pc_trace = down_cast<IntArray*>(method_trace->Get(depth)); 1436 1437 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 1438 1439 jobjectArray result; 1440 ObjectArray<StackTraceElement>* java_traces; 1441 if (output_array != NULL) { 1442 // Reuse the array we were given. 1443 result = output_array; 1444 java_traces = reinterpret_cast<ObjectArray<StackTraceElement>*>(Decode<Array*>(env, 1445 output_array)); 1446 // ...adjusting the number of frames we'll write to not exceed the array length. 1447 depth = std::min(depth, java_traces->GetLength()); 1448 } else { 1449 // Create java_trace array and place in local reference table 1450 java_traces = class_linker->AllocStackTraceElementArray(depth); 1451 if (java_traces == NULL) { 1452 return NULL; 1453 } 1454 result = AddLocalReference<jobjectArray>(ts.Env(), java_traces); 1455 } 1456 1457 if (stack_depth != NULL) { 1458 *stack_depth = depth; 1459 } 1460 1461 MethodHelper mh; 1462 for (int32_t i = 0; i < depth; ++i) { 1463 // Prepare parameters for StackTraceElement(String cls, String method, String file, int line) 1464 Method* method = down_cast<Method*>(method_trace->Get(i)); 1465 mh.ChangeMethod(method); 1466 uint32_t native_pc = pc_trace->Get(i); 1467 int32_t line_number = mh.GetLineNumFromNativePC(native_pc); 1468 // Allocate element, potentially triggering GC 1469 // TODO: reuse class_name_object via Class::name_? 1470 const char* descriptor = mh.GetDeclaringClassDescriptor(); 1471 CHECK(descriptor != NULL); 1472 std::string class_name(PrettyDescriptor(descriptor)); 1473 SirtRef<String> class_name_object(String::AllocFromModifiedUtf8(class_name.c_str())); 1474 if (class_name_object.get() == NULL) { 1475 return NULL; 1476 } 1477 const char* method_name = mh.GetName(); 1478 CHECK(method_name != NULL); 1479 SirtRef<String> method_name_object(String::AllocFromModifiedUtf8(method_name)); 1480 if (method_name_object.get() == NULL) { 1481 return NULL; 1482 } 1483 const char* source_file = mh.GetDeclaringClassSourceFile(); 1484 SirtRef<String> source_name_object(String::AllocFromModifiedUtf8(source_file)); 1485 StackTraceElement* obj = StackTraceElement::Alloc(class_name_object.get(), 1486 method_name_object.get(), 1487 source_name_object.get(), 1488 line_number); 1489 if (obj == NULL) { 1490 return NULL; 1491 } 1492#ifdef MOVING_GARBAGE_COLLECTOR 1493 // Re-read after potential GC 1494 java_traces = Decode<ObjectArray<Object>*>(ts.Env(), result); 1495 method_trace = down_cast<ObjectArray<Object>*>(Decode<Object*>(ts.Env(), internal)); 1496 pc_trace = down_cast<IntArray*>(method_trace->Get(depth)); 1497#endif 1498 java_traces->Set(i, obj); 1499 } 1500 return result; 1501} 1502 1503void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) { 1504 va_list args; 1505 va_start(args, fmt); 1506 ThrowNewExceptionV(exception_class_descriptor, fmt, args); 1507 va_end(args); 1508} 1509 1510void Thread::ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap) { 1511 std::string msg; 1512 StringAppendV(&msg, fmt, ap); 1513 ThrowNewException(exception_class_descriptor, msg.c_str()); 1514} 1515 1516void Thread::ThrowNewException(const char* exception_class_descriptor, const char* msg) { 1517 // Convert "Ljava/lang/Exception;" into JNI-style "java/lang/Exception". 1518 CHECK_EQ('L', exception_class_descriptor[0]); 1519 std::string descriptor(exception_class_descriptor + 1); 1520 CHECK_EQ(';', descriptor[descriptor.length() - 1]); 1521 descriptor.erase(descriptor.length() - 1); 1522 1523 JNIEnv* env = GetJniEnv(); 1524 ScopedLocalRef<jclass> exception_class(env, env->FindClass(descriptor.c_str())); 1525 if (exception_class.get() == NULL) { 1526 LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI FindClass failed: " 1527 << PrettyTypeOf(GetException()); 1528 CHECK(IsExceptionPending()); 1529 return; 1530 } 1531 if (!Runtime::Current()->IsStarted()) { 1532 // Something is trying to throw an exception without a started 1533 // runtime, which is the common case in the compiler. We won't be 1534 // able to invoke the constructor of the exception, so use 1535 // AllocObject which will not invoke a constructor. 1536 ScopedLocalRef<jthrowable> exception( 1537 env, reinterpret_cast<jthrowable>(env->AllocObject(exception_class.get()))); 1538 if (exception.get() != NULL) { 1539 ScopedJniThreadState ts(env); 1540 Throwable* t = reinterpret_cast<Throwable*>(ts.Self()->DecodeJObject(exception.get())); 1541 t->SetDetailMessage(String::AllocFromModifiedUtf8(msg)); 1542 ts.Self()->SetException(t); 1543 } else { 1544 LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI AllocObject failed: " 1545 << PrettyTypeOf(GetException()); 1546 CHECK(IsExceptionPending()); 1547 } 1548 return; 1549 } 1550 int rc = env->ThrowNew(exception_class.get(), msg); 1551 if (rc != JNI_OK) { 1552 LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI ThrowNew failed: " 1553 << PrettyTypeOf(GetException()); 1554 CHECK(IsExceptionPending()); 1555 } 1556} 1557 1558void Thread::ThrowOutOfMemoryError(const char* msg) { 1559 LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s", 1560 msg, (throwing_OutOfMemoryError_ ? " (recursive case)" : "")); 1561 if (!throwing_OutOfMemoryError_) { 1562 throwing_OutOfMemoryError_ = true; 1563 ThrowNewException("Ljava/lang/OutOfMemoryError;", NULL); 1564 } else { 1565 SetException(pre_allocated_OutOfMemoryError_); 1566 } 1567 throwing_OutOfMemoryError_ = false; 1568} 1569 1570 1571Thread* Thread::CurrentFromGdb() { 1572 return Thread::Current(); 1573} 1574 1575void Thread::DumpFromGdb() const { 1576 std::ostringstream ss; 1577 Dump(ss); 1578 std::string str(ss.str()); 1579 // log to stderr for debugging command line processes 1580 std::cerr << str; 1581#ifdef HAVE_ANDROID_OS 1582 // log to logcat for debugging frameworks processes 1583 LOG(INFO) << str; 1584#endif 1585} 1586 1587struct EntryPointInfo { 1588 uint32_t offset; 1589 const char* name; 1590}; 1591#define ENTRY_POINT_INFO(x) { ENTRYPOINT_OFFSET(x), #x } 1592static const EntryPointInfo gThreadEntryPointInfo[] = { 1593 ENTRY_POINT_INFO(pAllocArrayFromCode), 1594 ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck), 1595 ENTRY_POINT_INFO(pAllocObjectFromCode), 1596 ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck), 1597 ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode), 1598 ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck), 1599 ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode), 1600 ENTRY_POINT_INFO(pCanPutArrayElementFromCode), 1601 ENTRY_POINT_INFO(pCheckCastFromCode), 1602 ENTRY_POINT_INFO(pDebugMe), 1603 ENTRY_POINT_INFO(pUpdateDebuggerFromCode), 1604 ENTRY_POINT_INFO(pInitializeStaticStorage), 1605 ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode), 1606 ENTRY_POINT_INFO(pInitializeTypeFromCode), 1607 ENTRY_POINT_INFO(pResolveStringFromCode), 1608 ENTRY_POINT_INFO(pSet32Instance), 1609 ENTRY_POINT_INFO(pSet32Static), 1610 ENTRY_POINT_INFO(pSet64Instance), 1611 ENTRY_POINT_INFO(pSet64Static), 1612 ENTRY_POINT_INFO(pSetObjInstance), 1613 ENTRY_POINT_INFO(pSetObjStatic), 1614 ENTRY_POINT_INFO(pGet32Instance), 1615 ENTRY_POINT_INFO(pGet32Static), 1616 ENTRY_POINT_INFO(pGet64Instance), 1617 ENTRY_POINT_INFO(pGet64Static), 1618 ENTRY_POINT_INFO(pGetObjInstance), 1619 ENTRY_POINT_INFO(pGetObjStatic), 1620 ENTRY_POINT_INFO(pHandleFillArrayDataFromCode), 1621 ENTRY_POINT_INFO(pDecodeJObjectInThread), 1622 ENTRY_POINT_INFO(pFindNativeMethod), 1623 ENTRY_POINT_INFO(pLockObjectFromCode), 1624 ENTRY_POINT_INFO(pUnlockObjectFromCode), 1625 ENTRY_POINT_INFO(pCmpgDouble), 1626 ENTRY_POINT_INFO(pCmpgFloat), 1627 ENTRY_POINT_INFO(pCmplDouble), 1628 ENTRY_POINT_INFO(pCmplFloat), 1629 ENTRY_POINT_INFO(pDadd), 1630 ENTRY_POINT_INFO(pDdiv), 1631 ENTRY_POINT_INFO(pDmul), 1632 ENTRY_POINT_INFO(pDsub), 1633 ENTRY_POINT_INFO(pF2d), 1634 ENTRY_POINT_INFO(pFmod), 1635 ENTRY_POINT_INFO(pI2d), 1636 ENTRY_POINT_INFO(pL2d), 1637 ENTRY_POINT_INFO(pD2f), 1638 ENTRY_POINT_INFO(pFadd), 1639 ENTRY_POINT_INFO(pFdiv), 1640 ENTRY_POINT_INFO(pFmodf), 1641 ENTRY_POINT_INFO(pFmul), 1642 ENTRY_POINT_INFO(pFsub), 1643 ENTRY_POINT_INFO(pI2f), 1644 ENTRY_POINT_INFO(pL2f), 1645 ENTRY_POINT_INFO(pD2iz), 1646 ENTRY_POINT_INFO(pF2iz), 1647 ENTRY_POINT_INFO(pIdivmod), 1648 ENTRY_POINT_INFO(pD2l), 1649 ENTRY_POINT_INFO(pF2l), 1650 ENTRY_POINT_INFO(pLdiv), 1651 ENTRY_POINT_INFO(pLdivmod), 1652 ENTRY_POINT_INFO(pLmul), 1653 ENTRY_POINT_INFO(pShlLong), 1654 ENTRY_POINT_INFO(pShrLong), 1655 ENTRY_POINT_INFO(pUshrLong), 1656 ENTRY_POINT_INFO(pIndexOf), 1657 ENTRY_POINT_INFO(pMemcmp16), 1658 ENTRY_POINT_INFO(pStringCompareTo), 1659 ENTRY_POINT_INFO(pMemcpy), 1660 ENTRY_POINT_INFO(pUnresolvedDirectMethodTrampolineFromCode), 1661 ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck), 1662 ENTRY_POINT_INFO(pInvokeInterfaceTrampoline), 1663 ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck), 1664 ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck), 1665 ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck), 1666 ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck), 1667 ENTRY_POINT_INFO(pCheckSuspendFromCode), 1668 ENTRY_POINT_INFO(pTestSuspendFromCode), 1669 ENTRY_POINT_INFO(pDeliverException), 1670 ENTRY_POINT_INFO(pThrowAbstractMethodErrorFromCode), 1671 ENTRY_POINT_INFO(pThrowArrayBoundsFromCode), 1672 ENTRY_POINT_INFO(pThrowDivZeroFromCode), 1673 ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode), 1674 ENTRY_POINT_INFO(pThrowNullPointerFromCode), 1675 ENTRY_POINT_INFO(pThrowStackOverflowFromCode), 1676 ENTRY_POINT_INFO(pThrowVerificationErrorFromCode), 1677}; 1678#undef ENTRY_POINT_INFO 1679 1680void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) { 1681 CHECK_EQ(size_of_pointers, 4U); // TODO: support 64-bit targets. 1682 1683#define DO_THREAD_OFFSET(x) if (offset == static_cast<uint32_t>(OFFSETOF_VOLATILE_MEMBER(Thread, x))) { os << # x; return; } 1684 DO_THREAD_OFFSET(card_table_); 1685 DO_THREAD_OFFSET(exception_); 1686 DO_THREAD_OFFSET(jni_env_); 1687 DO_THREAD_OFFSET(self_); 1688 DO_THREAD_OFFSET(stack_end_); 1689 DO_THREAD_OFFSET(state_); 1690 DO_THREAD_OFFSET(suspend_count_); 1691 DO_THREAD_OFFSET(thin_lock_id_); 1692 DO_THREAD_OFFSET(top_of_managed_stack_); 1693 DO_THREAD_OFFSET(top_of_managed_stack_pc_); 1694 DO_THREAD_OFFSET(top_sirt_); 1695#undef DO_THREAD_OFFSET 1696 1697 size_t entry_point_count = arraysize(gThreadEntryPointInfo); 1698 CHECK_EQ(entry_point_count * size_of_pointers, sizeof(EntryPoints)); 1699 uint32_t expected_offset = OFFSETOF_MEMBER(Thread, entrypoints_); 1700 for (size_t i = 0; i < entry_point_count; ++i) { 1701 CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset); 1702 expected_offset += size_of_pointers; 1703 if (gThreadEntryPointInfo[i].offset == offset) { 1704 os << gThreadEntryPointInfo[i].name; 1705 return; 1706 } 1707 } 1708 os << offset; 1709} 1710 1711class CatchBlockStackVisitor : public Thread::StackVisitor { 1712 public: 1713 CatchBlockStackVisitor(Class* to_find, Context* ljc) 1714 : to_find_(to_find), long_jump_context_(ljc), native_method_count_(0), 1715 method_tracing_active_(Runtime::Current()->IsMethodTracingActive()) { 1716#ifndef NDEBUG 1717 handler_pc_ = 0xEBADC0DE; 1718 handler_frame_.SetSP(reinterpret_cast<Method**>(0xEBADF00D)); 1719#endif 1720 } 1721 1722 bool VisitFrame(const Frame& fr, uintptr_t pc) { 1723 Method* method = fr.GetMethod(); 1724 if (method == NULL) { 1725 // This is the upcall, we remember the frame and last_pc so that we may 1726 // long jump to them 1727 handler_pc_ = DemanglePc(pc); 1728 handler_frame_ = fr; 1729 return false; // End stack walk. 1730 } 1731 uint32_t dex_pc = DexFile::kDexNoIndex; 1732 if (method->IsRuntimeMethod()) { 1733 // ignore callee save method 1734 DCHECK(method->IsCalleeSaveMethod()); 1735 } else if (method->IsNative()) { 1736 native_method_count_++; 1737 } else { 1738 // Unwind stack when an exception occurs during method tracing 1739 if (UNLIKELY(method_tracing_active_)) { 1740#if !defined(ART_USE_LLVM_COMPILER) 1741 if (IsTraceExitPc(DemanglePc(pc))) { 1742 pc = ManglePc(TraceMethodUnwindFromCode(Thread::Current())); 1743 } 1744#else 1745 UNIMPLEMENTED(FATAL); 1746#endif 1747 } 1748 dex_pc = method->ToDexPC(pc); 1749 } 1750 if (dex_pc != DexFile::kDexNoIndex) { 1751 uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc); 1752 if (found_dex_pc != DexFile::kDexNoIndex) { 1753 handler_pc_ = method->ToNativePC(found_dex_pc); 1754 handler_frame_ = fr; 1755 return false; // End stack walk. 1756 } 1757 } 1758#if !defined(ART_USE_LLVM_COMPILER) 1759 // Caller may be handler, fill in callee saves in context 1760 long_jump_context_->FillCalleeSaves(fr); 1761#endif 1762 return true; // Continue stack walk. 1763 } 1764 1765 // The type of the exception catch block to find 1766 Class* to_find_; 1767 // Frame with found handler or last frame if no handler found 1768 Frame handler_frame_; 1769 // PC to branch to for the handler 1770 uintptr_t handler_pc_; 1771 // Context that will be the target of the long jump 1772 Context* long_jump_context_; 1773 // Number of native methods passed in crawl (equates to number of SIRTs to pop) 1774 uint32_t native_method_count_; 1775 // Is method tracing active? 1776 const bool method_tracing_active_; 1777}; 1778 1779void Thread::DeliverException() { 1780#if !defined(ART_USE_LLVM_COMPILER) 1781 const bool kDebugExceptionDelivery = false; 1782 Throwable* exception = GetException(); // Get exception from thread 1783 CHECK(exception != NULL); 1784 // Don't leave exception visible while we try to find the handler, which may cause class 1785 // resolution. 1786 ClearException(); 1787 if (kDebugExceptionDelivery) { 1788 String* msg = exception->GetDetailMessage(); 1789 std::string str_msg(msg != NULL ? msg->ToModifiedUtf8() : ""); 1790 DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception) 1791 << ": " << str_msg << std::endl); 1792 } 1793 1794 Context* long_jump_context = GetLongJumpContext(); 1795 CatchBlockStackVisitor catch_finder(exception->GetClass(), long_jump_context); 1796 WalkStack(&catch_finder, true); 1797 1798 Method** sp; 1799 uintptr_t throw_native_pc; 1800 Method* throw_method = GetCurrentMethod(&throw_native_pc, &sp); 1801 uintptr_t catch_native_pc = catch_finder.handler_pc_; 1802 Method* catch_method = catch_finder.handler_frame_.GetMethod(); 1803 Dbg::PostException(sp, throw_method, throw_native_pc, catch_method, catch_native_pc, exception); 1804 1805 if (kDebugExceptionDelivery) { 1806 if (catch_method == NULL) { 1807 LOG(INFO) << "Handler is upcall"; 1808 } else { 1809 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 1810 const DexFile& dex_file = 1811 class_linker->FindDexFile(catch_method->GetDeclaringClass()->GetDexCache()); 1812 int line_number = dex_file.GetLineNumFromPC(catch_method, 1813 catch_method->ToDexPC(catch_finder.handler_pc_)); 1814 LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")"; 1815 } 1816 } 1817 SetException(exception); 1818 CHECK_NE(catch_native_pc, 0u); 1819 long_jump_context->SetSP(reinterpret_cast<uintptr_t>(catch_finder.handler_frame_.GetSP())); 1820 long_jump_context->SetPC(catch_native_pc); 1821 long_jump_context->SmashCallerSaves(); 1822 long_jump_context->DoLongJump(); 1823#endif 1824 LOG(FATAL) << "UNREACHABLE"; 1825} 1826 1827Context* Thread::GetLongJumpContext() { 1828 Context* result = long_jump_context_; 1829#if !defined(ART_USE_LLVM_COMPILER) 1830 if (result == NULL) { 1831 result = Context::Create(); 1832 long_jump_context_ = result; 1833 } 1834#endif 1835 return result; 1836} 1837 1838#if !defined(ART_USE_LLVM_COMPILER) 1839Method* Thread::GetCurrentMethod(uintptr_t* pc, Method*** sp) const { 1840 Frame f = top_of_managed_stack_; 1841 Method* m = f.GetMethod(); 1842 uintptr_t native_pc = top_of_managed_stack_pc_; 1843 1844 // We use JNI internally for exception throwing, so it's possible to arrive 1845 // here via a "FromCode" function, in which case there's a synthetic 1846 // callee-save method at the top of the stack. These shouldn't be user-visible, 1847 // so if we find one, skip it and return the compiled method underneath. 1848 if (m != NULL && m->IsCalleeSaveMethod()) { 1849 native_pc = f.GetReturnPC(); 1850 f.Next(); 1851 m = f.GetMethod(); 1852 } 1853 if (pc != NULL) { 1854 *pc = (m != NULL) ? ManglePc(native_pc) : 0; 1855 } 1856 if (sp != NULL) { 1857 *sp = f.GetSP(); 1858 } 1859 return m; 1860} 1861#else 1862Method* Thread::GetCurrentMethod(uintptr_t*, Method***) const { 1863 ShadowFrame* frame = top_shadow_frame_; 1864 if (frame == NULL) { 1865 return NULL; 1866 } 1867 return frame->GetMethod(); 1868} 1869#endif 1870 1871bool Thread::HoldsLock(Object* object) { 1872 if (object == NULL) { 1873 return false; 1874 } 1875 return object->GetThinLockId() == thin_lock_id_; 1876} 1877 1878bool Thread::IsDaemon() { 1879 return gThread_daemon->GetBoolean(peer_); 1880} 1881 1882#if !defined(ART_USE_LLVM_COMPILER) 1883class ReferenceMapVisitor : public Thread::StackVisitor { 1884 public: 1885 ReferenceMapVisitor(Context* context, Heap::RootVisitor* root_visitor, void* arg) : 1886 context_(context), root_visitor_(root_visitor), arg_(arg) { 1887 } 1888 1889 bool VisitFrame(const Frame& frame, uintptr_t pc) { 1890 Method* m = frame.GetMethod(); 1891 if (false) { 1892 LOG(INFO) << "Visiting stack roots in " << PrettyMethod(m) 1893 << StringPrintf("@ PC:%04x", m->ToDexPC(pc)); 1894 } 1895 // Process register map (which native and callee save methods don't have) 1896 if (!m->IsNative() && !m->IsCalleeSaveMethod() && !m->IsProxyMethod()) { 1897 CHECK(m->GetGcMap() != NULL) << PrettyMethod(m); 1898 CHECK_NE(0U, m->GetGcMapLength()) << PrettyMethod(m); 1899 verifier::PcToReferenceMap map(m->GetGcMap(), m->GetGcMapLength()); 1900 const uint8_t* reg_bitmap = map.FindBitMap(m->ToDexPC(pc)); 1901 CHECK(reg_bitmap != NULL); 1902 const VmapTable vmap_table(m->GetVmapTableRaw()); 1903 const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem(); 1904 DCHECK(code_item != NULL); // can't be NULL or how would we compile its instructions? 1905 uint32_t core_spills = m->GetCoreSpillMask(); 1906 uint32_t fp_spills = m->GetFpSpillMask(); 1907 size_t frame_size = m->GetFrameSizeInBytes(); 1908 // For all dex registers in the bitmap 1909 size_t num_regs = std::min(map.RegWidth() * 8, 1910 static_cast<size_t>(code_item->registers_size_)); 1911 for (size_t reg = 0; reg < num_regs; ++reg) { 1912 // Does this register hold a reference? 1913 if (TestBitmap(reg, reg_bitmap)) { 1914 uint32_t vmap_offset; 1915 Object* ref; 1916 if (vmap_table.IsInContext(reg, vmap_offset)) { 1917 // Compute the register we need to load from the context 1918 uint32_t spill_mask = m->GetCoreSpillMask(); 1919 CHECK_LT(vmap_offset, static_cast<uint32_t>(__builtin_popcount(spill_mask))); 1920 uint32_t matches = 0; 1921 uint32_t spill_shifts = 0; 1922 while (matches != (vmap_offset + 1)) { 1923 DCHECK_NE(spill_mask, 0u); 1924 matches += spill_mask & 1; // Add 1 if the low bit is set 1925 spill_mask >>= 1; 1926 spill_shifts++; 1927 } 1928 spill_shifts--; // wind back one as we want the last match 1929 ref = reinterpret_cast<Object*>(context_->GetGPR(spill_shifts)); 1930 } else { 1931 ref = reinterpret_cast<Object*>(frame.GetVReg(code_item, core_spills, fp_spills, 1932 frame_size, reg)); 1933 } 1934 if (ref != NULL) { 1935 root_visitor_(ref, arg_); 1936 } 1937 } 1938 } 1939 } 1940 context_->FillCalleeSaves(frame); 1941 return true; 1942 } 1943 1944 private: 1945 bool TestBitmap(int reg, const uint8_t* reg_vector) { 1946 return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0; 1947 } 1948 1949 // Context used to build up picture of callee saves 1950 Context* context_; 1951 // Call-back when we visit a root 1952 Heap::RootVisitor* root_visitor_; 1953 // Argument to call-back 1954 void* arg_; 1955}; 1956#endif 1957 1958void Thread::VisitRoots(Heap::RootVisitor* visitor, void* arg) { 1959 if (exception_ != NULL) { 1960 visitor(exception_, arg); 1961 } 1962 if (peer_ != NULL) { 1963 visitor(peer_, arg); 1964 } 1965 if (pre_allocated_OutOfMemoryError_ != NULL) { 1966 visitor(pre_allocated_OutOfMemoryError_, arg); 1967 } 1968 if (class_loader_override_ != NULL) { 1969 visitor(class_loader_override_, arg); 1970 } 1971 jni_env_->locals.VisitRoots(visitor, arg); 1972 jni_env_->monitors.VisitRoots(visitor, arg); 1973 1974 SirtVisitRoots(visitor, arg); 1975 ShadowFrameVisitRoots(visitor, arg); 1976 1977#if !defined(ART_USE_LLVM_COMPILER) 1978 // Cheat and steal the long jump context. Assume that we are not doing a GC during exception 1979 // delivery. 1980 Context* context = GetLongJumpContext(); 1981 // Visit roots on this thread's stack 1982 ReferenceMapVisitor mapper(context, visitor, arg); 1983 WalkStack(&mapper); 1984#endif 1985} 1986 1987#if VERIFY_OBJECT_ENABLED 1988static void VerifyObject(const Object* obj, void*) { 1989 Runtime::Current()->GetHeap()->VerifyObject(obj); 1990} 1991 1992void Thread::VerifyStack() { 1993#if !defined(ART_USE_LLVM_COMPILER) 1994 UniquePtr<Context> context(Context::Create()); 1995 ReferenceMapVisitor mapper(context.get(), VerifyObject, NULL); 1996 WalkStack(&mapper); 1997#endif 1998} 1999#endif 2000 2001std::ostream& operator<<(std::ostream& os, const Thread& thread) { 2002 thread.Dump(os, false); 2003 return os; 2004} 2005 2006void Thread::CheckSafeToLockOrUnlock(MutexRank rank, bool is_locking) { 2007 if (this == NULL) { 2008 CHECK(Runtime::Current()->IsShuttingDown()); 2009 return; 2010 } 2011 if (is_locking) { 2012 if (held_mutexes_[rank] == 0) { 2013 bool bad_mutexes_held = false; 2014 for (int i = kMaxMutexRank; i > rank; --i) { 2015 if (held_mutexes_[i] != 0) { 2016 LOG(ERROR) << "holding " << static_cast<MutexRank>(i) << " while " << (is_locking ? "locking" : "unlocking") << " " << rank; 2017 bad_mutexes_held = true; 2018 } 2019 } 2020 CHECK(!bad_mutexes_held) << rank; 2021 } 2022 ++held_mutexes_[rank]; 2023 } else { 2024 CHECK_GT(held_mutexes_[rank], 0U) << rank; 2025 --held_mutexes_[rank]; 2026 } 2027} 2028 2029void Thread::CheckSafeToWait(MutexRank rank) { 2030 if (this == NULL) { 2031 CHECK(Runtime::Current()->IsShuttingDown()); 2032 return; 2033 } 2034 bool bad_mutexes_held = false; 2035 for (int i = kMaxMutexRank; i >= 0; --i) { 2036 if (i != rank && held_mutexes_[i] != 0) { 2037 LOG(ERROR) << "holding " << static_cast<MutexRank>(i) << " while doing condition variable wait on " << rank; 2038 bad_mutexes_held = true; 2039 } 2040 } 2041 if (held_mutexes_[rank] == 0) { 2042 LOG(ERROR) << "*not* holding " << rank << " while doing condition variable wait on it"; 2043 bad_mutexes_held = true; 2044 } 2045 CHECK(!bad_mutexes_held); 2046} 2047 2048} // namespace art 2049