thread.cc revision 2acf36d8cfeb5ddb293904148aa70f25ef6d8845
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "thread.h" 18 19#include <dynamic_annotations.h> 20#include <pthread.h> 21#include <signal.h> 22#include <sys/mman.h> 23#include <sys/resource.h> 24#include <sys/time.h> 25 26#include <algorithm> 27#include <bitset> 28#include <cerrno> 29#include <iostream> 30#include <list> 31 32#include "debugger.h" 33#include "class_linker.h" 34#include "class_loader.h" 35#include "heap.h" 36#include "jni_internal.h" 37#include "monitor.h" 38#include "oat/runtime/context.h" 39#include "object.h" 40#include "object_utils.h" 41#include "reflection.h" 42#include "runtime.h" 43#include "runtime_support.h" 44#include "ScopedLocalRef.h" 45#include "scoped_jni_thread_state.h" 46#include "shadow_frame.h" 47#include "space.h" 48#include "stack.h" 49#include "stack_indirect_reference_table.h" 50#include "thread_list.h" 51#include "utils.h" 52#include "verifier/gc_map.h" 53 54namespace art { 55 56pthread_key_t Thread::pthread_key_self_; 57 58static Class* gThreadGroup = NULL; 59static Class* gThreadLock = NULL; 60static Field* gThread_daemon = NULL; 61static Field* gThread_group = NULL; 62static Field* gThread_lock = NULL; 63static Field* gThread_name = NULL; 64static Field* gThread_priority = NULL; 65static Field* gThread_uncaughtHandler = NULL; 66static Field* gThread_vmData = NULL; 67static Field* gThreadGroup_mMain = NULL; 68static Field* gThreadGroup_mSystem = NULL; 69static Field* gThreadGroup_name = NULL; 70static Field* gThreadLock_thread = NULL; 71static Method* gThread_run = NULL; 72static Method* gThreadGroup_removeThread = NULL; 73static Method* gUncaughtExceptionHandler_uncaughtException = NULL; 74 75void Thread::InitCardTable() { 76 card_table_ = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin(); 77} 78 79#if !defined(__APPLE__) 80static void UnimplementedEntryPoint() { 81 UNIMPLEMENTED(FATAL); 82} 83#endif 84 85void Thread::InitFunctionPointers() { 86#if !defined(__APPLE__) // The Mac GCC is too old to accept this code. 87 // Insert a placeholder so we can easily tell if we call an unimplemented entry point. 88 uintptr_t* begin = reinterpret_cast<uintptr_t*>(&entrypoints_); 89 uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(entrypoints_)); 90 for (uintptr_t* it = begin; it != end; ++it) { 91 *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint); 92 } 93#endif 94 InitEntryPoints(&entrypoints_); 95} 96 97void Thread::SetDebuggerUpdatesEnabled(bool enabled) { 98 LOG(INFO) << "Turning debugger updates " << (enabled ? "on" : "off") << " for " << *this; 99#if !defined(ART_USE_LLVM_COMPILER) 100 ChangeDebuggerEntryPoint(&entrypoints_, enabled); 101#else 102 UNIMPLEMENTED(FATAL); 103#endif 104} 105 106void Thread::InitTid() { 107 tid_ = ::art::GetTid(); 108} 109 110void Thread::InitAfterFork() { 111 InitTid(); 112#if defined(__BIONIC__) 113 // Work around a bionic bug. 114 struct bionic_pthread_internal_t { 115 void* next; 116 void** pref; 117 pthread_attr_t attr; 118 pid_t kernel_id; 119 // et cetera. we just need 'kernel_id' so we can stop here. 120 }; 121 reinterpret_cast<bionic_pthread_internal_t*>(pthread_self())->kernel_id = tid_; 122#endif 123} 124 125void* Thread::CreateCallback(void* arg) { 126 Thread* self = reinterpret_cast<Thread*>(arg); 127 self->Init(); 128 129 // Wait until it's safe to start running code. (There may have been a suspend-all 130 // in progress while we were starting up.) 131 Runtime* runtime = Runtime::Current(); 132 runtime->GetThreadList()->WaitForGo(); 133 134 { 135 CHECK_EQ(self->GetState(), kRunnable); 136 SirtRef<String> thread_name(self->GetThreadName()); 137 self->SetThreadName(thread_name->ToModifiedUtf8().c_str()); 138 } 139 140 Dbg::PostThreadStart(self); 141 142 // Invoke the 'run' method of our java.lang.Thread. 143 CHECK(self->peer_ != NULL); 144 Object* receiver = self->peer_; 145 Method* m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(gThread_run); 146 m->Invoke(self, receiver, NULL, NULL); 147 148 // Detach. 149 runtime->GetThreadList()->Unregister(); 150 151 return NULL; 152} 153 154static void SetVmData(Object* managed_thread, Thread* native_thread) { 155 gThread_vmData->SetInt(managed_thread, reinterpret_cast<uintptr_t>(native_thread)); 156} 157 158Thread* Thread::FromManagedThread(Object* thread_peer) { 159 return reinterpret_cast<Thread*>(static_cast<uintptr_t>(gThread_vmData->GetInt(thread_peer))); 160} 161 162Thread* Thread::FromManagedThread(JNIEnv* env, jobject java_thread) { 163 return FromManagedThread(Decode<Object*>(env, java_thread)); 164} 165 166static size_t FixStackSize(size_t stack_size) { 167 // A stack size of zero means "use the default". 168 if (stack_size == 0) { 169 stack_size = Runtime::Current()->GetDefaultStackSize(); 170 } 171 172 // Dalvik used the bionic pthread default stack size for native threads, 173 // so include that here to support apps that expect large native stacks. 174 stack_size += 1 * MB; 175 176 177 // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN. 178 if (stack_size < PTHREAD_STACK_MIN) { 179 stack_size = PTHREAD_STACK_MIN; 180 } 181 182 // It's likely that callers are trying to ensure they have at least a certain amount of 183 // stack space, so we should add our reserved space on top of what they requested, rather 184 // than implicitly take it away from them. 185 stack_size += Thread::kStackOverflowReservedBytes; 186 187 // Some systems require the stack size to be a multiple of the system page size, so round up. 188 stack_size = RoundUp(stack_size, kPageSize); 189 190 return stack_size; 191} 192 193static void SigAltStack(stack_t* new_stack, stack_t* old_stack) { 194 if (sigaltstack(new_stack, old_stack) == -1) { 195 PLOG(FATAL) << "sigaltstack failed"; 196 } 197} 198 199static void SetUpAlternateSignalStack() { 200 // Create and set an alternate signal stack. 201 stack_t ss; 202 ss.ss_sp = new uint8_t[SIGSTKSZ]; 203 ss.ss_size = SIGSTKSZ; 204 ss.ss_flags = 0; 205 CHECK(ss.ss_sp != NULL); 206 SigAltStack(&ss, NULL); 207 208 // Double-check that it worked. 209 ss.ss_sp = NULL; 210 SigAltStack(NULL, &ss); 211 VLOG(threads) << "Alternate signal stack is " << PrettySize(ss.ss_size) << " at " << ss.ss_sp; 212} 213 214static void TearDownAlternateSignalStack() { 215 // Get the pointer so we can free the memory. 216 stack_t ss; 217 SigAltStack(NULL, &ss); 218 uint8_t* allocated_signal_stack = reinterpret_cast<uint8_t*>(ss.ss_sp); 219 220 // Tell the kernel to stop using it. 221 ss.ss_sp = NULL; 222 ss.ss_flags = SS_DISABLE; 223 ss.ss_size = 0; 224 SigAltStack(&ss, NULL); 225 226 // Free it. 227 delete[] allocated_signal_stack; 228} 229 230void Thread::Create(Object* peer, size_t stack_size) { 231 CHECK(peer != NULL); 232 233 stack_size = FixStackSize(stack_size); 234 235 Thread* native_thread = new Thread; 236 native_thread->peer_ = peer; 237 238 // Thread.start is synchronized, so we know that vmData is 0, 239 // and know that we're not racing to assign it. 240 SetVmData(peer, native_thread); 241 242 { 243 ScopedThreadStateChange tsc(Thread::Current(), kVmWait); 244 pthread_t new_pthread; 245 pthread_attr_t attr; 246 CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread"); 247 CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED"); 248 CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size); 249 CHECK_PTHREAD_CALL(pthread_create, (&new_pthread, &attr, Thread::CreateCallback, native_thread), "new thread"); 250 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread"); 251 } 252 253 // Let the child know when it's safe to start running. 254 Runtime::Current()->GetThreadList()->SignalGo(native_thread); 255} 256 257void Thread::Init() { 258 // This function does all the initialization that must be run by the native thread it applies to. 259 // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so 260 // we can handshake with the corresponding native thread when it's ready.) Check this native 261 // thread hasn't been through here already... 262 CHECK(Thread::Current() == NULL); 263 264 SetUpAlternateSignalStack(); 265 InitCpu(); 266 InitFunctionPointers(); 267 InitCardTable(); 268 269 Runtime* runtime = Runtime::Current(); 270 CHECK(runtime != NULL); 271 272 thin_lock_id_ = runtime->GetThreadList()->AllocThreadId(); 273 274 InitTid(); 275 InitStackHwm(); 276 277 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach"); 278 279 jni_env_ = new JNIEnvExt(this, runtime->GetJavaVM()); 280 281 runtime->GetThreadList()->Register(); 282} 283 284Thread* Thread::Attach(const char* thread_name, bool as_daemon, Object* thread_group) { 285 Thread* self = new Thread; 286 self->Init(); 287 288 self->SetState(kNative); 289 290 // If we're the main thread, ClassLinker won't be created until after we're attached, 291 // so that thread needs a two-stage attach. Regular threads don't need this hack. 292 // In the compiler, all threads need this hack, because no-one's going to be getting 293 // a native peer! 294 if (self->thin_lock_id_ != ThreadList::kMainId && !Runtime::Current()->IsCompiler()) { 295 self->CreatePeer(thread_name, as_daemon, thread_group); 296 } else { 297 // These aren't necessary, but they improve diagnostics for unit tests & command-line tools. 298 if (thread_name != NULL) { 299 self->name_->assign(thread_name); 300 ::art::SetThreadName(thread_name); 301 } 302 } 303 304 self->GetJniEnv()->locals.AssertEmpty(); 305 return self; 306} 307 308Object* Thread::GetMainThreadGroup() { 309 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(gThreadGroup, true, true)) { 310 return NULL; 311 } 312 return gThreadGroup_mMain->GetObject(NULL); 313} 314 315Object* Thread::GetSystemThreadGroup() { 316 if (!Runtime::Current()->GetClassLinker()->EnsureInitialized(gThreadGroup, true, true)) { 317 return NULL; 318 } 319 return gThreadGroup_mSystem->GetObject(NULL); 320} 321 322void Thread::CreatePeer(const char* name, bool as_daemon, Object* thread_group) { 323 CHECK(Runtime::Current()->IsStarted()); 324 JNIEnv* env = jni_env_; 325 326 if (thread_group == NULL) { 327 thread_group = Thread::GetMainThreadGroup(); 328 } 329 ScopedLocalRef<jobject> java_thread_group(env, AddLocalReference<jobject>(env, thread_group)); 330 ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name)); 331 jint thread_priority = GetNativePriority(); 332 jboolean thread_is_daemon = as_daemon; 333 334 ScopedLocalRef<jclass> c(env, env->FindClass("java/lang/Thread")); 335 ScopedLocalRef<jobject> peer(env, env->AllocObject(c.get())); 336 peer_ = DecodeJObject(peer.get()); 337 if (peer_ == NULL) { 338 CHECK(IsExceptionPending()); 339 return; 340 } 341 jmethodID mid = env->GetMethodID(c.get(), "<init>", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V"); 342 env->CallNonvirtualVoidMethod(peer.get(), c.get(), mid, java_thread_group.get(), thread_name.get(), thread_priority, thread_is_daemon); 343 CHECK(!IsExceptionPending()) << " " << PrettyTypeOf(GetException()); 344 SetVmData(peer_, Thread::Current()); 345 346 SirtRef<String> peer_thread_name(GetThreadName()); 347 if (peer_thread_name.get() == NULL) { 348 // The Thread constructor should have set the Thread.name to a 349 // non-null value. However, because we can run without code 350 // available (in the compiler, in tests), we manually assign the 351 // fields the constructor should have set. 352 gThread_daemon->SetBoolean(peer_, thread_is_daemon); 353 gThread_group->SetObject(peer_, thread_group); 354 gThread_name->SetObject(peer_, Decode<Object*>(env, thread_name.get())); 355 gThread_priority->SetInt(peer_, thread_priority); 356 peer_thread_name.reset(GetThreadName()); 357 } 358 // thread_name may have been null, so don't trust this to be non-null 359 if (peer_thread_name.get() != NULL) { 360 SetThreadName(peer_thread_name->ToModifiedUtf8().c_str()); 361 } 362 363 // Pre-allocate an OutOfMemoryError for the double-OOME case. 364 ThrowNewException("Ljava/lang/OutOfMemoryError;", 365 "OutOfMemoryError thrown while trying to throw OutOfMemoryError; no stack available"); 366 ScopedLocalRef<jthrowable> exception(env, env->ExceptionOccurred()); 367 env->ExceptionClear(); 368 pre_allocated_OutOfMemoryError_ = Decode<Throwable*>(env, exception.get()); 369} 370 371void Thread::SetThreadName(const char* name) { 372 name_->assign(name); 373 ::art::SetThreadName(name); 374 Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM")); 375} 376 377void Thread::InitStackHwm() { 378#if defined(__APPLE__) 379 // Only needed to run code. Try implementing this with pthread_get_stacksize_np and pthread_get_stackaddr_np. 380 UNIMPLEMENTED(WARNING); 381#else 382 pthread_attr_t attributes; 383 CHECK_PTHREAD_CALL(pthread_getattr_np, (pthread_self(), &attributes), __FUNCTION__); 384 385 void* temp_stack_base; 386 CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, &temp_stack_base, &stack_size_), 387 __FUNCTION__); 388 stack_begin_ = reinterpret_cast<byte*>(temp_stack_base); 389 390 if (stack_size_ <= kStackOverflowReservedBytes) { 391 LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << stack_size_ << " bytes)"; 392 } 393 394 // Set stack_end_ to the bottom of the stack saving space of stack overflows 395 ResetDefaultStackEnd(); 396 397 // Sanity check. 398 int stack_variable; 399 CHECK_GT(&stack_variable, reinterpret_cast<void*>(stack_end_)); 400 401 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__); 402#endif 403} 404 405void Thread::Dump(std::ostream& os, bool full) const { 406 if (full) { 407 DumpState(os); 408 DumpStack(os); 409 } else { 410 os << "Thread["; 411 if (GetThinLockId() != 0) { 412 // If we're in kStarting, we won't have a thin lock id or tid yet. 413 os << GetThinLockId() 414 << ",tid=" << GetTid() << ','; 415 } 416 os << GetState() 417 << ",Thread*=" << this 418 << ",peer=" << peer_ 419 << ",\"" << *name_ << "\"" 420 << "]"; 421 } 422} 423 424String* Thread::GetThreadName() const { 425 return (peer_ != NULL) ? reinterpret_cast<String*>(gThread_name->GetObject(peer_)) : NULL; 426} 427 428void Thread::GetThreadName(std::string& name) const { 429 name.assign(*name_); 430} 431 432void Thread::DumpState(std::ostream& os) const { 433 std::string group_name; 434 int priority; 435 bool is_daemon = false; 436 437 if (peer_ != NULL) { 438 priority = gThread_priority->GetInt(peer_); 439 is_daemon = gThread_daemon->GetBoolean(peer_); 440 441 Object* thread_group = GetThreadGroup(); 442 if (thread_group != NULL) { 443 String* group_name_string = reinterpret_cast<String*>(gThreadGroup_name->GetObject(thread_group)); 444 group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : "<null>"; 445 } 446 } else { 447 priority = GetNativePriority(); 448 } 449 450 int policy; 451 sched_param sp; 452 CHECK_PTHREAD_CALL(pthread_getschedparam, (pthread_self(), &policy, &sp), __FUNCTION__); 453 454 std::string scheduler_group_name(GetSchedulerGroupName(GetTid())); 455 if (scheduler_group_name.empty()) { 456 scheduler_group_name = "default"; 457 } 458 459 os << '"' << *name_ << '"'; 460 if (is_daemon) { 461 os << " daemon"; 462 } 463 os << " prio=" << priority 464 << " tid=" << GetThinLockId() 465 << " " << GetState() << "\n"; 466 467 os << " | group=\"" << group_name << "\"" 468 << " sCount=" << suspend_count_ 469 << " dsCount=" << debug_suspend_count_ 470 << " obj=" << reinterpret_cast<void*>(peer_) 471 << " self=" << reinterpret_cast<const void*>(this) << "\n"; 472 os << " | sysTid=" << GetTid() 473 << " nice=" << getpriority(PRIO_PROCESS, GetTid()) 474 << " sched=" << policy << "/" << sp.sched_priority 475 << " cgrp=" << scheduler_group_name 476 << " handle=" << pthread_self() << "\n"; 477 478 // Grab the scheduler stats for this thread. 479 std::string scheduler_stats; 480 if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", GetTid()).c_str(), &scheduler_stats)) { 481 scheduler_stats.resize(scheduler_stats.size() - 1); // Lose the trailing '\n'. 482 } else { 483 scheduler_stats = "0 0 0"; 484 } 485 486 int utime = 0; 487 int stime = 0; 488 int task_cpu = 0; 489 GetTaskStats(GetTid(), utime, stime, task_cpu); 490 491 os << " | schedstat=( " << scheduler_stats << " )" 492 << " utm=" << utime 493 << " stm=" << stime 494 << " core=" << task_cpu 495 << " HZ=" << sysconf(_SC_CLK_TCK) << "\n"; 496} 497 498#if !defined(ART_USE_LLVM_COMPILER) 499void Thread::PushNativeToManagedRecord(NativeToManagedRecord* record) { 500 Method **sp = top_of_managed_stack_.GetSP(); 501#ifndef NDEBUG 502 if (sp != NULL) { 503 Method* m = *sp; 504 Runtime::Current()->GetHeap()->VerifyObject(m); 505 DCHECK((m == NULL) || m->IsMethod()); 506 } 507#endif 508 record->last_top_of_managed_stack_ = reinterpret_cast<void*>(sp); 509 record->last_top_of_managed_stack_pc_ = top_of_managed_stack_pc_; 510 record->link_ = native_to_managed_record_; 511 native_to_managed_record_ = record; 512 top_of_managed_stack_.SetSP(NULL); 513} 514#else 515void Thread::PushNativeToManagedRecord(NativeToManagedRecord*) { 516 LOG(FATAL) << "Called non-LLVM method with LLVM"; 517} 518#endif 519 520#if !defined(ART_USE_LLVM_COMPILER) 521void Thread::PopNativeToManagedRecord(const NativeToManagedRecord& record) { 522 native_to_managed_record_ = record.link_; 523 top_of_managed_stack_.SetSP(reinterpret_cast<Method**>(record.last_top_of_managed_stack_)); 524 top_of_managed_stack_pc_ = record.last_top_of_managed_stack_pc_; 525} 526#else 527void Thread::PopNativeToManagedRecord(const NativeToManagedRecord&) { 528 LOG(FATAL) << "Called non-LLVM method with LLVM"; 529} 530#endif 531 532struct StackDumpVisitor : public Thread::StackVisitor { 533 StackDumpVisitor(std::ostream& os, const Thread* thread) 534 : last_method(NULL), last_line_number(0), repetition_count(0), os(os), thread(thread), 535 frame_count(0) { 536 } 537 538 virtual ~StackDumpVisitor() { 539 } 540 541 bool VisitFrame(const Frame& frame, uintptr_t pc) { 542 if (!frame.HasMethod()) { 543 return true; 544 } 545 const int kMaxRepetition = 3; 546 Method* m = frame.GetMethod(); 547#if !defined(ART_USE_LLVM_COMPILER) 548 Class* c = m->GetDeclaringClass(); 549 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 550 const DexCache* dex_cache = c->GetDexCache(); 551 int line_number = -1; 552 if (dex_cache != NULL) { // be tolerant of bad input 553 const DexFile& dex_file = class_linker->FindDexFile(dex_cache); 554 line_number = dex_file.GetLineNumFromPC(m, m->ToDexPC(pc)); 555 } 556#else 557 // Compiler_LLVM stores line_number in the ShadowFrame, and passes it to visitor. 558 int line_number = static_cast<int>(pc); 559#endif 560 if (line_number == last_line_number && last_method == m) { 561 repetition_count++; 562 } else { 563 if (repetition_count >= kMaxRepetition) { 564 os << " ... repeated " << (repetition_count - kMaxRepetition) << " times\n"; 565 } 566 repetition_count = 0; 567 last_line_number = line_number; 568 last_method = m; 569 } 570 if (repetition_count < kMaxRepetition) { 571 os << " at " << PrettyMethod(m, false); 572 if (m->IsNative()) { 573 os << "(Native method)"; 574 } else { 575 mh.ChangeMethod(m); 576 const char* source_file(mh.GetDeclaringClassSourceFile()); 577 os << "(" << (source_file != NULL ? source_file : "unavailable") 578 << ":" << line_number << ")"; 579 } 580 os << "\n"; 581 } 582 583 if (frame_count++ == 0) { 584 Monitor::DescribeWait(os, thread); 585 } 586 return true; 587 } 588 MethodHelper mh; 589 Method* last_method; 590 int last_line_number; 591 int repetition_count; 592 std::ostream& os; 593 const Thread* thread; 594 int frame_count; 595}; 596 597void Thread::DumpStack(std::ostream& os) const { 598 // If we're currently in native code, dump that stack before dumping the managed stack. 599 if (GetState() == kNative || GetState() == kVmWait) { 600 DumpNativeStack(os); 601 } 602 StackDumpVisitor dumper(os, this); 603 WalkStack(&dumper); 604} 605 606void Thread::SetStateWithoutSuspendCheck(ThreadState new_state) { 607 volatile void* raw = reinterpret_cast<volatile void*>(&state_); 608 volatile int32_t* addr = reinterpret_cast<volatile int32_t*>(raw); 609 android_atomic_release_store(new_state, addr); 610} 611 612ThreadState Thread::SetState(ThreadState new_state) { 613 ThreadState old_state = state_; 614 if (old_state == new_state) { 615 return old_state; 616 } 617 618 volatile void* raw = reinterpret_cast<volatile void*>(&state_); 619 volatile int32_t* addr = reinterpret_cast<volatile int32_t*>(raw); 620 621 if (new_state == kRunnable) { 622 /* 623 * Change our status to kRunnable. The transition requires 624 * that we check for pending suspension, because the runtime considers 625 * us to be "asleep" in all other states, and another thread could 626 * be performing a GC now. 627 * 628 * The order of operations is very significant here. One way to 629 * do this wrong is: 630 * 631 * GCing thread Our thread (in kNative) 632 * ------------ ---------------------- 633 * check suspend count (== 0) 634 * SuspendAllThreads() 635 * grab suspend-count lock 636 * increment all suspend counts 637 * release suspend-count lock 638 * check thread state (== kNative) 639 * all are suspended, begin GC 640 * set state to kRunnable 641 * (continue executing) 642 * 643 * We can correct this by grabbing the suspend-count lock and 644 * performing both of our operations (check suspend count, set 645 * state) while holding it, now we need to grab a mutex on every 646 * transition to kRunnable. 647 * 648 * What we do instead is change the order of operations so that 649 * the transition to kRunnable happens first. If we then detect 650 * that the suspend count is nonzero, we switch to kSuspended. 651 * 652 * Appropriate compiler and memory barriers are required to ensure 653 * that the operations are observed in the expected order. 654 * 655 * This does create a small window of opportunity where a GC in 656 * progress could observe what appears to be a running thread (if 657 * it happens to look between when we set to kRunnable and when we 658 * switch to kSuspended). At worst this only affects assertions 659 * and thread logging. (We could work around it with some sort 660 * of intermediate "pre-running" state that is generally treated 661 * as equivalent to running, but that doesn't seem worthwhile.) 662 * 663 * We can also solve this by combining the "status" and "suspend 664 * count" fields into a single 32-bit value. This trades the 665 * store/load barrier on transition to kRunnable for an atomic RMW 666 * op on all transitions and all suspend count updates (also, all 667 * accesses to status or the thread count require bit-fiddling). 668 * It also eliminates the brief transition through kRunnable when 669 * the thread is supposed to be suspended. This is possibly faster 670 * on SMP and slightly more correct, but less convenient. 671 */ 672 android_atomic_acquire_store(new_state, addr); 673 ANNOTATE_IGNORE_READS_BEGIN(); 674 int suspend_count = suspend_count_; 675 ANNOTATE_IGNORE_READS_END(); 676 if (suspend_count != 0) { 677 Runtime::Current()->GetThreadList()->FullSuspendCheck(this); 678 } 679 } else { 680 /* 681 * Not changing to kRunnable. No additional work required. 682 * 683 * We use a releasing store to ensure that, if we were runnable, 684 * any updates we previously made to objects on the managed heap 685 * will be observed before the state change. 686 */ 687 android_atomic_release_store(new_state, addr); 688 } 689 690 return old_state; 691} 692 693bool Thread::IsSuspended() { 694 ANNOTATE_IGNORE_READS_BEGIN(); 695 int suspend_count = suspend_count_; 696 ANNOTATE_IGNORE_READS_END(); 697 return suspend_count != 0 && GetState() != kRunnable; 698} 699 700static void ReportThreadSuspendTimeout(Thread* waiting_thread) { 701 Runtime* runtime = Runtime::Current(); 702 std::ostringstream ss; 703 ss << "Thread suspend timeout waiting for thread " << *waiting_thread << "\n"; 704 runtime->DumpLockHolders(ss); 705 ss << "\n"; 706 runtime->GetThreadList()->DumpLocked(ss); 707 LOG(FATAL) << ss.str(); 708} 709 710void Thread::WaitUntilSuspended() { 711 static const useconds_t kTimeoutUs = 30 * 1000000; // 30s. 712 713 useconds_t total_delay = 0; 714 useconds_t delay = 0; 715 while (GetState() == kRunnable) { 716 if (total_delay >= kTimeoutUs) { 717 ReportThreadSuspendTimeout(this); 718 } 719 useconds_t new_delay = delay * 2; 720 CHECK_GE(new_delay, delay); 721 delay = new_delay; 722 if (delay == 0) { 723 sched_yield(); 724 delay = 10000; 725 } else { 726 usleep(delay); 727 total_delay += delay; 728 } 729 } 730} 731 732void Thread::ThreadExitCallback(void* arg) { 733 Thread* self = reinterpret_cast<Thread*>(arg); 734 LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self; 735} 736 737void Thread::Startup() { 738 // Allocate a TLS slot. 739 CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key"); 740 741 // Double-check the TLS slot allocation. 742 if (pthread_getspecific(pthread_key_self_) != NULL) { 743 LOG(FATAL) << "Newly-created pthread TLS slot is not NULL"; 744 } 745} 746 747// TODO: make more accessible? 748static Class* FindClassOrDie(ClassLinker* class_linker, const char* descriptor) { 749 Class* c = class_linker->FindSystemClass(descriptor); 750 CHECK(c != NULL) << descriptor; 751 return c; 752} 753 754// TODO: make more accessible? 755static Field* FindFieldOrDie(Class* c, const char* name, const char* descriptor) { 756 Field* f = c->FindDeclaredInstanceField(name, descriptor); 757 CHECK(f != NULL) << PrettyClass(c) << " " << name << " " << descriptor; 758 return f; 759} 760 761// TODO: make more accessible? 762static Method* FindMethodOrDie(Class* c, const char* name, const char* signature) { 763 Method* m = c->FindVirtualMethod(name, signature); 764 CHECK(m != NULL) << PrettyClass(c) << " " << name << " " << signature; 765 return m; 766} 767 768// TODO: make more accessible? 769static Field* FindStaticFieldOrDie(Class* c, const char* name, const char* descriptor) { 770 Field* f = c->FindDeclaredStaticField(name, descriptor); 771 CHECK(f != NULL) << PrettyClass(c) << " " << name << " " << descriptor; 772 return f; 773} 774 775void Thread::FinishStartup() { 776 CHECK(Runtime::Current()->IsStarted()); 777 Thread* self = Thread::Current(); 778 779 // Need to be kRunnable for FindClass 780 ScopedThreadStateChange tsc(self, kRunnable); 781 782 // Now the ClassLinker is ready, we can find the various Class*, Field*, and Method*s we need. 783 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 784 785 Class* Thread_class = FindClassOrDie(class_linker, "Ljava/lang/Thread;"); 786 Class* UncaughtExceptionHandler_class = FindClassOrDie(class_linker, "Ljava/lang/Thread$UncaughtExceptionHandler;"); 787 gThreadGroup = FindClassOrDie(class_linker, "Ljava/lang/ThreadGroup;"); 788 gThreadLock = FindClassOrDie(class_linker, "Ljava/lang/ThreadLock;"); 789 790 gThread_daemon = FindFieldOrDie(Thread_class, "daemon", "Z"); 791 gThread_group = FindFieldOrDie(Thread_class, "group", "Ljava/lang/ThreadGroup;"); 792 gThread_lock = FindFieldOrDie(Thread_class, "lock", "Ljava/lang/ThreadLock;"); 793 gThread_name = FindFieldOrDie(Thread_class, "name", "Ljava/lang/String;"); 794 gThread_priority = FindFieldOrDie(Thread_class, "priority", "I"); 795 gThread_uncaughtHandler = FindFieldOrDie(Thread_class, "uncaughtHandler", "Ljava/lang/Thread$UncaughtExceptionHandler;"); 796 gThread_vmData = FindFieldOrDie(Thread_class, "vmData", "I"); 797 gThreadGroup_name = FindFieldOrDie(gThreadGroup, "name", "Ljava/lang/String;"); 798 gThreadGroup_mMain = FindStaticFieldOrDie(gThreadGroup, "mMain", "Ljava/lang/ThreadGroup;"); 799 gThreadGroup_mSystem = FindStaticFieldOrDie(gThreadGroup, "mSystem", "Ljava/lang/ThreadGroup;"); 800 gThreadLock_thread = FindFieldOrDie(gThreadLock, "thread", "Ljava/lang/Thread;"); 801 802 gThread_run = FindMethodOrDie(Thread_class, "run", "()V"); 803 gThreadGroup_removeThread = FindMethodOrDie(gThreadGroup, "removeThread", "(Ljava/lang/Thread;)V"); 804 gUncaughtExceptionHandler_uncaughtException = FindMethodOrDie(UncaughtExceptionHandler_class, 805 "uncaughtException", "(Ljava/lang/Thread;Ljava/lang/Throwable;)V"); 806 807 // Finish attaching the main thread. 808 Thread::Current()->CreatePeer("main", false, Thread::GetMainThreadGroup()); 809 810 InitBoxingMethods(); 811 class_linker->RunRootClinits(); 812} 813 814void Thread::Shutdown() { 815 CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key"); 816} 817 818uint32_t Thread::LockOwnerFromThreadLock(Object* thread_lock) { 819 if (thread_lock == NULL || thread_lock->GetClass() != gThreadLock) { 820 return ThreadList::kInvalidId; 821 } 822 Object* managed_thread = gThreadLock_thread->GetObject(thread_lock); 823 if (managed_thread == NULL) { 824 return ThreadList::kInvalidId; 825 } 826 uintptr_t vmData = static_cast<uintptr_t>(gThread_vmData->GetInt(managed_thread)); 827 Thread* thread = reinterpret_cast<Thread*>(vmData); 828 if (thread == NULL) { 829 return ThreadList::kInvalidId; 830 } 831 return thread->GetThinLockId(); 832} 833 834Thread::Thread() 835 : thin_lock_id_(0), 836 tid_(0), 837 peer_(NULL), 838 top_of_managed_stack_(), 839 top_of_managed_stack_pc_(0), 840 wait_mutex_(new Mutex("a thread wait mutex")), 841 wait_cond_(new ConditionVariable("a thread wait condition variable")), 842 wait_monitor_(NULL), 843 interrupted_(false), 844 wait_next_(NULL), 845 monitor_enter_object_(NULL), 846 card_table_(0), 847 stack_end_(NULL), 848 native_to_managed_record_(NULL), 849 top_sirt_(NULL), 850 top_shadow_frame_(NULL), 851 jni_env_(NULL), 852 state_(kNative), 853 self_(NULL), 854 runtime_(NULL), 855 exception_(NULL), 856 suspend_count_(0), 857 debug_suspend_count_(0), 858 class_loader_override_(NULL), 859 long_jump_context_(NULL), 860 throwing_OutOfMemoryError_(false), 861 pre_allocated_OutOfMemoryError_(NULL), 862 debug_invoke_req_(new DebugInvokeReq), 863 trace_stack_(new std::vector<TraceStackFrame>), 864 name_(new std::string("<native thread without managed peer>")) { 865 CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread); 866 memset(&held_mutexes_[0], 0, sizeof(held_mutexes_)); 867} 868 869static void MonitorExitVisitor(const Object* object, void*) { 870 Object* entered_monitor = const_cast<Object*>(object); 871 LOG(WARNING) << "Calling MonitorExit on object " << object << " (" << PrettyTypeOf(object) << ")" 872 << " left locked by native thread " << *Thread::Current() << " which is detaching"; 873 entered_monitor->MonitorExit(Thread::Current()); 874} 875 876void Thread::Destroy() { 877 // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited. 878 if (jni_env_ != NULL) { 879 jni_env_->monitors.VisitRoots(MonitorExitVisitor, NULL); 880 } 881 882 if (peer_ != NULL) { 883 Thread* self = this; 884 885 // We may need to call user-supplied managed code. 886 SetState(kRunnable); 887 888 HandleUncaughtExceptions(); 889 RemoveFromThreadGroup(); 890 891 // this.vmData = 0; 892 SetVmData(peer_, NULL); 893 894 Dbg::PostThreadDeath(self); 895 896 // Thread.join() is implemented as an Object.wait() on the Thread.lock 897 // object. Signal anyone who is waiting. 898 Object* lock = gThread_lock->GetObject(peer_); 899 // (This conditional is only needed for tests, where Thread.lock won't have been set.) 900 if (lock != NULL) { 901 lock->MonitorEnter(self); 902 lock->NotifyAll(); 903 lock->MonitorExit(self); 904 } 905 } 906} 907 908Thread::~Thread() { 909 delete jni_env_; 910 jni_env_ = NULL; 911 912 SetState(kTerminated); 913 914 delete wait_cond_; 915 delete wait_mutex_; 916 917#if !defined(ART_USE_LLVM_COMPILER) 918 delete long_jump_context_; 919#endif 920 921 delete debug_invoke_req_; 922 delete trace_stack_; 923 delete name_; 924 925 TearDownAlternateSignalStack(); 926} 927 928void Thread::HandleUncaughtExceptions() { 929 if (!IsExceptionPending()) { 930 return; 931 } 932 933 // Get and clear the exception. 934 Object* exception = GetException(); 935 ClearException(); 936 937 // If the thread has its own handler, use that. 938 Object* handler = gThread_uncaughtHandler->GetObject(peer_); 939 if (handler == NULL) { 940 // Otherwise use the thread group's default handler. 941 handler = GetThreadGroup(); 942 } 943 944 // Call the handler. 945 Method* m = handler->GetClass()->FindVirtualMethodForVirtualOrInterface(gUncaughtExceptionHandler_uncaughtException); 946 JValue args[2]; 947 args[0].SetL(peer_); 948 args[1].SetL(exception); 949 m->Invoke(this, handler, args, NULL); 950 951 // If the handler threw, clear that exception too. 952 ClearException(); 953} 954 955Object* Thread::GetThreadGroup() const { 956 return gThread_group->GetObject(peer_); 957} 958 959void Thread::RemoveFromThreadGroup() { 960 // this.group.removeThread(this); 961 // group can be null if we're in the compiler or a test. 962 Object* group = GetThreadGroup(); 963 if (group != NULL) { 964 Method* m = group->GetClass()->FindVirtualMethodForVirtualOrInterface(gThreadGroup_removeThread); 965 JValue args[1]; 966 args[0].SetL(peer_); 967 m->Invoke(this, group, args, NULL); 968 } 969} 970 971size_t Thread::NumSirtReferences() { 972 size_t count = 0; 973 for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) { 974 count += cur->NumberOfReferences(); 975 } 976 return count; 977} 978 979size_t Thread::NumShadowFrameReferences() { 980 size_t count = 0; 981 for (ShadowFrame* cur = top_shadow_frame_; cur; cur = cur->GetLink()) { 982 count += cur->NumberOfReferences(); 983 } 984 return count; 985} 986 987bool Thread::SirtContains(jobject obj) { 988 Object** sirt_entry = reinterpret_cast<Object**>(obj); 989 for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) { 990 if (cur->Contains(sirt_entry)) { 991 return true; 992 } 993 } 994 return false; 995} 996 997bool Thread::ShadowFrameContains(jobject obj) { 998 Object** shadow_frame_entry = reinterpret_cast<Object**>(obj); 999 for (ShadowFrame* cur = top_shadow_frame_; cur; cur = cur->GetLink()) { 1000 if (cur->Contains(shadow_frame_entry)) { 1001 return true; 1002 } 1003 } 1004 return false; 1005} 1006 1007bool Thread::StackReferencesContain(jobject obj) { 1008 return SirtContains(obj) || ShadowFrameContains(obj); 1009} 1010 1011void Thread::SirtVisitRoots(Heap::RootVisitor* visitor, void* arg) { 1012 for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) { 1013 size_t num_refs = cur->NumberOfReferences(); 1014 for (size_t j = 0; j < num_refs; j++) { 1015 Object* object = cur->GetReference(j); 1016 if (object != NULL) { 1017 visitor(object, arg); 1018 } 1019 } 1020 } 1021} 1022 1023void Thread::ShadowFrameVisitRoots(Heap::RootVisitor* visitor, void* arg) { 1024 for (ShadowFrame* cur = top_shadow_frame_; cur; cur = cur->GetLink()) { 1025 size_t num_refs = cur->NumberOfReferences(); 1026 for (size_t j = 0; j < num_refs; j++) { 1027 Object* object = cur->GetReference(j); 1028 if (object != NULL) { 1029 visitor(object, arg); 1030 } 1031 } 1032 } 1033} 1034 1035Object* Thread::DecodeJObject(jobject obj) { 1036 DCHECK(CanAccessDirectReferences()); 1037 if (obj == NULL) { 1038 return NULL; 1039 } 1040 IndirectRef ref = reinterpret_cast<IndirectRef>(obj); 1041 IndirectRefKind kind = GetIndirectRefKind(ref); 1042 Object* result; 1043 switch (kind) { 1044 case kLocal: 1045 { 1046 IndirectReferenceTable& locals = jni_env_->locals; 1047 result = const_cast<Object*>(locals.Get(ref)); 1048 break; 1049 } 1050 case kGlobal: 1051 { 1052 JavaVMExt* vm = Runtime::Current()->GetJavaVM(); 1053 IndirectReferenceTable& globals = vm->globals; 1054 MutexLock mu(vm->globals_lock); 1055 result = const_cast<Object*>(globals.Get(ref)); 1056 break; 1057 } 1058 case kWeakGlobal: 1059 { 1060 JavaVMExt* vm = Runtime::Current()->GetJavaVM(); 1061 IndirectReferenceTable& weak_globals = vm->weak_globals; 1062 MutexLock mu(vm->weak_globals_lock); 1063 result = const_cast<Object*>(weak_globals.Get(ref)); 1064 if (result == kClearedJniWeakGlobal) { 1065 // This is a special case where it's okay to return NULL. 1066 return NULL; 1067 } 1068 break; 1069 } 1070 case kSirtOrInvalid: 1071 default: 1072 // TODO: make stack indirect reference table lookup more efficient 1073 // Check if this is a local reference in the SIRT 1074 if (StackReferencesContain(obj)) { 1075 result = *reinterpret_cast<Object**>(obj); // Read from SIRT 1076 } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) { 1077 // Assume an invalid local reference is actually a direct pointer. 1078 result = reinterpret_cast<Object*>(obj); 1079 } else { 1080 result = kInvalidIndirectRefObject; 1081 } 1082 } 1083 1084 if (result == NULL) { 1085 LOG(ERROR) << "JNI ERROR (app bug): use of deleted " << kind << ": " << obj; 1086 JniAbort(NULL); 1087 } else { 1088 if (result != kInvalidIndirectRefObject) { 1089 Runtime::Current()->GetHeap()->VerifyObject(result); 1090 } 1091 } 1092 return result; 1093} 1094 1095class CountStackDepthVisitor : public Thread::StackVisitor { 1096 public: 1097 CountStackDepthVisitor() : depth_(0), skip_depth_(0), skipping_(true) {} 1098 1099 bool VisitFrame(const Frame& frame, uintptr_t /*pc*/) { 1100 // We want to skip frames up to and including the exception's constructor. 1101 // Note we also skip the frame if it doesn't have a method (namely the callee 1102 // save frame) 1103 if (skipping_ && frame.HasMethod() && 1104 !Throwable::GetJavaLangThrowable()->IsAssignableFrom(frame.GetMethod()->GetDeclaringClass())) { 1105 skipping_ = false; 1106 } 1107 if (!skipping_) { 1108 if (frame.HasMethod()) { // ignore callee save frames 1109 ++depth_; 1110 } 1111 } else { 1112 ++skip_depth_; 1113 } 1114 return true; 1115 } 1116 1117 int GetDepth() const { 1118 return depth_; 1119 } 1120 1121 int GetSkipDepth() const { 1122 return skip_depth_; 1123 } 1124 1125 private: 1126 uint32_t depth_; 1127 uint32_t skip_depth_; 1128 bool skipping_; 1129}; 1130 1131class BuildInternalStackTraceVisitor : public Thread::StackVisitor { 1132 public: 1133 explicit BuildInternalStackTraceVisitor(int skip_depth) 1134 : skip_depth_(skip_depth), count_(0), pc_trace_(NULL), method_trace_(NULL), local_ref_(NULL) { 1135 } 1136 1137 bool Init(int depth, ScopedJniThreadState& ts) { 1138 // Allocate method trace with an extra slot that will hold the PC trace 1139 method_trace_ = Runtime::Current()->GetClassLinker()->AllocObjectArray<Object>(depth + 1); 1140 if (method_trace_ == NULL) { 1141 return false; 1142 } 1143 // Register a local reference as IntArray::Alloc may trigger GC 1144 local_ref_ = AddLocalReference<jobject>(ts.Env(), method_trace_); 1145 pc_trace_ = IntArray::Alloc(depth); 1146 if (pc_trace_ == NULL) { 1147 return false; 1148 } 1149#ifdef MOVING_GARBAGE_COLLECTOR 1150 // Re-read after potential GC 1151 method_trace_ = Decode<ObjectArray<Object>*>(ts.Env(), local_ref_); 1152#endif 1153 // Save PC trace in last element of method trace, also places it into the 1154 // object graph. 1155 method_trace_->Set(depth, pc_trace_); 1156 return true; 1157 } 1158 1159 virtual ~BuildInternalStackTraceVisitor() {} 1160 1161 bool VisitFrame(const Frame& frame, uintptr_t pc) { 1162 if (method_trace_ == NULL || pc_trace_ == NULL) { 1163 return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError. 1164 } 1165 if (skip_depth_ > 0) { 1166 skip_depth_--; 1167 return true; 1168 } 1169 if (!frame.HasMethod()) { 1170 return true; // ignore callee save frames 1171 } 1172 method_trace_->Set(count_, frame.GetMethod()); 1173 pc_trace_->Set(count_, pc); 1174 ++count_; 1175 return true; 1176 } 1177 1178 jobject GetInternalStackTrace() const { 1179 return local_ref_; 1180 } 1181 1182 private: 1183 // How many more frames to skip. 1184 int32_t skip_depth_; 1185 // Current position down stack trace 1186 uint32_t count_; 1187 // Array of return PC values 1188 IntArray* pc_trace_; 1189 // An array of the methods on the stack, the last entry is a reference to the 1190 // PC trace 1191 ObjectArray<Object>* method_trace_; 1192 // Local indirect reference table entry for method trace 1193 jobject local_ref_; 1194}; 1195 1196#if !defined(ART_USE_LLVM_COMPILER) 1197// TODO: remove this. 1198static uintptr_t ManglePc(uintptr_t pc) { 1199 // Move the PC back 2 bytes as a call will frequently terminate the 1200 // decoding of a particular instruction and we want to make sure we 1201 // get the Dex PC of the instruction with the call and not the 1202 // instruction following. 1203 if (pc > 0) { pc -= 2; } 1204 return pc; 1205} 1206#endif 1207 1208// TODO: remove this. 1209static uintptr_t DemanglePc(uintptr_t pc) { 1210 // Revert mangling for the case where we need the PC to return to the upcall 1211 if (pc > 0) { pc += 2; } 1212 return pc; 1213} 1214 1215void Thread::PushShadowFrame(ShadowFrame* frame) { 1216 frame->SetLink(top_shadow_frame_); 1217 top_shadow_frame_ = frame; 1218} 1219 1220ShadowFrame* Thread::PopShadowFrame() { 1221 CHECK(top_shadow_frame_ != NULL); 1222 ShadowFrame* frame = top_shadow_frame_; 1223 top_shadow_frame_ = frame->GetLink(); 1224 return frame; 1225} 1226 1227void Thread::PushSirt(StackIndirectReferenceTable* sirt) { 1228 sirt->SetLink(top_sirt_); 1229 top_sirt_ = sirt; 1230} 1231 1232StackIndirectReferenceTable* Thread::PopSirt() { 1233 CHECK(top_sirt_ != NULL); 1234 StackIndirectReferenceTable* sirt = top_sirt_; 1235 top_sirt_ = top_sirt_->GetLink(); 1236 return sirt; 1237} 1238 1239#if !defined(ART_USE_LLVM_COMPILER) // LLVM use ShadowFrame 1240 1241void Thread::WalkStack(StackVisitor* visitor, bool include_upcalls) const { 1242 Frame frame = GetTopOfStack(); 1243 uintptr_t pc = ManglePc(top_of_managed_stack_pc_); 1244 uint32_t trace_stack_depth = 0; 1245 // TODO: enable this CHECK after native_to_managed_record_ is initialized during startup. 1246 // CHECK(native_to_managed_record_ != NULL); 1247 NativeToManagedRecord* record = native_to_managed_record_; 1248 bool method_tracing_active = Runtime::Current()->IsMethodTracingActive(); 1249 while (frame.GetSP() != NULL) { 1250 for ( ; frame.GetMethod() != NULL; frame.Next()) { 1251 frame.GetMethod()->AssertPcIsWithinCode(pc); 1252 bool should_continue = visitor->VisitFrame(frame, pc); 1253 if (UNLIKELY(!should_continue)) { 1254 return; 1255 } 1256 uintptr_t return_pc = frame.GetReturnPC(); 1257 if (LIKELY(!method_tracing_active)) { 1258 pc = ManglePc(return_pc); 1259 } else { 1260 // While profiling, the return pc is restored from the side stack, except when walking 1261 // the stack for an exception where the side stack will be unwound in VisitFrame. 1262 if (IsTraceExitPc(return_pc) && !include_upcalls) { 1263 TraceStackFrame trace_frame = GetTraceStackFrame(trace_stack_depth++); 1264 CHECK(trace_frame.method_ == frame.GetMethod()); 1265 pc = ManglePc(trace_frame.return_pc_); 1266 } else { 1267 pc = ManglePc(return_pc); 1268 } 1269 } 1270 } 1271 if (include_upcalls) { 1272 bool should_continue = visitor->VisitFrame(frame, pc); 1273 if (!should_continue) { 1274 return; 1275 } 1276 } 1277 if (record == NULL) { 1278 return; 1279 } 1280 // last_tos should return Frame instead of sp? 1281 frame.SetSP(reinterpret_cast<Method**>(record->last_top_of_managed_stack_)); 1282 pc = ManglePc(record->last_top_of_managed_stack_pc_); 1283 record = record->link_; 1284 } 1285} 1286 1287#else // defined(ART_USE_LLVM_COMPILER) // LLVM uses ShadowFrame 1288 1289void Thread::WalkStack(StackVisitor* visitor, bool /*include_upcalls*/) const { 1290 for (ShadowFrame* cur = top_shadow_frame_; cur; cur = cur->GetLink()) { 1291 Frame frame; 1292 frame.SetSP(reinterpret_cast<Method**>(reinterpret_cast<byte*>(cur) + 1293 ShadowFrame::MethodOffset())); 1294 bool should_continue = visitor->VisitFrame(frame, cur->GetLineNumber()); 1295 if (!should_continue) { 1296 return; 1297 } 1298 } 1299} 1300 1301/* 1302 * | | 1303 * | | 1304 * | | 1305 * | . | 1306 * | . | 1307 * | . | 1308 * | . | 1309 * | Method* | 1310 * | . | 1311 * | . | <-- top_shadow_frame_ (ShadowFrame*) 1312 * / +------------------------+ 1313 * ->| . | 1314 * . | . | 1315 * . | . | 1316 * /+------------------------+ 1317 * / | . | 1318 * / | . | 1319 * --- | | . | 1320 * | | | . | 1321 * | | Method* | <-- frame.GetSP() (Method**) 1322 * ShadowFrame \ | . | 1323 * | ->| . | <-- cur (ShadowFrame*) 1324 * --- /+------------------------+ 1325 * / | . | 1326 * / | . | 1327 * --- | | . | 1328 * | cur->GetLink() | | . | 1329 * | | Method* | 1330 * ShadowFrame \ | . | 1331 * | ->| . | 1332 * --- +------------------------+ 1333 * | . | 1334 * | . | 1335 * | . | 1336 * +========================+ 1337 */ 1338 1339#endif 1340 1341jobject Thread::CreateInternalStackTrace(JNIEnv* env) const { 1342 // Compute depth of stack 1343 CountStackDepthVisitor count_visitor; 1344 WalkStack(&count_visitor); 1345 int32_t depth = count_visitor.GetDepth(); 1346 int32_t skip_depth = count_visitor.GetSkipDepth(); 1347 1348 // Transition into runnable state to work on Object*/Array* 1349 ScopedJniThreadState ts(env); 1350 1351 // Build internal stack trace 1352 BuildInternalStackTraceVisitor build_trace_visitor(skip_depth); 1353 if (!build_trace_visitor.Init(depth, ts)) { 1354 return NULL; // Allocation failed 1355 } 1356 WalkStack(&build_trace_visitor); 1357 return build_trace_visitor.GetInternalStackTrace(); 1358} 1359 1360jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal, 1361 jobjectArray output_array, int* stack_depth) { 1362 // Transition into runnable state to work on Object*/Array* 1363 ScopedJniThreadState ts(env); 1364 // Decode the internal stack trace into the depth, method trace and PC trace 1365 ObjectArray<Object>* method_trace = 1366 down_cast<ObjectArray<Object>*>(Decode<Object*>(ts.Env(), internal)); 1367 int32_t depth = method_trace->GetLength() - 1; 1368 IntArray* pc_trace = down_cast<IntArray*>(method_trace->Get(depth)); 1369 1370 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 1371 1372 jobjectArray result; 1373 ObjectArray<StackTraceElement>* java_traces; 1374 if (output_array != NULL) { 1375 // Reuse the array we were given. 1376 result = output_array; 1377 java_traces = reinterpret_cast<ObjectArray<StackTraceElement>*>(Decode<Array*>(env, 1378 output_array)); 1379 // ...adjusting the number of frames we'll write to not exceed the array length. 1380 depth = std::min(depth, java_traces->GetLength()); 1381 } else { 1382 // Create java_trace array and place in local reference table 1383 java_traces = class_linker->AllocStackTraceElementArray(depth); 1384 if (java_traces == NULL) { 1385 return NULL; 1386 } 1387 result = AddLocalReference<jobjectArray>(ts.Env(), java_traces); 1388 } 1389 1390 if (stack_depth != NULL) { 1391 *stack_depth = depth; 1392 } 1393 1394 MethodHelper mh; 1395 for (int32_t i = 0; i < depth; ++i) { 1396 // Prepare parameters for StackTraceElement(String cls, String method, String file, int line) 1397 Method* method = down_cast<Method*>(method_trace->Get(i)); 1398 mh.ChangeMethod(method); 1399 uint32_t native_pc = pc_trace->Get(i); 1400#if !defined(ART_USE_LLVM_COMPILER) 1401 int32_t line_number = mh.GetLineNumFromNativePC(native_pc); 1402#else 1403 int32_t line_number = native_pc; // LLVM stored line_number in the ShadowFrame 1404#endif 1405 // Allocate element, potentially triggering GC 1406 // TODO: reuse class_name_object via Class::name_? 1407 const char* descriptor = mh.GetDeclaringClassDescriptor(); 1408 CHECK(descriptor != NULL); 1409 std::string class_name(PrettyDescriptor(descriptor)); 1410 SirtRef<String> class_name_object(String::AllocFromModifiedUtf8(class_name.c_str())); 1411 if (class_name_object.get() == NULL) { 1412 return NULL; 1413 } 1414 const char* method_name = mh.GetName(); 1415 CHECK(method_name != NULL); 1416 SirtRef<String> method_name_object(String::AllocFromModifiedUtf8(method_name)); 1417 if (method_name_object.get() == NULL) { 1418 return NULL; 1419 } 1420 const char* source_file = mh.GetDeclaringClassSourceFile(); 1421 SirtRef<String> source_name_object(String::AllocFromModifiedUtf8(source_file)); 1422 StackTraceElement* obj = StackTraceElement::Alloc(class_name_object.get(), 1423 method_name_object.get(), 1424 source_name_object.get(), 1425 line_number); 1426 if (obj == NULL) { 1427 return NULL; 1428 } 1429#ifdef MOVING_GARBAGE_COLLECTOR 1430 // Re-read after potential GC 1431 java_traces = Decode<ObjectArray<Object>*>(ts.Env(), result); 1432 method_trace = down_cast<ObjectArray<Object>*>(Decode<Object*>(ts.Env(), internal)); 1433 pc_trace = down_cast<IntArray*>(method_trace->Get(depth)); 1434#endif 1435 java_traces->Set(i, obj); 1436 } 1437 return result; 1438} 1439 1440void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) { 1441 va_list args; 1442 va_start(args, fmt); 1443 ThrowNewExceptionV(exception_class_descriptor, fmt, args); 1444 va_end(args); 1445} 1446 1447void Thread::ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap) { 1448 std::string msg; 1449 StringAppendV(&msg, fmt, ap); 1450 ThrowNewException(exception_class_descriptor, msg.c_str()); 1451} 1452 1453void Thread::ThrowNewException(const char* exception_class_descriptor, const char* msg) { 1454 // Convert "Ljava/lang/Exception;" into JNI-style "java/lang/Exception". 1455 CHECK_EQ('L', exception_class_descriptor[0]); 1456 std::string descriptor(exception_class_descriptor + 1); 1457 CHECK_EQ(';', descriptor[descriptor.length() - 1]); 1458 descriptor.erase(descriptor.length() - 1); 1459 1460 JNIEnv* env = GetJniEnv(); 1461 ScopedLocalRef<jclass> exception_class(env, env->FindClass(descriptor.c_str())); 1462 if (exception_class.get() == NULL) { 1463 LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI FindClass failed: " 1464 << PrettyTypeOf(GetException()); 1465 CHECK(IsExceptionPending()); 1466 return; 1467 } 1468 if (!Runtime::Current()->IsStarted()) { 1469 // Something is trying to throw an exception without a started 1470 // runtime, which is the common case in the compiler. We won't be 1471 // able to invoke the constructor of the exception, so use 1472 // AllocObject which will not invoke a constructor. 1473 ScopedLocalRef<jthrowable> exception( 1474 env, reinterpret_cast<jthrowable>(env->AllocObject(exception_class.get()))); 1475 if (exception.get() != NULL) { 1476 ScopedJniThreadState ts(env); 1477 Throwable* t = reinterpret_cast<Throwable*>(ts.Self()->DecodeJObject(exception.get())); 1478 t->SetDetailMessage(String::AllocFromModifiedUtf8(msg)); 1479 ts.Self()->SetException(t); 1480 } else { 1481 LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI AllocObject failed: " 1482 << PrettyTypeOf(GetException()); 1483 CHECK(IsExceptionPending()); 1484 } 1485 return; 1486 } 1487 int rc = env->ThrowNew(exception_class.get(), msg); 1488 if (rc != JNI_OK) { 1489 LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI ThrowNew failed: " 1490 << PrettyTypeOf(GetException()); 1491 CHECK(IsExceptionPending()); 1492 } 1493} 1494 1495void Thread::ThrowOutOfMemoryError(const char* msg) { 1496 LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s", 1497 msg, (throwing_OutOfMemoryError_ ? " (recursive case)" : "")); 1498 if (!throwing_OutOfMemoryError_) { 1499 throwing_OutOfMemoryError_ = true; 1500 ThrowNewException("Ljava/lang/OutOfMemoryError;", NULL); 1501 } else { 1502 SetException(pre_allocated_OutOfMemoryError_); 1503 } 1504 throwing_OutOfMemoryError_ = false; 1505} 1506 1507 1508Thread* Thread::CurrentFromGdb() { 1509 return Thread::Current(); 1510} 1511 1512void Thread::DumpFromGdb() const { 1513 std::ostringstream ss; 1514 Dump(ss); 1515 std::string str(ss.str()); 1516 // log to stderr for debugging command line processes 1517 std::cerr << str; 1518#ifdef HAVE_ANDROID_OS 1519 // log to logcat for debugging frameworks processes 1520 LOG(INFO) << str; 1521#endif 1522} 1523 1524void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) { 1525 CHECK_EQ(size_of_pointers, 4U); // TODO: support 64-bit targets. 1526#define DO_THREAD_OFFSET(x) if (offset == static_cast<uint32_t>(OFFSETOF_VOLATILE_MEMBER(Thread, x))) { os << # x; } else 1527#define DO_THREAD_ENTRY_POINT_OFFSET(x) if (offset == ENTRYPOINT_OFFSET(x)) { os << # x; } else 1528 DO_THREAD_OFFSET(card_table_) 1529 DO_THREAD_OFFSET(exception_) 1530 DO_THREAD_OFFSET(jni_env_) 1531 DO_THREAD_OFFSET(self_) 1532 DO_THREAD_OFFSET(stack_end_) 1533 DO_THREAD_OFFSET(state_) 1534 DO_THREAD_OFFSET(suspend_count_) 1535 DO_THREAD_OFFSET(thin_lock_id_) 1536 DO_THREAD_OFFSET(top_of_managed_stack_) 1537 DO_THREAD_OFFSET(top_of_managed_stack_pc_) 1538 DO_THREAD_OFFSET(top_sirt_) 1539 DO_THREAD_ENTRY_POINT_OFFSET(pAllocArrayFromCode) 1540 DO_THREAD_ENTRY_POINT_OFFSET(pAllocArrayFromCodeWithAccessCheck) 1541 DO_THREAD_ENTRY_POINT_OFFSET(pAllocObjectFromCode) 1542 DO_THREAD_ENTRY_POINT_OFFSET(pAllocObjectFromCodeWithAccessCheck) 1543 DO_THREAD_ENTRY_POINT_OFFSET(pCheckAndAllocArrayFromCode) 1544 DO_THREAD_ENTRY_POINT_OFFSET(pCheckAndAllocArrayFromCodeWithAccessCheck) 1545 DO_THREAD_ENTRY_POINT_OFFSET(pInstanceofNonTrivialFromCode) 1546 DO_THREAD_ENTRY_POINT_OFFSET(pCanPutArrayElementFromCode) 1547 DO_THREAD_ENTRY_POINT_OFFSET(pCheckCastFromCode) 1548 DO_THREAD_ENTRY_POINT_OFFSET(pDebugMe) 1549 DO_THREAD_ENTRY_POINT_OFFSET(pUpdateDebuggerFromCode) 1550 DO_THREAD_ENTRY_POINT_OFFSET(pInitializeStaticStorage) 1551 DO_THREAD_ENTRY_POINT_OFFSET(pInitializeTypeAndVerifyAccessFromCode) 1552 DO_THREAD_ENTRY_POINT_OFFSET(pInitializeTypeFromCode) 1553 DO_THREAD_ENTRY_POINT_OFFSET(pResolveStringFromCode) 1554 DO_THREAD_ENTRY_POINT_OFFSET(pSet32Instance) 1555 DO_THREAD_ENTRY_POINT_OFFSET(pSet32Static) 1556 DO_THREAD_ENTRY_POINT_OFFSET(pSet64Instance) 1557 DO_THREAD_ENTRY_POINT_OFFSET(pSet64Static) 1558 DO_THREAD_ENTRY_POINT_OFFSET(pSetObjInstance) 1559 DO_THREAD_ENTRY_POINT_OFFSET(pSetObjStatic) 1560 DO_THREAD_ENTRY_POINT_OFFSET(pGet32Instance) 1561 DO_THREAD_ENTRY_POINT_OFFSET(pGet32Static) 1562 DO_THREAD_ENTRY_POINT_OFFSET(pGet64Instance) 1563 DO_THREAD_ENTRY_POINT_OFFSET(pGet64Static) 1564 DO_THREAD_ENTRY_POINT_OFFSET(pGetObjInstance) 1565 DO_THREAD_ENTRY_POINT_OFFSET(pGetObjStatic) 1566 DO_THREAD_ENTRY_POINT_OFFSET(pHandleFillArrayDataFromCode) 1567 DO_THREAD_ENTRY_POINT_OFFSET(pDecodeJObjectInThread) 1568 DO_THREAD_ENTRY_POINT_OFFSET(pFindNativeMethod) 1569 DO_THREAD_ENTRY_POINT_OFFSET(pLockObjectFromCode) 1570 DO_THREAD_ENTRY_POINT_OFFSET(pUnlockObjectFromCode) 1571 DO_THREAD_ENTRY_POINT_OFFSET(pCmpgDouble) 1572 DO_THREAD_ENTRY_POINT_OFFSET(pCmpgFloat) 1573 DO_THREAD_ENTRY_POINT_OFFSET(pCmplDouble) 1574 DO_THREAD_ENTRY_POINT_OFFSET(pCmplFloat) 1575 DO_THREAD_ENTRY_POINT_OFFSET(pDadd) 1576 DO_THREAD_ENTRY_POINT_OFFSET(pDdiv) 1577 DO_THREAD_ENTRY_POINT_OFFSET(pDmul) 1578 DO_THREAD_ENTRY_POINT_OFFSET(pDsub) 1579 DO_THREAD_ENTRY_POINT_OFFSET(pF2d) 1580 DO_THREAD_ENTRY_POINT_OFFSET(pFmod) 1581 DO_THREAD_ENTRY_POINT_OFFSET(pI2d) 1582 DO_THREAD_ENTRY_POINT_OFFSET(pL2d) 1583 DO_THREAD_ENTRY_POINT_OFFSET(pD2f) 1584 DO_THREAD_ENTRY_POINT_OFFSET(pFadd) 1585 DO_THREAD_ENTRY_POINT_OFFSET(pFdiv) 1586 DO_THREAD_ENTRY_POINT_OFFSET(pFmodf) 1587 DO_THREAD_ENTRY_POINT_OFFSET(pFmul) 1588 DO_THREAD_ENTRY_POINT_OFFSET(pFsub) 1589 DO_THREAD_ENTRY_POINT_OFFSET(pI2f) 1590 DO_THREAD_ENTRY_POINT_OFFSET(pL2f) 1591 DO_THREAD_ENTRY_POINT_OFFSET(pD2iz) 1592 DO_THREAD_ENTRY_POINT_OFFSET(pF2iz) 1593 DO_THREAD_ENTRY_POINT_OFFSET(pIdiv) 1594 DO_THREAD_ENTRY_POINT_OFFSET(pIdivmod) 1595 DO_THREAD_ENTRY_POINT_OFFSET(pD2l) 1596 DO_THREAD_ENTRY_POINT_OFFSET(pF2l) 1597 DO_THREAD_ENTRY_POINT_OFFSET(pLdiv) 1598 DO_THREAD_ENTRY_POINT_OFFSET(pLdivmod) 1599 DO_THREAD_ENTRY_POINT_OFFSET(pLmul) 1600 DO_THREAD_ENTRY_POINT_OFFSET(pShlLong) 1601 DO_THREAD_ENTRY_POINT_OFFSET(pShrLong) 1602 DO_THREAD_ENTRY_POINT_OFFSET(pUshrLong) 1603 DO_THREAD_ENTRY_POINT_OFFSET(pIndexOf) 1604 DO_THREAD_ENTRY_POINT_OFFSET(pMemcmp16) 1605 DO_THREAD_ENTRY_POINT_OFFSET(pStringCompareTo) 1606 DO_THREAD_ENTRY_POINT_OFFSET(pMemcpy) 1607 DO_THREAD_ENTRY_POINT_OFFSET(pFindInterfaceMethodInCache) 1608 DO_THREAD_ENTRY_POINT_OFFSET(pUnresolvedDirectMethodTrampolineFromCode) 1609 DO_THREAD_ENTRY_POINT_OFFSET(pInvokeDirectTrampolineWithAccessCheck) 1610 DO_THREAD_ENTRY_POINT_OFFSET(pInvokeInterfaceTrampoline) 1611 DO_THREAD_ENTRY_POINT_OFFSET(pInvokeInterfaceTrampolineWithAccessCheck) 1612 DO_THREAD_ENTRY_POINT_OFFSET(pInvokeStaticTrampolineWithAccessCheck) 1613 DO_THREAD_ENTRY_POINT_OFFSET(pInvokeSuperTrampolineWithAccessCheck) 1614 DO_THREAD_ENTRY_POINT_OFFSET(pInvokeVirtualTrampolineWithAccessCheck) 1615 DO_THREAD_ENTRY_POINT_OFFSET(pCheckSuspendFromCode) 1616 DO_THREAD_ENTRY_POINT_OFFSET(pTestSuspendFromCode) 1617 DO_THREAD_ENTRY_POINT_OFFSET(pDeliverException) 1618 DO_THREAD_ENTRY_POINT_OFFSET(pThrowAbstractMethodErrorFromCode) 1619 DO_THREAD_ENTRY_POINT_OFFSET(pThrowArrayBoundsFromCode) 1620 DO_THREAD_ENTRY_POINT_OFFSET(pThrowDivZeroFromCode) 1621 DO_THREAD_ENTRY_POINT_OFFSET(pThrowNoSuchMethodFromCode) 1622 DO_THREAD_ENTRY_POINT_OFFSET(pThrowNullPointerFromCode) 1623 DO_THREAD_ENTRY_POINT_OFFSET(pThrowStackOverflowFromCode) 1624 DO_THREAD_ENTRY_POINT_OFFSET(pThrowVerificationErrorFromCode) 1625 { 1626 os << offset; 1627 } 1628#undef DO_THREAD_OFFSET 1629#undef DO_THREAD_ENTRY_POINT_OFFSET 1630} 1631 1632class CatchBlockStackVisitor : public Thread::StackVisitor { 1633 public: 1634 CatchBlockStackVisitor(Class* to_find, Context* ljc) 1635 : to_find_(to_find), long_jump_context_(ljc), native_method_count_(0), 1636 method_tracing_active_(Runtime::Current()->IsMethodTracingActive()) { 1637#ifndef NDEBUG 1638 handler_pc_ = 0xEBADC0DE; 1639 handler_frame_.SetSP(reinterpret_cast<Method**>(0xEBADF00D)); 1640#endif 1641 } 1642 1643 bool VisitFrame(const Frame& fr, uintptr_t pc) { 1644 Method* method = fr.GetMethod(); 1645 if (method == NULL) { 1646 // This is the upcall, we remember the frame and last_pc so that we may 1647 // long jump to them 1648 handler_pc_ = DemanglePc(pc); 1649 handler_frame_ = fr; 1650 return false; // End stack walk. 1651 } 1652 uint32_t dex_pc = DexFile::kDexNoIndex; 1653 if (method->IsRuntimeMethod()) { 1654 // ignore callee save method 1655 DCHECK(method->IsCalleeSaveMethod()); 1656 } else if (method->IsNative()) { 1657 native_method_count_++; 1658 } else { 1659 // Unwind stack when an exception occurs during method tracing 1660 if (UNLIKELY(method_tracing_active_)) { 1661#if !defined(ART_USE_LLVM_COMPILER) 1662 if (IsTraceExitPc(DemanglePc(pc))) { 1663 pc = ManglePc(TraceMethodUnwindFromCode(Thread::Current())); 1664 } 1665#else 1666 UNIMPLEMENTED(FATAL); 1667#endif 1668 } 1669 dex_pc = method->ToDexPC(pc); 1670 } 1671 if (dex_pc != DexFile::kDexNoIndex) { 1672 uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc); 1673 if (found_dex_pc != DexFile::kDexNoIndex) { 1674 handler_pc_ = method->ToNativePC(found_dex_pc); 1675 handler_frame_ = fr; 1676 return false; // End stack walk. 1677 } 1678 } 1679#if !defined(ART_USE_LLVM_COMPILER) 1680 // Caller may be handler, fill in callee saves in context 1681 long_jump_context_->FillCalleeSaves(fr); 1682#endif 1683 return true; // Continue stack walk. 1684 } 1685 1686 // The type of the exception catch block to find 1687 Class* to_find_; 1688 // Frame with found handler or last frame if no handler found 1689 Frame handler_frame_; 1690 // PC to branch to for the handler 1691 uintptr_t handler_pc_; 1692 // Context that will be the target of the long jump 1693 Context* long_jump_context_; 1694 // Number of native methods passed in crawl (equates to number of SIRTs to pop) 1695 uint32_t native_method_count_; 1696 // Is method tracing active? 1697 const bool method_tracing_active_; 1698}; 1699 1700void Thread::DeliverException() { 1701#if !defined(ART_USE_LLVM_COMPILER) 1702 const bool kDebugExceptionDelivery = false; 1703 Throwable* exception = GetException(); // Get exception from thread 1704 CHECK(exception != NULL); 1705 // Don't leave exception visible while we try to find the handler, which may cause class 1706 // resolution. 1707 ClearException(); 1708 if (kDebugExceptionDelivery) { 1709 String* msg = exception->GetDetailMessage(); 1710 std::string str_msg(msg != NULL ? msg->ToModifiedUtf8() : ""); 1711 DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception) 1712 << ": " << str_msg << std::endl); 1713 } 1714 1715 Context* long_jump_context = GetLongJumpContext(); 1716 CatchBlockStackVisitor catch_finder(exception->GetClass(), long_jump_context); 1717 WalkStack(&catch_finder, true); 1718 1719 Method** sp; 1720 uintptr_t throw_native_pc; 1721 Method* throw_method = GetCurrentMethod(&throw_native_pc, &sp); 1722 uintptr_t catch_native_pc = catch_finder.handler_pc_; 1723 Method* catch_method = catch_finder.handler_frame_.GetMethod(); 1724 Dbg::PostException(sp, throw_method, throw_native_pc, catch_method, catch_native_pc, exception); 1725 1726 if (kDebugExceptionDelivery) { 1727 if (catch_method == NULL) { 1728 LOG(INFO) << "Handler is upcall"; 1729 } else { 1730 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 1731 const DexFile& dex_file = 1732 class_linker->FindDexFile(catch_method->GetDeclaringClass()->GetDexCache()); 1733 int line_number = dex_file.GetLineNumFromPC(catch_method, 1734 catch_method->ToDexPC(catch_finder.handler_pc_)); 1735 LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")"; 1736 } 1737 } 1738 SetException(exception); 1739 CHECK_NE(catch_native_pc, 0u); 1740 long_jump_context->SetSP(reinterpret_cast<uintptr_t>(catch_finder.handler_frame_.GetSP())); 1741 long_jump_context->SetPC(catch_native_pc); 1742 long_jump_context->SmashCallerSaves(); 1743 long_jump_context->DoLongJump(); 1744#endif 1745 LOG(FATAL) << "UNREACHABLE"; 1746} 1747 1748Context* Thread::GetLongJumpContext() { 1749 Context* result = long_jump_context_; 1750#if !defined(ART_USE_LLVM_COMPILER) 1751 if (result == NULL) { 1752 result = Context::Create(); 1753 long_jump_context_ = result; 1754 } 1755#endif 1756 return result; 1757} 1758 1759#if !defined(ART_USE_LLVM_COMPILER) 1760Method* Thread::GetCurrentMethod(uintptr_t* pc, Method*** sp) const { 1761 Frame f = top_of_managed_stack_; 1762 Method* m = f.GetMethod(); 1763 uintptr_t native_pc = top_of_managed_stack_pc_; 1764 1765 // We use JNI internally for exception throwing, so it's possible to arrive 1766 // here via a "FromCode" function, in which case there's a synthetic 1767 // callee-save method at the top of the stack. These shouldn't be user-visible, 1768 // so if we find one, skip it and return the compiled method underneath. 1769 if (m != NULL && m->IsCalleeSaveMethod()) { 1770 native_pc = f.GetReturnPC(); 1771 f.Next(); 1772 m = f.GetMethod(); 1773 } 1774 if (pc != NULL) { 1775 *pc = (m != NULL) ? ManglePc(native_pc) : 0; 1776 } 1777 if (sp != NULL) { 1778 *sp = f.GetSP(); 1779 } 1780 return m; 1781} 1782#else 1783Method* Thread::GetCurrentMethod(uintptr_t*, Method***) const { 1784 ShadowFrame* frame = top_shadow_frame_; 1785 if (frame == NULL) { 1786 return NULL; 1787 } 1788 return frame->GetMethod(); 1789} 1790#endif 1791 1792bool Thread::HoldsLock(Object* object) { 1793 if (object == NULL) { 1794 return false; 1795 } 1796 return object->GetThinLockId() == thin_lock_id_; 1797} 1798 1799bool Thread::IsDaemon() { 1800 return gThread_daemon->GetBoolean(peer_); 1801} 1802 1803#if !defined(ART_USE_LLVM_COMPILER) 1804class ReferenceMapVisitor : public Thread::StackVisitor { 1805 public: 1806 ReferenceMapVisitor(Context* context, Heap::RootVisitor* root_visitor, void* arg) : 1807 context_(context), root_visitor_(root_visitor), arg_(arg) { 1808 } 1809 1810 bool VisitFrame(const Frame& frame, uintptr_t pc) { 1811 Method* m = frame.GetMethod(); 1812 if (false) { 1813 LOG(INFO) << "Visiting stack roots in " << PrettyMethod(m) 1814 << StringPrintf("@ PC:%04x", m->ToDexPC(pc)); 1815 } 1816 // Process register map (which native and callee save methods don't have) 1817 if (!m->IsNative() && !m->IsCalleeSaveMethod() && !m->IsProxyMethod()) { 1818 CHECK(m->GetGcMap() != NULL) << PrettyMethod(m); 1819 CHECK_NE(0U, m->GetGcMapLength()) << PrettyMethod(m); 1820 verifier::PcToReferenceMap map(m->GetGcMap(), m->GetGcMapLength()); 1821 const uint8_t* reg_bitmap = map.FindBitMap(m->ToDexPC(pc)); 1822 CHECK(reg_bitmap != NULL); 1823 const VmapTable vmap_table(m->GetVmapTableRaw()); 1824 const DexFile::CodeItem* code_item = MethodHelper(m).GetCodeItem(); 1825 DCHECK(code_item != NULL); // can't be NULL or how would we compile its instructions? 1826 uint32_t core_spills = m->GetCoreSpillMask(); 1827 uint32_t fp_spills = m->GetFpSpillMask(); 1828 size_t frame_size = m->GetFrameSizeInBytes(); 1829 // For all dex registers in the bitmap 1830 size_t num_regs = std::min(map.RegWidth() * 8, 1831 static_cast<size_t>(code_item->registers_size_)); 1832 for (size_t reg = 0; reg < num_regs; ++reg) { 1833 // Does this register hold a reference? 1834 if (TestBitmap(reg, reg_bitmap)) { 1835 uint32_t vmap_offset; 1836 Object* ref; 1837 if (vmap_table.IsInContext(reg, vmap_offset)) { 1838 // Compute the register we need to load from the context 1839 uint32_t spill_mask = m->GetCoreSpillMask(); 1840 CHECK_LT(vmap_offset, static_cast<uint32_t>(__builtin_popcount(spill_mask))); 1841 uint32_t matches = 0; 1842 uint32_t spill_shifts = 0; 1843 while (matches != (vmap_offset + 1)) { 1844 DCHECK_NE(spill_mask, 0u); 1845 matches += spill_mask & 1; // Add 1 if the low bit is set 1846 spill_mask >>= 1; 1847 spill_shifts++; 1848 } 1849 spill_shifts--; // wind back one as we want the last match 1850 ref = reinterpret_cast<Object*>(context_->GetGPR(spill_shifts)); 1851 } else { 1852 ref = reinterpret_cast<Object*>(frame.GetVReg(code_item, core_spills, fp_spills, 1853 frame_size, reg)); 1854 } 1855 if (ref != NULL) { 1856 root_visitor_(ref, arg_); 1857 } 1858 } 1859 } 1860 } 1861 context_->FillCalleeSaves(frame); 1862 return true; 1863 } 1864 1865 private: 1866 bool TestBitmap(int reg, const uint8_t* reg_vector) { 1867 return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0; 1868 } 1869 1870 // Context used to build up picture of callee saves 1871 Context* context_; 1872 // Call-back when we visit a root 1873 Heap::RootVisitor* root_visitor_; 1874 // Argument to call-back 1875 void* arg_; 1876}; 1877#endif 1878 1879void Thread::VisitRoots(Heap::RootVisitor* visitor, void* arg) { 1880 if (exception_ != NULL) { 1881 visitor(exception_, arg); 1882 } 1883 if (peer_ != NULL) { 1884 visitor(peer_, arg); 1885 } 1886 if (pre_allocated_OutOfMemoryError_ != NULL) { 1887 visitor(pre_allocated_OutOfMemoryError_, arg); 1888 } 1889 if (class_loader_override_ != NULL) { 1890 visitor(class_loader_override_, arg); 1891 } 1892 jni_env_->locals.VisitRoots(visitor, arg); 1893 jni_env_->monitors.VisitRoots(visitor, arg); 1894 1895 SirtVisitRoots(visitor, arg); 1896 ShadowFrameVisitRoots(visitor, arg); 1897 1898#if !defined(ART_USE_LLVM_COMPILER) 1899 // Cheat and steal the long jump context. Assume that we are not doing a GC during exception 1900 // delivery. 1901 Context* context = GetLongJumpContext(); 1902 // Visit roots on this thread's stack 1903 ReferenceMapVisitor mapper(context, visitor, arg); 1904 WalkStack(&mapper); 1905#endif 1906} 1907 1908#if VERIFY_OBJECT_ENABLED 1909static void VerifyObject(const Object* obj, void*) { 1910 Runtime::Current()->GetHeap()->VerifyObject(obj); 1911} 1912 1913void Thread::VerifyStack() { 1914#if !defined(ART_USE_LLVM_COMPILER) 1915 UniquePtr<Context> context(Context::Create()); 1916 ReferenceMapVisitor mapper(context.get(), VerifyObject, NULL); 1917 WalkStack(&mapper); 1918#endif 1919} 1920#endif 1921 1922std::ostream& operator<<(std::ostream& os, const Thread& thread) { 1923 thread.Dump(os, false); 1924 return os; 1925} 1926 1927void Thread::CheckSafeToLockOrUnlock(MutexRank rank, bool is_locking) { 1928 if (this == NULL) { 1929 CHECK(Runtime::Current()->IsShuttingDown()); 1930 return; 1931 } 1932 if (is_locking) { 1933 if (held_mutexes_[rank] == 0) { 1934 bool bad_mutexes_held = false; 1935 for (int i = kMaxMutexRank; i > rank; --i) { 1936 if (held_mutexes_[i] != 0) { 1937 LOG(ERROR) << "holding " << static_cast<MutexRank>(i) << " while " << (is_locking ? "locking" : "unlocking") << " " << rank; 1938 bad_mutexes_held = true; 1939 } 1940 } 1941 CHECK(!bad_mutexes_held) << rank; 1942 } 1943 ++held_mutexes_[rank]; 1944 } else { 1945 CHECK_GT(held_mutexes_[rank], 0U) << rank; 1946 --held_mutexes_[rank]; 1947 } 1948} 1949 1950void Thread::CheckSafeToWait(MutexRank rank) { 1951 if (this == NULL) { 1952 CHECK(Runtime::Current()->IsShuttingDown()); 1953 return; 1954 } 1955 bool bad_mutexes_held = false; 1956 for (int i = kMaxMutexRank; i >= 0; --i) { 1957 if (i != rank && held_mutexes_[i] != 0) { 1958 LOG(ERROR) << "holding " << static_cast<MutexRank>(i) << " while doing condition variable wait on " << rank; 1959 bad_mutexes_held = true; 1960 } 1961 } 1962 if (held_mutexes_[rank] == 0) { 1963 LOG(ERROR) << "*not* holding " << rank << " while doing condition variable wait on it"; 1964 bad_mutexes_held = true; 1965 } 1966 CHECK(!bad_mutexes_held); 1967} 1968 1969} // namespace art 1970