thread.cc revision 7571e8b761ebc2c923525e12ea9fcf07e62cb33e
1/* 2 * Copyright (C) 2011 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#define ATRACE_TAG ATRACE_TAG_DALVIK 18 19#include "thread.h" 20 21#include <cutils/trace.h> 22#include <pthread.h> 23#include <signal.h> 24#include <sys/resource.h> 25#include <sys/time.h> 26 27#include <algorithm> 28#include <bitset> 29#include <cerrno> 30#include <iostream> 31#include <list> 32 33#include "arch/context.h" 34#include "base/mutex.h" 35#include "class_linker.h" 36#include "class_linker-inl.h" 37#include "cutils/atomic.h" 38#include "cutils/atomic-inline.h" 39#include "debugger.h" 40#include "dex_file-inl.h" 41#include "entrypoints/entrypoint_utils.h" 42#include "gc_map.h" 43#include "gc/accounting/card_table-inl.h" 44#include "gc/heap.h" 45#include "gc/space/space.h" 46#include "invoke_arg_array_builder.h" 47#include "jni_internal.h" 48#include "mirror/art_field-inl.h" 49#include "mirror/art_method-inl.h" 50#include "mirror/class-inl.h" 51#include "mirror/class_loader.h" 52#include "mirror/object_array-inl.h" 53#include "mirror/stack_trace_element.h" 54#include "monitor.h" 55#include "object_utils.h" 56#include "reflection.h" 57#include "runtime.h" 58#include "scoped_thread_state_change.h" 59#include "ScopedLocalRef.h" 60#include "ScopedUtfChars.h" 61#include "sirt_ref.h" 62#include "stack.h" 63#include "stack_indirect_reference_table.h" 64#include "thread-inl.h" 65#include "thread_list.h" 66#include "utils.h" 67#include "verifier/dex_gc_map.h" 68#include "verifier/method_verifier.h" 69#include "vmap_table.h" 70#include "well_known_classes.h" 71 72namespace art { 73 74bool Thread::is_started_ = false; 75pthread_key_t Thread::pthread_key_self_; 76ConditionVariable* Thread::resume_cond_ = NULL; 77 78static const char* kThreadNameDuringStartup = "<native thread without managed peer>"; 79 80void Thread::InitCardTable() { 81 card_table_ = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin(); 82} 83 84#if !defined(__APPLE__) 85static void UnimplementedEntryPoint() { 86 UNIMPLEMENTED(FATAL); 87} 88#endif 89 90void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints, 91 PortableEntryPoints* ppoints, QuickEntryPoints* qpoints); 92 93void Thread::InitTlsEntryPoints() { 94#if !defined(__APPLE__) // The Mac GCC is too old to accept this code. 95 // Insert a placeholder so we can easily tell if we call an unimplemented entry point. 
96 uintptr_t* begin = reinterpret_cast<uintptr_t*>(&interpreter_entrypoints_); 97 uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(quick_entrypoints_)); 98 for (uintptr_t* it = begin; it != end; ++it) { 99 *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint); 100 } 101 begin = reinterpret_cast<uintptr_t*>(&interpreter_entrypoints_); 102 end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(portable_entrypoints_)); 103 for (uintptr_t* it = begin; it != end; ++it) { 104 *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint); 105 } 106#endif 107 InitEntryPoints(&interpreter_entrypoints_, &jni_entrypoints_, &portable_entrypoints_, 108 &quick_entrypoints_); 109} 110 111void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) { 112 deoptimization_shadow_frame_ = sf; 113} 114 115void Thread::SetDeoptimizationReturnValue(const JValue& ret_val) { 116 deoptimization_return_value_.SetJ(ret_val.GetJ()); 117} 118 119ShadowFrame* Thread::GetAndClearDeoptimizationShadowFrame(JValue* ret_val) { 120 ShadowFrame* sf = deoptimization_shadow_frame_; 121 deoptimization_shadow_frame_ = NULL; 122 ret_val->SetJ(deoptimization_return_value_.GetJ()); 123 return sf; 124} 125 126void Thread::InitTid() { 127 tid_ = ::art::GetTid(); 128} 129 130void Thread::InitAfterFork() { 131 // One thread (us) survived the fork, but we have a new tid so we need to 132 // update the value stashed in this Thread*. 133 InitTid(); 134} 135 136void* Thread::CreateCallback(void* arg) { 137 Thread* self = reinterpret_cast<Thread*>(arg); 138 Runtime* runtime = Runtime::Current(); 139 if (runtime == NULL) { 140 LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self; 141 return NULL; 142 } 143 { 144 // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true 145 // after self->Init(). 146 MutexLock mu(NULL, *Locks::runtime_shutdown_lock_); 147 // Check that if we got here we cannot be shutting down (as shutdown should never have started 148 // while threads are being born). 149 CHECK(!runtime->IsShuttingDown()); 150 self->Init(runtime->GetThreadList(), runtime->GetJavaVM()); 151 Runtime::Current()->EndThreadBirth(); 152 } 153 { 154 ScopedObjectAccess soa(self); 155 156 // Copy peer into self, deleting global reference when done. 157 CHECK(self->jpeer_ != NULL); 158 self->opeer_ = soa.Decode<mirror::Object*>(self->jpeer_); 159 self->GetJniEnv()->DeleteGlobalRef(self->jpeer_); 160 self->jpeer_ = NULL; 161 162 { 163 SirtRef<mirror::String> thread_name(self, self->GetThreadName(soa)); 164 self->SetThreadName(thread_name->ToModifiedUtf8().c_str()); 165 } 166 Dbg::PostThreadStart(self); 167 168 // Invoke the 'run' method of our java.lang.Thread. 169 mirror::Object* receiver = self->opeer_; 170 jmethodID mid = WellKnownClasses::java_lang_Thread_run; 171 mirror::ArtMethod* m = 172 receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid)); 173 JValue result; 174 ArgArray arg_array(NULL, 0); 175 arg_array.Append(reinterpret_cast<uint32_t>(receiver)); 176 m->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V'); 177 } 178 // Detach and delete self. 
179 Runtime::Current()->GetThreadList()->Unregister(self); 180 181 return NULL; 182} 183 184Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, 185 mirror::Object* thread_peer) { 186 mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer); 187 Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetInt(thread_peer))); 188 // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_ 189 // to stop it from going away. 190 if (kIsDebugBuild) { 191 MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_); 192 if (result != NULL && !result->IsSuspended()) { 193 Locks::thread_list_lock_->AssertHeld(soa.Self()); 194 } 195 } 196 return result; 197} 198 199Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, jobject java_thread) { 200 return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread)); 201} 202 203static size_t FixStackSize(size_t stack_size) { 204 // A stack size of zero means "use the default". 205 if (stack_size == 0) { 206 stack_size = Runtime::Current()->GetDefaultStackSize(); 207 } 208 209 // Dalvik used the bionic pthread default stack size for native threads, 210 // so include that here to support apps that expect large native stacks. 211 stack_size += 1 * MB; 212 213 // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN. 214 if (stack_size < PTHREAD_STACK_MIN) { 215 stack_size = PTHREAD_STACK_MIN; 216 } 217 218 // It's likely that callers are trying to ensure they have at least a certain amount of 219 // stack space, so we should add our reserved space on top of what they requested, rather 220 // than implicitly take it away from them. 221 stack_size += Thread::kStackOverflowReservedBytes; 222 223 // Some systems require the stack size to be a multiple of the system page size, so round up. 224 stack_size = RoundUp(stack_size, kPageSize); 225 226 return stack_size; 227} 228 229void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) { 230 CHECK(java_peer != NULL); 231 Thread* self = static_cast<JNIEnvExt*>(env)->self; 232 Runtime* runtime = Runtime::Current(); 233 234 // Atomically start the birth of the thread ensuring the runtime isn't shutting down. 235 bool thread_start_during_shutdown = false; 236 { 237 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 238 if (runtime->IsShuttingDown()) { 239 thread_start_during_shutdown = true; 240 } else { 241 runtime->StartThreadBirth(); 242 } 243 } 244 if (thread_start_during_shutdown) { 245 ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError")); 246 env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown"); 247 return; 248 } 249 250 Thread* child_thread = new Thread(is_daemon); 251 // Use global JNI ref to hold peer live while child thread starts. 252 child_thread->jpeer_ = env->NewGlobalRef(java_peer); 253 stack_size = FixStackSize(stack_size); 254 255 // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to 256 // assign it. 
257 env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 258 reinterpret_cast<jint>(child_thread)); 259 260 pthread_t new_pthread; 261 pthread_attr_t attr; 262 CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread"); 263 CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED"); 264 CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size); 265 int pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, child_thread); 266 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread"); 267 268 if (pthread_create_result != 0) { 269 // pthread_create(3) failed, so clean up. 270 { 271 MutexLock mu(self, *Locks::runtime_shutdown_lock_); 272 runtime->EndThreadBirth(); 273 } 274 // Manually delete the global reference since Thread::Init will not have been run. 275 env->DeleteGlobalRef(child_thread->jpeer_); 276 child_thread->jpeer_ = NULL; 277 delete child_thread; 278 child_thread = NULL; 279 // TODO: remove from thread group? 280 env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0); 281 { 282 std::string msg(StringPrintf("pthread_create (%s stack) failed: %s", 283 PrettySize(stack_size).c_str(), strerror(pthread_create_result))); 284 ScopedObjectAccess soa(env); 285 soa.Self()->ThrowOutOfMemoryError(msg.c_str()); 286 } 287 } 288} 289 290void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) { 291 // This function does all the initialization that must be run by the native thread it applies to. 292 // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so 293 // we can handshake with the corresponding native thread when it's ready.) Check this native 294 // thread hasn't been through here already... 295 CHECK(Thread::Current() == NULL); 296 SetUpAlternateSignalStack(); 297 InitCpu(); 298 InitTlsEntryPoints(); 299 InitCardTable(); 300 InitTid(); 301 // Set pthread_self_ ahead of pthread_setspecific, that makes Thread::Current function, this 302 // avoids pthread_self_ ever being invalid when discovered from Thread::Current(). 303 pthread_self_ = pthread_self(); 304 CHECK(is_started_); 305 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self"); 306 DCHECK_EQ(Thread::Current(), this); 307 308 thin_lock_id_ = thread_list->AllocThreadId(this); 309 InitStackHwm(); 310 311 jni_env_ = new JNIEnvExt(this, java_vm); 312 thread_list->Register(this); 313} 314 315Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group, 316 bool create_peer) { 317 Thread* self; 318 Runtime* runtime = Runtime::Current(); 319 if (runtime == NULL) { 320 LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name; 321 return NULL; 322 } 323 { 324 MutexLock mu(NULL, *Locks::runtime_shutdown_lock_); 325 if (runtime->IsShuttingDown()) { 326 LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name; 327 return NULL; 328 } else { 329 Runtime::Current()->StartThreadBirth(); 330 self = new Thread(as_daemon); 331 self->Init(runtime->GetThreadList(), runtime->GetJavaVM()); 332 Runtime::Current()->EndThreadBirth(); 333 } 334 } 335 336 CHECK_NE(self->GetState(), kRunnable); 337 self->SetState(kNative); 338 339 // If we're the main thread, ClassLinker won't be created until after we're attached, 340 // so that thread needs a two-stage attach. Regular threads don't need this hack. 
341 // In the compiler, all threads need this hack, because no-one's going to be getting 342 // a native peer! 343 if (create_peer) { 344 self->CreatePeer(thread_name, as_daemon, thread_group); 345 } else { 346 // These aren't necessary, but they improve diagnostics for unit tests & command-line tools. 347 if (thread_name != NULL) { 348 self->name_->assign(thread_name); 349 ::art::SetThreadName(thread_name); 350 } 351 } 352 353 return self; 354} 355 356void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) { 357 Runtime* runtime = Runtime::Current(); 358 CHECK(runtime->IsStarted()); 359 JNIEnv* env = jni_env_; 360 361 if (thread_group == NULL) { 362 thread_group = runtime->GetMainThreadGroup(); 363 } 364 ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name)); 365 jint thread_priority = GetNativePriority(); 366 jboolean thread_is_daemon = as_daemon; 367 368 ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread)); 369 if (peer.get() == NULL) { 370 CHECK(IsExceptionPending()); 371 return; 372 } 373 { 374 ScopedObjectAccess soa(this); 375 opeer_ = soa.Decode<mirror::Object*>(peer.get()); 376 } 377 env->CallNonvirtualVoidMethod(peer.get(), 378 WellKnownClasses::java_lang_Thread, 379 WellKnownClasses::java_lang_Thread_init, 380 thread_group, thread_name.get(), thread_priority, thread_is_daemon); 381 AssertNoPendingException(); 382 383 Thread* self = this; 384 DCHECK_EQ(self, Thread::Current()); 385 jni_env_->SetIntField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer, 386 reinterpret_cast<jint>(self)); 387 388 ScopedObjectAccess soa(self); 389 SirtRef<mirror::String> peer_thread_name(soa.Self(), GetThreadName(soa)); 390 if (peer_thread_name.get() == NULL) { 391 // The Thread constructor should have set the Thread.name to a 392 // non-null value. However, because we can run without code 393 // available (in the compiler, in tests), we manually assign the 394 // fields the constructor should have set. 395 soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)-> 396 SetBoolean(opeer_, thread_is_daemon); 397 soa.DecodeField(WellKnownClasses::java_lang_Thread_group)-> 398 SetObject(opeer_, soa.Decode<mirror::Object*>(thread_group)); 399 soa.DecodeField(WellKnownClasses::java_lang_Thread_name)-> 400 SetObject(opeer_, soa.Decode<mirror::Object*>(thread_name.get())); 401 soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)-> 402 SetInt(opeer_, thread_priority); 403 peer_thread_name.reset(GetThreadName(soa)); 404 } 405 // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null. 406 if (peer_thread_name.get() != NULL) { 407 SetThreadName(peer_thread_name->ToModifiedUtf8().c_str()); 408 } 409} 410 411void Thread::SetThreadName(const char* name) { 412 name_->assign(name); 413 ::art::SetThreadName(name); 414 Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM")); 415} 416 417void Thread::InitStackHwm() { 418 void* stack_base; 419 size_t stack_size; 420 GetThreadStack(pthread_self_, stack_base, stack_size); 421 422 // TODO: include this in the thread dumps; potentially useful in SIGQUIT output? 
423 VLOG(threads) << StringPrintf("Native stack is at %p (%s)", stack_base, PrettySize(stack_size).c_str()); 424 425 stack_begin_ = reinterpret_cast<byte*>(stack_base); 426 stack_size_ = stack_size; 427 428 if (stack_size_ <= kStackOverflowReservedBytes) { 429 LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << stack_size_ << " bytes)"; 430 } 431 432 // TODO: move this into the Linux GetThreadStack implementation. 433#if !defined(__APPLE__) 434 // If we're the main thread, check whether we were run with an unlimited stack. In that case, 435 // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection 436 // will be broken because we'll die long before we get close to 2GB. 437 bool is_main_thread = (::art::GetTid() == getpid()); 438 if (is_main_thread) { 439 rlimit stack_limit; 440 if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) { 441 PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed"; 442 } 443 if (stack_limit.rlim_cur == RLIM_INFINITY) { 444 // Find the default stack size for new threads... 445 pthread_attr_t default_attributes; 446 size_t default_stack_size; 447 CHECK_PTHREAD_CALL(pthread_attr_init, (&default_attributes), "default stack size query"); 448 CHECK_PTHREAD_CALL(pthread_attr_getstacksize, (&default_attributes, &default_stack_size), 449 "default stack size query"); 450 CHECK_PTHREAD_CALL(pthread_attr_destroy, (&default_attributes), "default stack size query"); 451 452 // ...and use that as our limit. 453 size_t old_stack_size = stack_size_; 454 stack_size_ = default_stack_size; 455 stack_begin_ += (old_stack_size - stack_size_); 456 VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")" 457 << " to " << PrettySize(stack_size_) 458 << " with base " << reinterpret_cast<void*>(stack_begin_); 459 } 460 } 461#endif 462 463 // Set stack_end_ to the bottom of the stack saving space of stack overflows 464 ResetDefaultStackEnd(); 465 466 // Sanity check. 467 int stack_variable; 468 CHECK_GT(&stack_variable, reinterpret_cast<void*>(stack_end_)); 469} 470 471void Thread::ShortDump(std::ostream& os) const { 472 os << "Thread["; 473 if (GetThinLockId() != 0) { 474 // If we're in kStarting, we won't have a thin lock id or tid yet. 475 os << GetThinLockId() 476 << ",tid=" << GetTid() << ','; 477 } 478 os << GetState() 479 << ",Thread*=" << this 480 << ",peer=" << opeer_ 481 << ",\"" << *name_ << "\"" 482 << "]"; 483} 484 485void Thread::Dump(std::ostream& os) const { 486 DumpState(os); 487 DumpStack(os); 488} 489 490mirror::String* Thread::GetThreadName(const ScopedObjectAccessUnchecked& soa) const { 491 mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name); 492 return (opeer_ != NULL) ? reinterpret_cast<mirror::String*>(f->GetObject(opeer_)) : NULL; 493} 494 495void Thread::GetThreadName(std::string& name) const { 496 name.assign(*name_); 497} 498 499void Thread::AtomicSetFlag(ThreadFlag flag) { 500 android_atomic_or(flag, &state_and_flags_.as_int); 501} 502 503void Thread::AtomicClearFlag(ThreadFlag flag) { 504 android_atomic_and(-1 ^ flag, &state_and_flags_.as_int); 505} 506 507// Attempt to rectify locks so that we dump thread list with required locks before exiting. 
508static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS { 509 LOG(ERROR) << *thread << " suspend count already zero."; 510 Locks::thread_suspend_count_lock_->Unlock(self); 511 if (!Locks::mutator_lock_->IsSharedHeld(self)) { 512 Locks::mutator_lock_->SharedTryLock(self); 513 if (!Locks::mutator_lock_->IsSharedHeld(self)) { 514 LOG(WARNING) << "Dumping thread list without holding mutator_lock_"; 515 } 516 } 517 if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) { 518 Locks::thread_list_lock_->TryLock(self); 519 if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) { 520 LOG(WARNING) << "Dumping thread list without holding thread_list_lock_"; 521 } 522 } 523 std::ostringstream ss; 524 Runtime::Current()->GetThreadList()->DumpLocked(ss); 525 LOG(FATAL) << ss.str(); 526} 527 528void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) { 529 DCHECK(delta == -1 || delta == +1 || delta == -debug_suspend_count_) 530 << delta << " " << debug_suspend_count_ << " " << this; 531 DCHECK_GE(suspend_count_, debug_suspend_count_) << this; 532 Locks::thread_suspend_count_lock_->AssertHeld(self); 533 if (this != self && !IsSuspended()) { 534 Locks::thread_list_lock_->AssertHeld(self); 535 } 536 if (UNLIKELY(delta < 0 && suspend_count_ <= 0)) { 537 UnsafeLogFatalForSuspendCount(self, this); 538 return; 539 } 540 541 suspend_count_ += delta; 542 if (for_debugger) { 543 debug_suspend_count_ += delta; 544 } 545 546 if (suspend_count_ == 0) { 547 AtomicClearFlag(kSuspendRequest); 548 } else { 549 AtomicSetFlag(kSuspendRequest); 550 } 551} 552 553void Thread::RunCheckpointFunction() { 554 CHECK(checkpoint_function_ != NULL); 555 ATRACE_BEGIN("Checkpoint function"); 556 checkpoint_function_->Run(this); 557 ATRACE_END(); 558} 559 560bool Thread::RequestCheckpoint(Closure* function) { 561 CHECK(!ReadFlag(kCheckpointRequest)) << "Already have a pending checkpoint request"; 562 checkpoint_function_ = function; 563 union StateAndFlags old_state_and_flags = state_and_flags_; 564 // We must be runnable to request a checkpoint. 565 old_state_and_flags.as_struct.state = kRunnable; 566 union StateAndFlags new_state_and_flags = old_state_and_flags; 567 new_state_and_flags.as_struct.flags |= kCheckpointRequest; 568 int succeeded = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int, 569 &state_and_flags_.as_int); 570 return succeeded == 0; 571} 572 573void Thread::FullSuspendCheck() { 574 VLOG(threads) << this << " self-suspending"; 575 ATRACE_BEGIN("Full suspend check"); 576 // Make thread appear suspended to other threads, release mutator_lock_. 577 TransitionFromRunnableToSuspended(kSuspended); 578 // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_. 579 TransitionFromSuspendedToRunnable(); 580 ATRACE_END(); 581 VLOG(threads) << this << " self-reviving"; 582} 583 584Thread* Thread::SuspendForDebugger(jobject peer, bool request_suspension, bool* timed_out) { 585 static const useconds_t kTimeoutUs = 30 * 1000000; // 30s. 
586 useconds_t total_delay_us = 0; 587 useconds_t delay_us = 0; 588 bool did_suspend_request = false; 589 *timed_out = false; 590 while (true) { 591 Thread* thread; 592 { 593 ScopedObjectAccess soa(Thread::Current()); 594 Thread* self = soa.Self(); 595 MutexLock mu(self, *Locks::thread_list_lock_); 596 thread = Thread::FromManagedThread(soa, peer); 597 if (thread == NULL) { 598 JNIEnv* env = self->GetJniEnv(); 599 ScopedLocalRef<jstring> scoped_name_string(env, 600 (jstring)env->GetObjectField(peer, 601 WellKnownClasses::java_lang_Thread_name)); 602 ScopedUtfChars scoped_name_chars(env, scoped_name_string.get()); 603 if (scoped_name_chars.c_str() == NULL) { 604 LOG(WARNING) << "No such thread for suspend: " << peer; 605 env->ExceptionClear(); 606 } else { 607 LOG(WARNING) << "No such thread for suspend: " << peer << ":" << scoped_name_chars.c_str(); 608 } 609 610 return NULL; 611 } 612 { 613 MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_); 614 if (request_suspension) { 615 thread->ModifySuspendCount(soa.Self(), +1, true /* for_debugger */); 616 request_suspension = false; 617 did_suspend_request = true; 618 } 619 // IsSuspended on the current thread will fail as the current thread is changed into 620 // Runnable above. As the suspend count is now raised if this is the current thread 621 // it will self suspend on transition to Runnable, making it hard to work with. It's simpler 622 // to just explicitly handle the current thread in the callers to this code. 623 CHECK_NE(thread, soa.Self()) << "Attempt to suspend the current thread for the debugger"; 624 // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend 625 // count, or else we've waited and it has self suspended) or is the current thread, we're 626 // done. 627 if (thread->IsSuspended()) { 628 return thread; 629 } 630 if (total_delay_us >= kTimeoutUs) { 631 LOG(ERROR) << "Thread suspension timed out: " << peer; 632 if (did_suspend_request) { 633 thread->ModifySuspendCount(soa.Self(), -1, true /* for_debugger */); 634 } 635 *timed_out = true; 636 return NULL; 637 } 638 } 639 // Release locks and come out of runnable state. 640 } 641 for (int i = kLockLevelCount - 1; i >= 0; --i) { 642 BaseMutex* held_mutex = Thread::Current()->GetHeldMutex(static_cast<LockLevel>(i)); 643 if (held_mutex != NULL) { 644 LOG(FATAL) << "Holding " << held_mutex->GetName() 645 << " while sleeping for thread suspension"; 646 } 647 } 648 { 649 useconds_t new_delay_us = delay_us * 2; 650 CHECK_GE(new_delay_us, delay_us); 651 if (new_delay_us < 500000) { // Don't allow sleeping to be more than 0.5s. 652 delay_us = new_delay_us; 653 } 654 } 655 if (delay_us == 0) { 656 sched_yield(); 657 // Default to 1 milliseconds (note that this gets multiplied by 2 before the first sleep). 
658 delay_us = 500; 659 } else { 660 usleep(delay_us); 661 total_delay_us += delay_us; 662 } 663 } 664} 665 666void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { 667 std::string group_name; 668 int priority; 669 bool is_daemon = false; 670 Thread* self = Thread::Current(); 671 672 if (self != NULL && thread != NULL && thread->opeer_ != NULL) { 673 ScopedObjectAccessUnchecked soa(self); 674 priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(thread->opeer_); 675 is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(thread->opeer_); 676 677 mirror::Object* thread_group = 678 soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->opeer_); 679 680 if (thread_group != NULL) { 681 mirror::ArtField* group_name_field = 682 soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name); 683 mirror::String* group_name_string = 684 reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group)); 685 group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : "<null>"; 686 } 687 } else { 688 priority = GetNativePriority(); 689 } 690 691 std::string scheduler_group_name(GetSchedulerGroupName(tid)); 692 if (scheduler_group_name.empty()) { 693 scheduler_group_name = "default"; 694 } 695 696 if (thread != NULL) { 697 os << '"' << *thread->name_ << '"'; 698 if (is_daemon) { 699 os << " daemon"; 700 } 701 os << " prio=" << priority 702 << " tid=" << thread->GetThinLockId() 703 << " " << thread->GetState(); 704 if (thread->IsStillStarting()) { 705 os << " (still starting up)"; 706 } 707 os << "\n"; 708 } else { 709 os << '"' << ::art::GetThreadName(tid) << '"' 710 << " prio=" << priority 711 << " (not attached)\n"; 712 } 713 714 if (thread != NULL) { 715 MutexLock mu(self, *Locks::thread_suspend_count_lock_); 716 os << " | group=\"" << group_name << "\"" 717 << " sCount=" << thread->suspend_count_ 718 << " dsCount=" << thread->debug_suspend_count_ 719 << " obj=" << reinterpret_cast<void*>(thread->opeer_) 720 << " self=" << reinterpret_cast<const void*>(thread) << "\n"; 721 } 722 723 os << " | sysTid=" << tid 724 << " nice=" << getpriority(PRIO_PROCESS, tid) 725 << " cgrp=" << scheduler_group_name; 726 if (thread != NULL) { 727 int policy; 728 sched_param sp; 729 CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->pthread_self_, &policy, &sp), __FUNCTION__); 730 os << " sched=" << policy << "/" << sp.sched_priority 731 << " handle=" << reinterpret_cast<void*>(thread->pthread_self_); 732 } 733 os << "\n"; 734 735 // Grab the scheduler stats for this thread. 736 std::string scheduler_stats; 737 if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) { 738 scheduler_stats.resize(scheduler_stats.size() - 1); // Lose the trailing '\n'. 
739 } else { 740 scheduler_stats = "0 0 0"; 741 } 742 743 char native_thread_state = '?'; 744 int utime = 0; 745 int stime = 0; 746 int task_cpu = 0; 747 GetTaskStats(tid, native_thread_state, utime, stime, task_cpu); 748 749 os << " | state=" << native_thread_state 750 << " schedstat=( " << scheduler_stats << " )" 751 << " utm=" << utime 752 << " stm=" << stime 753 << " core=" << task_cpu 754 << " HZ=" << sysconf(_SC_CLK_TCK) << "\n"; 755 if (thread != NULL) { 756 os << " | stack=" << reinterpret_cast<void*>(thread->stack_begin_) << "-" << reinterpret_cast<void*>(thread->stack_end_) 757 << " stackSize=" << PrettySize(thread->stack_size_) << "\n"; 758 } 759} 760 761void Thread::DumpState(std::ostream& os) const { 762 Thread::DumpState(os, this, GetTid()); 763} 764 765struct StackDumpVisitor : public StackVisitor { 766 StackDumpVisitor(std::ostream& os, Thread* thread, Context* context, bool can_allocate) 767 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 768 : StackVisitor(thread, context), os(os), thread(thread), can_allocate(can_allocate), 769 last_method(NULL), last_line_number(0), repetition_count(0), frame_count(0) { 770 } 771 772 virtual ~StackDumpVisitor() { 773 if (frame_count == 0) { 774 os << " (no managed stack frames)\n"; 775 } 776 } 777 778 bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 779 mirror::ArtMethod* m = GetMethod(); 780 if (m->IsRuntimeMethod()) { 781 return true; 782 } 783 const int kMaxRepetition = 3; 784 mirror::Class* c = m->GetDeclaringClass(); 785 const mirror::DexCache* dex_cache = c->GetDexCache(); 786 int line_number = -1; 787 if (dex_cache != NULL) { // be tolerant of bad input 788 const DexFile& dex_file = *dex_cache->GetDexFile(); 789 line_number = dex_file.GetLineNumFromPC(m, GetDexPc()); 790 } 791 if (line_number == last_line_number && last_method == m) { 792 repetition_count++; 793 } else { 794 if (repetition_count >= kMaxRepetition) { 795 os << " ... repeated " << (repetition_count - kMaxRepetition) << " times\n"; 796 } 797 repetition_count = 0; 798 last_line_number = line_number; 799 last_method = m; 800 } 801 if (repetition_count < kMaxRepetition) { 802 os << " at " << PrettyMethod(m, false); 803 if (m->IsNative()) { 804 os << "(Native method)"; 805 } else { 806 mh.ChangeMethod(m); 807 const char* source_file(mh.GetDeclaringClassSourceFile()); 808 os << "(" << (source_file != NULL ? source_file : "unavailable") 809 << ":" << line_number << ")"; 810 } 811 os << "\n"; 812 if (frame_count == 0) { 813 Monitor::DescribeWait(os, thread); 814 } 815 if (can_allocate) { 816 Monitor::VisitLocks(this, DumpLockedObject, &os); 817 } 818 } 819 820 ++frame_count; 821 return true; 822 } 823 824 static void DumpLockedObject(mirror::Object* o, void* context) 825 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 826 std::ostream& os = *reinterpret_cast<std::ostream*>(context); 827 os << " - locked <" << o << "> (a " << PrettyTypeOf(o) << ")\n"; 828 } 829 830 std::ostream& os; 831 const Thread* thread; 832 const bool can_allocate; 833 MethodHelper mh; 834 mirror::ArtMethod* last_method; 835 int last_line_number; 836 int repetition_count; 837 int frame_count; 838}; 839 840static bool ShouldShowNativeStack(const Thread* thread) 841 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 842 ThreadState state = thread->GetState(); 843 844 // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting. 845 if (state > kWaiting && state < kStarting) { 846 return true; 847 } 848 849 // In an Object.wait variant or Thread.sleep? 
That's not interesting. 850 if (state == kTimedWaiting || state == kSleeping || state == kWaiting) { 851 return false; 852 } 853 854 // In some other native method? That's interesting. 855 // We don't just check kNative because native methods will be in state kSuspended if they're 856 // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the 857 // thread-startup states if it's early enough in their life cycle (http://b/7432159). 858 mirror::ArtMethod* current_method = thread->GetCurrentMethod(NULL); 859 return current_method != NULL && current_method->IsNative(); 860} 861 862void Thread::DumpStack(std::ostream& os) const { 863 // TODO: we call this code when dying but may not have suspended the thread ourself. The 864 // IsSuspended check is therefore racy with the use for dumping (normally we inhibit 865 // the race with the thread_suspend_count_lock_). 866 bool dump_for_abort = (gAborting > 0); 867 if (this == Thread::Current() || IsSuspended() || dump_for_abort) { 868 // If we're currently in native code, dump that stack before dumping the managed stack. 869 if (dump_for_abort || ShouldShowNativeStack(this)) { 870 DumpKernelStack(os, GetTid(), " kernel: ", false); 871 DumpNativeStack(os, GetTid(), " native: ", false); 872 } 873 UniquePtr<Context> context(Context::Create()); 874 StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(), !throwing_OutOfMemoryError_); 875 dumper.WalkStack(); 876 } else { 877 os << "Not able to dump stack of thread that isn't suspended"; 878 } 879} 880 881void Thread::ThreadExitCallback(void* arg) { 882 Thread* self = reinterpret_cast<Thread*>(arg); 883 if (self->thread_exit_check_count_ == 0) { 884 LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's going to use a pthread_key_create destructor?): " << *self; 885 CHECK(is_started_); 886 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self"); 887 self->thread_exit_check_count_ = 1; 888 } else { 889 LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self; 890 } 891} 892 893void Thread::Startup() { 894 CHECK(!is_started_); 895 is_started_ = true; 896 { 897 // MutexLock to keep annotalysis happy. 898 // 899 // Note we use NULL for the thread because Thread::Current can 900 // return garbage since (is_started_ == true) and 901 // Thread::pthread_key_self_ is not yet initialized. 902 // This was seen on glibc. 903 MutexLock mu(NULL, *Locks::thread_suspend_count_lock_); 904 resume_cond_ = new ConditionVariable("Thread resumption condition variable", 905 *Locks::thread_suspend_count_lock_); 906 } 907 908 // Allocate a TLS slot. 909 CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key"); 910 911 // Double-check the TLS slot allocation. 912 if (pthread_getspecific(pthread_key_self_) != NULL) { 913 LOG(FATAL) << "Newly-created pthread TLS slot is not NULL"; 914 } 915} 916 917void Thread::FinishStartup() { 918 Runtime* runtime = Runtime::Current(); 919 CHECK(runtime->IsStarted()); 920 921 // Finish attaching the main thread. 
922 ScopedObjectAccess soa(Thread::Current()); 923 Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup()); 924 925 Runtime::Current()->GetClassLinker()->RunRootClinits(); 926} 927 928void Thread::Shutdown() { 929 CHECK(is_started_); 930 is_started_ = false; 931 CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key"); 932 MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_); 933 if (resume_cond_ != NULL) { 934 delete resume_cond_; 935 resume_cond_ = NULL; 936 } 937} 938 939Thread::Thread(bool daemon) 940 : suspend_count_(0), 941 card_table_(NULL), 942 exception_(NULL), 943 stack_end_(NULL), 944 managed_stack_(), 945 jni_env_(NULL), 946 self_(NULL), 947 opeer_(NULL), 948 jpeer_(NULL), 949 stack_begin_(NULL), 950 stack_size_(0), 951 thin_lock_id_(0), 952 tid_(0), 953 wait_mutex_(new Mutex("a thread wait mutex")), 954 wait_cond_(new ConditionVariable("a thread wait condition variable", *wait_mutex_)), 955 wait_monitor_(NULL), 956 interrupted_(false), 957 wait_next_(NULL), 958 monitor_enter_object_(NULL), 959 top_sirt_(NULL), 960 runtime_(NULL), 961 class_loader_override_(NULL), 962 long_jump_context_(NULL), 963 throwing_OutOfMemoryError_(false), 964 debug_suspend_count_(0), 965 debug_invoke_req_(new DebugInvokeReq), 966 deoptimization_shadow_frame_(NULL), 967 instrumentation_stack_(new std::deque<instrumentation::InstrumentationStackFrame>), 968 name_(new std::string(kThreadNameDuringStartup)), 969 daemon_(daemon), 970 pthread_self_(0), 971 no_thread_suspension_(0), 972 last_no_thread_suspension_cause_(NULL), 973 checkpoint_function_(0), 974 thread_exit_check_count_(0) { 975 CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread); 976 state_and_flags_.as_struct.flags = 0; 977 state_and_flags_.as_struct.state = kNative; 978 memset(&held_mutexes_[0], 0, sizeof(held_mutexes_)); 979} 980 981bool Thread::IsStillStarting() const { 982 // You might think you can check whether the state is kStarting, but for much of thread startup, 983 // the thread is in kNative; it might also be in kVmWait. 984 // You might think you can check whether the peer is NULL, but the peer is actually created and 985 // assigned fairly early on, and needs to be. 986 // It turns out that the last thing to change is the thread name; that's a good proxy for "has 987 // this thread _ever_ entered kRunnable". 988 return (jpeer_ == NULL && opeer_ == NULL) || (*name_ == kThreadNameDuringStartup); 989} 990 991void Thread::AssertNoPendingException() const { 992 if (UNLIKELY(IsExceptionPending())) { 993 ScopedObjectAccess soa(Thread::Current()); 994 mirror::Throwable* exception = GetException(NULL); 995 LOG(FATAL) << "No pending exception expected: " << exception->Dump(); 996 } 997} 998 999static void MonitorExitVisitor(const mirror::Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS { 1000 Thread* self = reinterpret_cast<Thread*>(arg); 1001 mirror::Object* entered_monitor = const_cast<mirror::Object*>(object); 1002 if (self->HoldsLock(entered_monitor)) { 1003 LOG(WARNING) << "Calling MonitorExit on object " 1004 << object << " (" << PrettyTypeOf(object) << ")" 1005 << " left locked by native thread " 1006 << *Thread::Current() << " which is detaching"; 1007 entered_monitor->MonitorExit(self); 1008 } 1009} 1010 1011void Thread::Destroy() { 1012 Thread* self = this; 1013 DCHECK_EQ(self, Thread::Current()); 1014 1015 if (opeer_ != NULL) { 1016 ScopedObjectAccess soa(self); 1017 // We may need to call user-supplied managed code, do this before final clean-up. 
1018 HandleUncaughtExceptions(soa); 1019 RemoveFromThreadGroup(soa); 1020 1021 // this.nativePeer = 0; 1022 soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)->SetInt(opeer_, 0); 1023 Dbg::PostThreadDeath(self); 1024 1025 // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone 1026 // who is waiting. 1027 mirror::Object* lock = 1028 soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(opeer_); 1029 // (This conditional is only needed for tests, where Thread.lock won't have been set.) 1030 if (lock != NULL) { 1031 ObjectLock locker(self, lock); 1032 locker.Notify(); 1033 } 1034 } 1035 1036 // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited. 1037 if (jni_env_ != NULL) { 1038 jni_env_->monitors.VisitRoots(MonitorExitVisitor, self); 1039 } 1040} 1041 1042Thread::~Thread() { 1043 if (jni_env_ != NULL && jpeer_ != NULL) { 1044 // If pthread_create fails we don't have a jni env here. 1045 jni_env_->DeleteGlobalRef(jpeer_); 1046 jpeer_ = NULL; 1047 } 1048 opeer_ = NULL; 1049 1050 delete jni_env_; 1051 jni_env_ = NULL; 1052 1053 CHECK_NE(GetState(), kRunnable); 1054 // We may be deleting a still born thread. 1055 SetStateUnsafe(kTerminated); 1056 1057 delete wait_cond_; 1058 delete wait_mutex_; 1059 1060 if (long_jump_context_ != NULL) { 1061 delete long_jump_context_; 1062 } 1063 1064 delete debug_invoke_req_; 1065 delete instrumentation_stack_; 1066 delete name_; 1067 1068 TearDownAlternateSignalStack(); 1069} 1070 1071void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) { 1072 if (!IsExceptionPending()) { 1073 return; 1074 } 1075 ScopedLocalRef<jobject> peer(jni_env_, soa.AddLocalReference<jobject>(opeer_)); 1076 ScopedThreadStateChange tsc(this, kNative); 1077 1078 // Get and clear the exception. 1079 ScopedLocalRef<jthrowable> exception(jni_env_, jni_env_->ExceptionOccurred()); 1080 jni_env_->ExceptionClear(); 1081 1082 // If the thread has its own handler, use that. 1083 ScopedLocalRef<jobject> handler(jni_env_, 1084 jni_env_->GetObjectField(peer.get(), 1085 WellKnownClasses::java_lang_Thread_uncaughtHandler)); 1086 if (handler.get() == NULL) { 1087 // Otherwise use the thread group's default handler. 1088 handler.reset(jni_env_->GetObjectField(peer.get(), WellKnownClasses::java_lang_Thread_group)); 1089 } 1090 1091 // Call the handler. 1092 jni_env_->CallVoidMethod(handler.get(), 1093 WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException, 1094 peer.get(), exception.get()); 1095 1096 // If the handler threw, clear that exception too. 1097 jni_env_->ExceptionClear(); 1098} 1099 1100void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) { 1101 // this.group.removeThread(this); 1102 // group can be null if we're in the compiler or a test. 
1103 mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(opeer_); 1104 if (ogroup != NULL) { 1105 ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup)); 1106 ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(opeer_)); 1107 ScopedThreadStateChange tsc(soa.Self(), kNative); 1108 jni_env_->CallVoidMethod(group.get(), WellKnownClasses::java_lang_ThreadGroup_removeThread, 1109 peer.get()); 1110 } 1111} 1112 1113size_t Thread::NumSirtReferences() { 1114 size_t count = 0; 1115 for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) { 1116 count += cur->NumberOfReferences(); 1117 } 1118 return count; 1119} 1120 1121bool Thread::SirtContains(jobject obj) const { 1122 mirror::Object** sirt_entry = reinterpret_cast<mirror::Object**>(obj); 1123 for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) { 1124 if (cur->Contains(sirt_entry)) { 1125 return true; 1126 } 1127 } 1128 // JNI code invoked from portable code uses shadow frames rather than the SIRT. 1129 return managed_stack_.ShadowFramesContain(sirt_entry); 1130} 1131 1132void Thread::SirtVisitRoots(RootVisitor* visitor, void* arg) { 1133 for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) { 1134 size_t num_refs = cur->NumberOfReferences(); 1135 for (size_t j = 0; j < num_refs; j++) { 1136 mirror::Object* object = cur->GetReference(j); 1137 if (object != NULL) { 1138 visitor(object, arg); 1139 } 1140 } 1141 } 1142} 1143 1144mirror::Object* Thread::DecodeJObject(jobject obj) const { 1145 Locks::mutator_lock_->AssertSharedHeld(this); 1146 if (obj == NULL) { 1147 return NULL; 1148 } 1149 IndirectRef ref = reinterpret_cast<IndirectRef>(obj); 1150 IndirectRefKind kind = GetIndirectRefKind(ref); 1151 mirror::Object* result; 1152 // The "kinds" below are sorted by the frequency we expect to encounter them. 1153 if (kind == kLocal) { 1154 IndirectReferenceTable& locals = jni_env_->locals; 1155 result = const_cast<mirror::Object*>(locals.Get(ref)); 1156 } else if (kind == kSirtOrInvalid) { 1157 // TODO: make stack indirect reference table lookup more efficient 1158 // Check if this is a local reference in the SIRT 1159 if (LIKELY(SirtContains(obj))) { 1160 result = *reinterpret_cast<mirror::Object**>(obj); // Read from SIRT 1161 } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) { 1162 // Assume an invalid local reference is actually a direct pointer. 1163 result = reinterpret_cast<mirror::Object*>(obj); 1164 } else { 1165 result = kInvalidIndirectRefObject; 1166 } 1167 } else if (kind == kGlobal) { 1168 JavaVMExt* vm = Runtime::Current()->GetJavaVM(); 1169 IndirectReferenceTable& globals = vm->globals; 1170 MutexLock mu(const_cast<Thread*>(this), vm->globals_lock); 1171 result = const_cast<mirror::Object*>(globals.Get(ref)); 1172 } else { 1173 DCHECK_EQ(kind, kWeakGlobal); 1174 JavaVMExt* vm = Runtime::Current()->GetJavaVM(); 1175 IndirectReferenceTable& weak_globals = vm->weak_globals; 1176 MutexLock mu(const_cast<Thread*>(this), vm->weak_globals_lock); 1177 result = const_cast<mirror::Object*>(weak_globals.Get(ref)); 1178 if (result == kClearedJniWeakGlobal) { 1179 // This is a special case where it's okay to return NULL. 
1180 return NULL; 1181 } 1182 } 1183 1184 if (UNLIKELY(result == NULL)) { 1185 JniAbortF(NULL, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj); 1186 } else { 1187 if (kIsDebugBuild && (result != kInvalidIndirectRefObject)) { 1188 Runtime::Current()->GetHeap()->VerifyObject(result); 1189 } 1190 } 1191 return result; 1192} 1193 1194// Implements java.lang.Thread.interrupted. 1195bool Thread::Interrupted() { 1196 MutexLock mu(Thread::Current(), *wait_mutex_); 1197 bool interrupted = interrupted_; 1198 interrupted_ = false; 1199 return interrupted; 1200} 1201 1202// Implements java.lang.Thread.isInterrupted. 1203bool Thread::IsInterrupted() { 1204 MutexLock mu(Thread::Current(), *wait_mutex_); 1205 return interrupted_; 1206} 1207 1208void Thread::Interrupt() { 1209 Thread* self = Thread::Current(); 1210 MutexLock mu(self, *wait_mutex_); 1211 if (interrupted_) { 1212 return; 1213 } 1214 interrupted_ = true; 1215 NotifyLocked(self); 1216} 1217 1218void Thread::Notify() { 1219 Thread* self = Thread::Current(); 1220 MutexLock mu(self, *wait_mutex_); 1221 NotifyLocked(self); 1222} 1223 1224void Thread::NotifyLocked(Thread* self) { 1225 if (wait_monitor_ != NULL) { 1226 wait_cond_->Signal(self); 1227 } 1228} 1229 1230class CountStackDepthVisitor : public StackVisitor { 1231 public: 1232 explicit CountStackDepthVisitor(Thread* thread) 1233 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) 1234 : StackVisitor(thread, NULL), 1235 depth_(0), skip_depth_(0), skipping_(true) {} 1236 1237 bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1238 // We want to skip frames up to and including the exception's constructor. 1239 // Note we also skip the frame if it doesn't have a method (namely the callee 1240 // save frame) 1241 mirror::ArtMethod* m = GetMethod(); 1242 if (skipping_ && !m->IsRuntimeMethod() && 1243 !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) { 1244 skipping_ = false; 1245 } 1246 if (!skipping_) { 1247 if (!m->IsRuntimeMethod()) { // Ignore runtime frames (in particular callee save). 1248 ++depth_; 1249 } 1250 } else { 1251 ++skip_depth_; 1252 } 1253 return true; 1254 } 1255 1256 int GetDepth() const { 1257 return depth_; 1258 } 1259 1260 int GetSkipDepth() const { 1261 return skip_depth_; 1262 } 1263 1264 private: 1265 uint32_t depth_; 1266 uint32_t skip_depth_; 1267 bool skipping_; 1268}; 1269 1270class BuildInternalStackTraceVisitor : public StackVisitor { 1271 public: 1272 explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth) 1273 : StackVisitor(thread, NULL), self_(self), 1274 skip_depth_(skip_depth), count_(0), dex_pc_trace_(NULL), method_trace_(NULL) {} 1275 1276 bool Init(int depth) 1277 SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1278 // Allocate method trace with an extra slot that will hold the PC trace 1279 SirtRef<mirror::ObjectArray<mirror::Object> > 1280 method_trace(self_, 1281 Runtime::Current()->GetClassLinker()->AllocObjectArray<mirror::Object>(self_, 1282 depth + 1)); 1283 if (method_trace.get() == NULL) { 1284 return false; 1285 } 1286 mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth); 1287 if (dex_pc_trace == NULL) { 1288 return false; 1289 } 1290 // Save PC trace in last element of method trace, also places it into the 1291 // object graph. 1292 method_trace->Set(depth, dex_pc_trace); 1293 // Set the Object*s and assert that no thread suspension is now possible. 
1294 const char* last_no_suspend_cause = 1295 self_->StartAssertNoThreadSuspension("Building internal stack trace"); 1296 CHECK(last_no_suspend_cause == NULL) << last_no_suspend_cause; 1297 method_trace_ = method_trace.get(); 1298 dex_pc_trace_ = dex_pc_trace; 1299 return true; 1300 } 1301 1302 virtual ~BuildInternalStackTraceVisitor() { 1303 if (method_trace_ != NULL) { 1304 self_->EndAssertNoThreadSuspension(NULL); 1305 } 1306 } 1307 1308 bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { 1309 if (method_trace_ == NULL || dex_pc_trace_ == NULL) { 1310 return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError. 1311 } 1312 if (skip_depth_ > 0) { 1313 skip_depth_--; 1314 return true; 1315 } 1316 mirror::ArtMethod* m = GetMethod(); 1317 if (m->IsRuntimeMethod()) { 1318 return true; // Ignore runtime frames (in particular callee save). 1319 } 1320 method_trace_->Set(count_, m); 1321 dex_pc_trace_->Set(count_, GetDexPc()); 1322 ++count_; 1323 return true; 1324 } 1325 1326 mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const { 1327 return method_trace_; 1328 } 1329 1330 private: 1331 Thread* const self_; 1332 // How many more frames to skip. 1333 int32_t skip_depth_; 1334 // Current position down stack trace. 1335 uint32_t count_; 1336 // Array of dex PC values. 1337 mirror::IntArray* dex_pc_trace_; 1338 // An array of the methods on the stack, the last entry is a reference to the PC trace. 1339 mirror::ObjectArray<mirror::Object>* method_trace_; 1340}; 1341 1342jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const { 1343 // Compute depth of stack 1344 CountStackDepthVisitor count_visitor(const_cast<Thread*>(this)); 1345 count_visitor.WalkStack(); 1346 int32_t depth = count_visitor.GetDepth(); 1347 int32_t skip_depth = count_visitor.GetSkipDepth(); 1348 1349 // Build internal stack trace. 1350 BuildInternalStackTraceVisitor build_trace_visitor(soa.Self(), const_cast<Thread*>(this), 1351 skip_depth); 1352 if (!build_trace_visitor.Init(depth)) { 1353 return NULL; // Allocation failed. 1354 } 1355 build_trace_visitor.WalkStack(); 1356 mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace(); 1357 if (kIsDebugBuild) { 1358 for (int32_t i = 0; i < trace->GetLength(); ++i) { 1359 CHECK(trace->Get(i) != NULL); 1360 } 1361 } 1362 return soa.AddLocalReference<jobjectArray>(trace); 1363} 1364 1365jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal, 1366 jobjectArray output_array, int* stack_depth) { 1367 // Transition into runnable state to work on Object*/Array* 1368 ScopedObjectAccess soa(env); 1369 // Decode the internal stack trace into the depth, method trace and PC trace 1370 mirror::ObjectArray<mirror::Object>* method_trace = 1371 soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal); 1372 int32_t depth = method_trace->GetLength() - 1; 1373 mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth)); 1374 1375 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 1376 1377 jobjectArray result; 1378 mirror::ObjectArray<mirror::StackTraceElement>* java_traces; 1379 if (output_array != NULL) { 1380 // Reuse the array we were given. 1381 result = output_array; 1382 java_traces = soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(output_array); 1383 // ...adjusting the number of frames we'll write to not exceed the array length. 
1384 depth = std::min(depth, java_traces->GetLength()); 1385 } else { 1386 // Create java_trace array and place in local reference table 1387 java_traces = class_linker->AllocStackTraceElementArray(soa.Self(), depth); 1388 if (java_traces == NULL) { 1389 return NULL; 1390 } 1391 result = soa.AddLocalReference<jobjectArray>(java_traces); 1392 } 1393 1394 if (stack_depth != NULL) { 1395 *stack_depth = depth; 1396 } 1397 1398 MethodHelper mh; 1399 for (int32_t i = 0; i < depth; ++i) { 1400 // Prepare parameters for StackTraceElement(String cls, String method, String file, int line) 1401 mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i)); 1402 mh.ChangeMethod(method); 1403 uint32_t dex_pc = pc_trace->Get(i); 1404 int32_t line_number = mh.GetLineNumFromDexPC(dex_pc); 1405 // Allocate element, potentially triggering GC 1406 // TODO: reuse class_name_object via Class::name_? 1407 const char* descriptor = mh.GetDeclaringClassDescriptor(); 1408 CHECK(descriptor != NULL); 1409 std::string class_name(PrettyDescriptor(descriptor)); 1410 SirtRef<mirror::String> class_name_object(soa.Self(), 1411 mirror::String::AllocFromModifiedUtf8(soa.Self(), 1412 class_name.c_str())); 1413 if (class_name_object.get() == NULL) { 1414 return NULL; 1415 } 1416 const char* method_name = mh.GetName(); 1417 CHECK(method_name != NULL); 1418 SirtRef<mirror::String> method_name_object(soa.Self(), 1419 mirror::String::AllocFromModifiedUtf8(soa.Self(), 1420 method_name)); 1421 if (method_name_object.get() == NULL) { 1422 return NULL; 1423 } 1424 const char* source_file = mh.GetDeclaringClassSourceFile(); 1425 SirtRef<mirror::String> source_name_object(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(), 1426 source_file)); 1427 mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(soa.Self(), 1428 class_name_object.get(), 1429 method_name_object.get(), 1430 source_name_object.get(), 1431 line_number); 1432 if (obj == NULL) { 1433 return NULL; 1434 } 1435#ifdef MOVING_GARBAGE_COLLECTOR 1436 // Re-read after potential GC 1437 java_traces = Decode<ObjectArray<Object>*>(soa.Env(), result); 1438 method_trace = down_cast<ObjectArray<Object>*>(Decode<Object*>(soa.Env(), internal)); 1439 pc_trace = down_cast<IntArray*>(method_trace->Get(depth)); 1440#endif 1441 java_traces->Set(i, obj); 1442 } 1443 return result; 1444} 1445 1446void Thread::ThrowNewExceptionF(const ThrowLocation& throw_location, 1447 const char* exception_class_descriptor, const char* fmt, ...) { 1448 va_list args; 1449 va_start(args, fmt); 1450 ThrowNewExceptionV(throw_location, exception_class_descriptor, 1451 fmt, args); 1452 va_end(args); 1453} 1454 1455void Thread::ThrowNewExceptionV(const ThrowLocation& throw_location, 1456 const char* exception_class_descriptor, 1457 const char* fmt, va_list ap) { 1458 std::string msg; 1459 StringAppendV(&msg, fmt, ap); 1460 ThrowNewException(throw_location, exception_class_descriptor, msg.c_str()); 1461} 1462 1463void Thread::ThrowNewException(const ThrowLocation& throw_location, const char* exception_class_descriptor, 1464 const char* msg) { 1465 AssertNoPendingException(); // Callers should either clear or call ThrowNewWrappedException. 1466 ThrowNewWrappedException(throw_location, exception_class_descriptor, msg); 1467} 1468 1469void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location, 1470 const char* exception_class_descriptor, 1471 const char* msg) { 1472 DCHECK_EQ(this, Thread::Current()); 1473 // Ensure we don't forget arguments over object allocation. 
1474 SirtRef<mirror::Object> saved_throw_this(this, throw_location.GetThis()); 1475 SirtRef<mirror::ArtMethod> saved_throw_method(this, throw_location.GetMethod()); 1476 // Ignore the cause throw location. TODO: should we report this as a re-throw? 1477 SirtRef<mirror::Throwable> cause(this, GetException(NULL)); 1478 ClearException(); 1479 Runtime* runtime = Runtime::Current(); 1480 1481 mirror::ClassLoader* cl = NULL; 1482 if (throw_location.GetMethod() != NULL) { 1483 cl = throw_location.GetMethod()->GetDeclaringClass()->GetClassLoader(); 1484 } 1485 SirtRef<mirror::Class> 1486 exception_class(this, runtime->GetClassLinker()->FindClass(exception_class_descriptor, cl)); 1487 if (UNLIKELY(exception_class.get() == NULL)) { 1488 CHECK(IsExceptionPending()); 1489 LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor); 1490 return; 1491 } 1492 1493 if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(exception_class.get(), true, true))) { 1494 DCHECK(IsExceptionPending()); 1495 return; 1496 } 1497 DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass()); 1498 SirtRef<mirror::Throwable> exception(this, 1499 down_cast<mirror::Throwable*>(exception_class->AllocObject(this))); 1500 1501 // Choose an appropriate constructor and set up the arguments. 1502 const char* signature; 1503 SirtRef<mirror::String> msg_string(this, NULL); 1504 if (msg != NULL) { 1505 // Ensure we remember this and the method over the String allocation. 1506 msg_string.reset(mirror::String::AllocFromModifiedUtf8(this, msg)); 1507 if (UNLIKELY(msg_string.get() == NULL)) { 1508 CHECK(IsExceptionPending()); // OOME. 1509 return; 1510 } 1511 if (cause.get() == NULL) { 1512 signature = "(Ljava/lang/String;)V"; 1513 } else { 1514 signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V"; 1515 } 1516 } else { 1517 if (cause.get() == NULL) { 1518 signature = "()V"; 1519 } else { 1520 signature = "(Ljava/lang/Throwable;)V"; 1521 } 1522 } 1523 mirror::ArtMethod* exception_init_method = 1524 exception_class->FindDeclaredDirectMethod("<init>", signature); 1525 1526 CHECK(exception_init_method != NULL) << "No <init>" << signature << " in " 1527 << PrettyDescriptor(exception_class_descriptor); 1528 1529 if (UNLIKELY(!runtime->IsStarted())) { 1530 // Something is trying to throw an exception without a started runtime, which is the common 1531 // case in the compiler. We won't be able to invoke the constructor of the exception, so set 1532 // the exception fields directly. 
    if (msg != NULL) {
      exception->SetDetailMessage(msg_string.get());
    }
    if (cause.get() != NULL) {
      exception->SetCause(cause.get());
    }
    ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
                                         throw_location.GetDexPc());
    SetException(gc_safe_throw_location, exception.get());
  } else {
    ArgArray args("VLL", 3);
    args.Append(reinterpret_cast<uint32_t>(exception.get()));
    if (msg != NULL) {
      args.Append(reinterpret_cast<uint32_t>(msg_string.get()));
    }
    if (cause.get() != NULL) {
      args.Append(reinterpret_cast<uint32_t>(cause.get()));
    }
    JValue result;
    exception_init_method->Invoke(this, args.GetArray(), args.GetNumBytes(), &result, 'V');
    if (LIKELY(!IsExceptionPending())) {
      ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
                                           throw_location.GetDexPc());
      SetException(gc_safe_throw_location, exception.get());
    }
  }
}

void Thread::ThrowOutOfMemoryError(const char* msg) {
  LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
                             msg, (throwing_OutOfMemoryError_ ? " (recursive case)" : ""));
  ThrowLocation throw_location = GetCurrentLocationForThrow();
  if (!throwing_OutOfMemoryError_) {
    throwing_OutOfMemoryError_ = true;
    ThrowNewException(throw_location, "Ljava/lang/OutOfMemoryError;", msg);
    throwing_OutOfMemoryError_ = false;
  } else {
    Dump(LOG(ERROR)); // The pre-allocated OOME has no stack, so help out and log one.
    SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
  }
}

Thread* Thread::CurrentFromGdb() {
  return Thread::Current();
}

void Thread::DumpFromGdb() const {
  std::ostringstream ss;
  Dump(ss);
  std::string str(ss.str());
  // log to stderr for debugging command line processes
  std::cerr << str;
#ifdef HAVE_ANDROID_OS
  // log to logcat for debugging frameworks processes
  LOG(INFO) << str;
#endif
}

struct EntryPointInfo {
  uint32_t offset;
  const char* name;
};
#define INTERPRETER_ENTRY_POINT_INFO(x) { INTERPRETER_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
#define JNI_ENTRY_POINT_INFO(x) { JNI_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
#define PORTABLE_ENTRY_POINT_INFO(x) { PORTABLE_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
#define QUICK_ENTRY_POINT_INFO(x) { QUICK_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
static const EntryPointInfo gThreadEntryPointInfo[] = {
  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge),
  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge),
  JNI_ENTRY_POINT_INFO(pDlsymLookup),
  PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampoline),
  PORTABLE_ENTRY_POINT_INFO(pPortableToInterpreterBridge),
  QUICK_ENTRY_POINT_INFO(pAllocArray),
  QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck),
  QUICK_ENTRY_POINT_INFO(pAllocObject),
  QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck),
  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray),
  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck),
  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial),
  QUICK_ENTRY_POINT_INFO(pCanPutArrayElement),
  QUICK_ENTRY_POINT_INFO(pCheckCast),
  QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage),
  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess),
  QUICK_ENTRY_POINT_INFO(pInitializeType),
  QUICK_ENTRY_POINT_INFO(pResolveString),
  QUICK_ENTRY_POINT_INFO(pSet32Instance),
  QUICK_ENTRY_POINT_INFO(pSet32Static),
  QUICK_ENTRY_POINT_INFO(pSet64Instance),
  QUICK_ENTRY_POINT_INFO(pSet64Static),
  QUICK_ENTRY_POINT_INFO(pSetObjInstance),
  QUICK_ENTRY_POINT_INFO(pSetObjStatic),
  QUICK_ENTRY_POINT_INFO(pGet32Instance),
  QUICK_ENTRY_POINT_INFO(pGet32Static),
  QUICK_ENTRY_POINT_INFO(pGet64Instance),
  QUICK_ENTRY_POINT_INFO(pGet64Static),
  QUICK_ENTRY_POINT_INFO(pGetObjInstance),
  QUICK_ENTRY_POINT_INFO(pGetObjStatic),
  QUICK_ENTRY_POINT_INFO(pHandleFillArrayData),
  QUICK_ENTRY_POINT_INFO(pJniMethodStart),
  QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized),
  QUICK_ENTRY_POINT_INFO(pJniMethodEnd),
  QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized),
  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference),
  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
  QUICK_ENTRY_POINT_INFO(pLockObject),
  QUICK_ENTRY_POINT_INFO(pUnlockObject),
  QUICK_ENTRY_POINT_INFO(pCmpgDouble),
  QUICK_ENTRY_POINT_INFO(pCmpgFloat),
  QUICK_ENTRY_POINT_INFO(pCmplDouble),
  QUICK_ENTRY_POINT_INFO(pCmplFloat),
  QUICK_ENTRY_POINT_INFO(pFmod),
  QUICK_ENTRY_POINT_INFO(pSqrt),
  QUICK_ENTRY_POINT_INFO(pL2d),
  QUICK_ENTRY_POINT_INFO(pFmodf),
  QUICK_ENTRY_POINT_INFO(pL2f),
  QUICK_ENTRY_POINT_INFO(pD2iz),
  QUICK_ENTRY_POINT_INFO(pF2iz),
  QUICK_ENTRY_POINT_INFO(pIdivmod),
  QUICK_ENTRY_POINT_INFO(pD2l),
  QUICK_ENTRY_POINT_INFO(pF2l),
  QUICK_ENTRY_POINT_INFO(pLdiv),
  QUICK_ENTRY_POINT_INFO(pLdivmod),
  QUICK_ENTRY_POINT_INFO(pLmul),
  QUICK_ENTRY_POINT_INFO(pShlLong),
  QUICK_ENTRY_POINT_INFO(pShrLong),
  QUICK_ENTRY_POINT_INFO(pUshrLong),
  QUICK_ENTRY_POINT_INFO(pIndexOf),
  QUICK_ENTRY_POINT_INFO(pMemcmp16),
  QUICK_ENTRY_POINT_INFO(pStringCompareTo),
  QUICK_ENTRY_POINT_INFO(pMemcpy),
  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline),
  QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge),
  QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
  QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
  QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
  QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
  QUICK_ENTRY_POINT_INFO(pCheckSuspend),
  QUICK_ENTRY_POINT_INFO(pTestSuspend),
  QUICK_ENTRY_POINT_INFO(pDeliverException),
  QUICK_ENTRY_POINT_INFO(pThrowArrayBounds),
  QUICK_ENTRY_POINT_INFO(pThrowDivZero),
  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod),
  QUICK_ENTRY_POINT_INFO(pThrowNullPointer),
  QUICK_ENTRY_POINT_INFO(pThrowStackOverflow),
};
#undef QUICK_ENTRY_POINT_INFO

void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) {
  CHECK_EQ(size_of_pointers, 4U); // TODO: support 64-bit targets.
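  // Translate a raw byte offset into Thread into a readable name: first try the named members
  // checked below, then fall back to scanning the entry point table.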
#define DO_THREAD_OFFSET(x) \
    if (offset == static_cast<uint32_t>(OFFSETOF_VOLATILE_MEMBER(Thread, x))) { \
      os << # x; \
      return; \
    }
  DO_THREAD_OFFSET(state_and_flags_);
  DO_THREAD_OFFSET(card_table_);
  DO_THREAD_OFFSET(exception_);
  DO_THREAD_OFFSET(opeer_);
  DO_THREAD_OFFSET(jni_env_);
  DO_THREAD_OFFSET(self_);
  DO_THREAD_OFFSET(stack_end_);
  DO_THREAD_OFFSET(suspend_count_);
  DO_THREAD_OFFSET(thin_lock_id_);
  // DO_THREAD_OFFSET(top_of_managed_stack_);
  // DO_THREAD_OFFSET(top_of_managed_stack_pc_);
  DO_THREAD_OFFSET(top_sirt_);
#undef DO_THREAD_OFFSET

  size_t entry_point_count = arraysize(gThreadEntryPointInfo);
  CHECK_EQ(entry_point_count * size_of_pointers,
           sizeof(InterpreterEntryPoints) + sizeof(JniEntryPoints) + sizeof(PortableEntryPoints) +
           sizeof(QuickEntryPoints));
  uint32_t expected_offset = OFFSETOF_MEMBER(Thread, interpreter_entrypoints_);
  for (size_t i = 0; i < entry_point_count; ++i) {
    CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name;
    expected_offset += size_of_pointers;
    if (gThreadEntryPointInfo[i].offset == offset) {
      os << gThreadEntryPointInfo[i].name;
      return;
    }
  }
  os << offset;
}

static const bool kDebugExceptionDelivery = false;
class CatchBlockStackVisitor : public StackVisitor {
 public:
  CatchBlockStackVisitor(Thread* self, const ThrowLocation& throw_location,
                         mirror::Throwable* exception, bool is_deoptimization)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(self, self->GetLongJumpContext()),
        self_(self), exception_(exception), is_deoptimization_(is_deoptimization),
        to_find_(is_deoptimization ? NULL : exception->GetClass()), throw_location_(throw_location),
        handler_quick_frame_(NULL), handler_quick_frame_pc_(0), handler_dex_pc_(0),
        native_method_count_(0), clear_exception_(false),
        method_tracing_active_(is_deoptimization ||
                               Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
        instrumentation_frames_to_pop_(0), top_shadow_frame_(NULL), prev_shadow_frame_(NULL) {
    // Exception not in root sets, can't allow GC.
    last_no_assert_suspension_cause_ = self->StartAssertNoThreadSuspension("Finding catch block");
  }

  ~CatchBlockStackVisitor() {
    LOG(FATAL) << "UNREACHABLE"; // Expected to take long jump.
  }

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* method = GetMethod();
    if (method == NULL) {
      // This is the upcall; we remember the frame and last pc so that we may long jump to them.
      handler_quick_frame_pc_ = GetCurrentQuickFramePc();
      handler_quick_frame_ = GetCurrentQuickFrame();
      return false; // End stack walk.
    } else {
      if (UNLIKELY(method_tracing_active_ &&
                   GetQuickInstrumentationExitPc() == GetReturnPc())) {
        // Keep count of the number of unwinds during instrumentation.
        instrumentation_frames_to_pop_++;
      }
      if (method->IsRuntimeMethod()) {
        // Ignore callee save method.
        DCHECK(method->IsCalleeSaveMethod());
        return true;
      } else if (is_deoptimization_) {
        return HandleDeoptimization(method);
      } else {
        return HandleTryItems(method);
      }
    }
  }

  bool HandleTryItems(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uint32_t dex_pc = DexFile::kDexNoIndex;
    if (method->IsNative()) {
      native_method_count_++;
    } else {
      dex_pc = GetDexPc();
    }
    if (dex_pc != DexFile::kDexNoIndex) {
      uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc, &clear_exception_);
      if (found_dex_pc != DexFile::kDexNoIndex) {
        handler_dex_pc_ = found_dex_pc;
        handler_quick_frame_pc_ = method->ToNativePc(found_dex_pc);
        handler_quick_frame_ = GetCurrentQuickFrame();
        return false; // End stack walk.
      }
    }
    return true; // Continue stack walk.
  }

  bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    MethodHelper mh(m);
    const DexFile::CodeItem* code_item = mh.GetCodeItem();
    CHECK(code_item != NULL);
    uint16_t num_regs = code_item->registers_size_;
    uint32_t dex_pc = GetDexPc();
    const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
    uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
    ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, m, new_dex_pc);
    verifier::MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(),
                                      mh.GetClassDefIndex(), code_item,
                                      m->GetDexMethodIndex(), m, m->GetAccessFlags(), false, true);
    verifier.Verify();
    std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
    for (uint16_t reg = 0; reg < num_regs; reg++) {
      VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
      switch (kind) {
        case kUndefined:
          new_frame->SetVReg(reg, 0xEBADDE09);
          break;
        case kConstant:
          new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
          break;
        case kReferenceVReg:
          new_frame->SetVRegReference(reg,
                                      reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kind)));
          break;
        default:
          new_frame->SetVReg(reg, GetVReg(m, reg, kind));
          break;
      }
    }
    if (prev_shadow_frame_ != NULL) {
      prev_shadow_frame_->SetLink(new_frame);
    } else {
      top_shadow_frame_ = new_frame;
    }
    prev_shadow_frame_ = new_frame;
    return true;
  }

  void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* catch_method = *handler_quick_frame_;
    if (catch_method == NULL) {
      if (kDebugExceptionDelivery) {
        LOG(INFO) << "Handler is upcall";
      }
    } else {
      CHECK(!is_deoptimization_);
      if (kDebugExceptionDelivery) {
        const DexFile& dex_file = *catch_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
        int line_number = dex_file.GetLineNumFromPC(catch_method, handler_dex_pc_);
        LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
      }
    }
    if (clear_exception_) {
      // Exception was cleared as part of delivery.
      DCHECK(!self_->IsExceptionPending());
    } else {
      // Put exception back in root set with clear throw location.
      self_->SetException(ThrowLocation(), exception_);
    }
    self_->EndAssertNoThreadSuspension(last_no_assert_suspension_cause_);
    // Do instrumentation events after allowing thread suspension again.
    instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
    for (size_t i = 0; i < instrumentation_frames_to_pop_; ++i) {
      // We pop the instrumentation stack here so as not to corrupt it during the stack walk.
      if (i != instrumentation_frames_to_pop_ - 1 || self_->GetInstrumentationStack()->front().method_ != catch_method) {
        // Don't pop the instrumentation frame of the catch handler.
        instrumentation->PopMethodForUnwind(self_, is_deoptimization_);
      }
    }
    if (!is_deoptimization_) {
      instrumentation->ExceptionCaughtEvent(self_, throw_location_, catch_method, handler_dex_pc_,
                                            exception_);
    } else {
      // TODO: proper return value.
      self_->SetDeoptimizationShadowFrame(top_shadow_frame_);
    }
    // Place context back on thread so it will be available when we continue.
    self_->ReleaseLongJumpContext(context_);
    context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
    CHECK_NE(handler_quick_frame_pc_, 0u);
    context_->SetPC(handler_quick_frame_pc_);
    context_->SmashCallerSaves();
    context_->DoLongJump();
  }

 private:
  Thread* const self_;
  mirror::Throwable* const exception_;
  const bool is_deoptimization_;
  // The type of the exception catch block to find.
  mirror::Class* const to_find_;
  // Location of the throw.
  const ThrowLocation& throw_location_;
  // Quick frame with found handler or last frame if no handler found.
  mirror::ArtMethod** handler_quick_frame_;
  // PC to branch to for the handler.
  uintptr_t handler_quick_frame_pc_;
  // Associated dex PC.
  uint32_t handler_dex_pc_;
  // Number of native methods passed in crawl (equates to number of SIRTs to pop)
  uint32_t native_method_count_;
  // Should the exception be cleared as the catch block has no move-exception?
  bool clear_exception_;
  // Is method tracing active?
  const bool method_tracing_active_;
  // Support for nesting no thread suspension checks.
  const char* last_no_assert_suspension_cause_;
  // Number of frames to pop in long jump.
  size_t instrumentation_frames_to_pop_;
  ShadowFrame* top_shadow_frame_;
  ShadowFrame* prev_shadow_frame_;
};

void Thread::QuickDeliverException() {
  // Get exception from thread.
  ThrowLocation throw_location;
  mirror::Throwable* exception = GetException(&throw_location);
  CHECK(exception != NULL);
  // Don't leave exception visible while we try to find the handler, which may cause class
  // resolution.
  ClearException();
  bool is_deoptimization = (exception == reinterpret_cast<mirror::Throwable*>(-1));
  if (kDebugExceptionDelivery) {
    if (!is_deoptimization) {
      mirror::String* msg = exception->GetDetailMessage();
      std::string str_msg(msg != NULL ? msg->ToModifiedUtf8() : "");
      DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
                << ": " << str_msg << "\n");
    } else {
      DumpStack(LOG(INFO) << "Deoptimizing: ");
    }
  }
  CatchBlockStackVisitor catch_finder(this, throw_location, exception, is_deoptimization);
  catch_finder.WalkStack(true);
  catch_finder.DoLongJump();
  LOG(FATAL) << "UNREACHABLE";
}

Context* Thread::GetLongJumpContext() {
  Context* result = long_jump_context_;
  if (result == NULL) {
    result = Context::Create();
  } else {
    long_jump_context_ = NULL; // Avoid context being shared.
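    // Hand out the cached context after resetting it; ReleaseLongJumpContext() stores it back
    // on the thread for later reuse.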
    result->Reset();
  }
  return result;
}

struct CurrentMethodVisitor : public StackVisitor {
  CurrentMethodVisitor(Thread* thread, Context* context)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), this_object_(NULL), method_(NULL), dex_pc_(0) {}
  virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      // Continue if this is a runtime method.
      return true;
    }
    if (context_ != NULL) {
      this_object_ = GetThisObject();
    }
    method_ = m;
    dex_pc_ = GetDexPc();
    return false;
  }
  mirror::Object* this_object_;
  mirror::ArtMethod* method_;
  uint32_t dex_pc_;
};

mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc) const {
  CurrentMethodVisitor visitor(const_cast<Thread*>(this), NULL);
  visitor.WalkStack(false);
  if (dex_pc != NULL) {
    *dex_pc = visitor.dex_pc_;
  }
  return visitor.method_;
}

ThrowLocation Thread::GetCurrentLocationForThrow() {
  Context* context = GetLongJumpContext();
  CurrentMethodVisitor visitor(this, context);
  visitor.WalkStack(false);
  ReleaseLongJumpContext(context);
  return ThrowLocation(visitor.this_object_, visitor.method_, visitor.dex_pc_);
}

bool Thread::HoldsLock(mirror::Object* object) {
  if (object == NULL) {
    return false;
  }
  return object->GetThinLockId() == thin_lock_id_;
}

// RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
template <typename RootVisitor>
class ReferenceMapVisitor : public StackVisitor {
 public:
  ReferenceMapVisitor(Thread* thread, Context* context, const RootVisitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), visitor_(visitor) {}

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (false) {
      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
                << StringPrintf("@ PC:%04x", GetDexPc());
    }
    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
    if (shadow_frame != NULL) {
      mirror::ArtMethod* m = shadow_frame->GetMethod();
      size_t num_regs = shadow_frame->NumberOfVRegs();
      if (m->IsNative() || shadow_frame->HasReferenceArray()) {
        // SIRT for JNI or References for interpreter.
        for (size_t reg = 0; reg < num_regs; ++reg) {
          mirror::Object* ref = shadow_frame->GetVRegReference(reg);
          if (ref != NULL) {
            visitor_(ref, reg, this);
          }
        }
      } else {
        // Java method.
        // The portable path uses DexGcMap and stores it in Method.native_gc_map_.
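        // As decoded below, the map starts with a four-byte big-endian length, followed by the
        // DexPcToReferenceMap data itself.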
        const uint8_t* gc_map = m->GetNativeGcMap();
        CHECK(gc_map != NULL) << PrettyMethod(m);
        uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
                                                       (gc_map[1] << 16) |
                                                       (gc_map[2] << 8) |
                                                       (gc_map[3] << 0));
        verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
        uint32_t dex_pc = GetDexPc();
        const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
        DCHECK(reg_bitmap != NULL);
        num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
        for (size_t reg = 0; reg < num_regs; ++reg) {
          if (TestBitmap(reg, reg_bitmap)) {
            mirror::Object* ref = shadow_frame->GetVRegReference(reg);
            if (ref != NULL) {
              visitor_(ref, reg, this);
            }
          }
        }
      }
    } else {
      mirror::ArtMethod* m = GetMethod();
      // Process register map (which native and runtime methods don't have)
      if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
        const uint8_t* native_gc_map = m->GetNativeGcMap();
        CHECK(native_gc_map != NULL) << PrettyMethod(m);
        mh_.ChangeMethod(m);
        const DexFile::CodeItem* code_item = mh_.GetCodeItem();
        DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
        NativePcOffsetToReferenceMap map(native_gc_map);
        size_t num_regs = std::min(map.RegWidth() * 8,
                                   static_cast<size_t>(code_item->registers_size_));
        if (num_regs > 0) {
          const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
          DCHECK(reg_bitmap != NULL);
          const VmapTable vmap_table(m->GetVmapTable());
          uint32_t core_spills = m->GetCoreSpillMask();
          uint32_t fp_spills = m->GetFpSpillMask();
          size_t frame_size = m->GetFrameSizeInBytes();
          // For all dex registers in the bitmap
          mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
          DCHECK(cur_quick_frame != NULL);
          for (size_t reg = 0; reg < num_regs; ++reg) {
            // Does this register hold a reference?
            if (TestBitmap(reg, reg_bitmap)) {
              uint32_t vmap_offset;
              mirror::Object* ref;
              if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
                uintptr_t val = GetGPR(vmap_table.ComputeRegister(core_spills, vmap_offset,
                                                                  kReferenceVReg));
                ref = reinterpret_cast<mirror::Object*>(val);
              } else {
                ref = reinterpret_cast<mirror::Object*>(GetVReg(cur_quick_frame, code_item,
                                                                core_spills, fp_spills, frame_size,
                                                                reg));
              }

              if (ref != NULL) {
                visitor_(ref, reg, this);
              }
            }
          }
        }
      }
    }
    return true;
  }

 private:
  static bool TestBitmap(int reg, const uint8_t* reg_vector) {
    return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
  }

  // Visitor for when we visit a root.
  const RootVisitor& visitor_;

  // A method helper we keep around to avoid dex file/cache re-computations.
  MethodHelper mh_;
};

class RootCallbackVisitor {
 public:
  RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {}

  void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const {
    visitor_(obj, arg_);
  }

 private:
  RootVisitor* visitor_;
  void* arg_;
};

class VerifyCallbackVisitor {
 public:
  VerifyCallbackVisitor(VerifyRootVisitor* visitor, void* arg)
      : visitor_(visitor),
        arg_(arg) {
  }

  void operator()(const mirror::Object* obj, size_t vreg, const StackVisitor* visitor) const {
    visitor_(obj, arg_, vreg, visitor);
  }

 private:
  VerifyRootVisitor* const visitor_;
  void* const arg_;
};

struct VerifyRootWrapperArg {
  VerifyRootVisitor* visitor;
  void* arg;
};

static void VerifyRootWrapperCallback(const mirror::Object* root, void* arg) {
  VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
  wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
}

void Thread::VerifyRoots(VerifyRootVisitor* visitor, void* arg) {
  // We need to map from a RootVisitor to VerifyRootVisitor, so pass in nulls for arguments we
  // don't have.
  VerifyRootWrapperArg wrapperArg;
  wrapperArg.arg = arg;
  wrapperArg.visitor = visitor;

  if (opeer_ != NULL) {
    VerifyRootWrapperCallback(opeer_, &wrapperArg);
  }
  if (exception_ != NULL) {
    VerifyRootWrapperCallback(exception_, &wrapperArg);
  }
  throw_location_.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
  if (class_loader_override_ != NULL) {
    VerifyRootWrapperCallback(class_loader_override_, &wrapperArg);
  }
  jni_env_->locals.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
  jni_env_->monitors.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);

  SirtVisitRoots(VerifyRootWrapperCallback, &wrapperArg);

  // Visit roots on this thread's stack
  Context* context = GetLongJumpContext();
  VerifyCallbackVisitor visitorToCallback(visitor, arg);
  ReferenceMapVisitor<VerifyCallbackVisitor> mapper(this, context, visitorToCallback);
  mapper.WalkStack();
  ReleaseLongJumpContext(context);

  std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack = GetInstrumentationStack();
  typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It;
  for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end; ++it) {
    mirror::Object* this_object = (*it).this_object_;
    if (this_object != NULL) {
      VerifyRootWrapperCallback(this_object, &wrapperArg);
    }
    mirror::ArtMethod* method = (*it).method_;
    VerifyRootWrapperCallback(method, &wrapperArg);
  }
}

void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
  if (opeer_ != NULL) {
    visitor(opeer_, arg);
  }
  if (exception_ != NULL) {
    visitor(exception_, arg);
  }
  throw_location_.VisitRoots(visitor, arg);
  if (class_loader_override_ != NULL) {
    visitor(class_loader_override_, arg);
  }
  jni_env_->locals.VisitRoots(visitor, arg);
  jni_env_->monitors.VisitRoots(visitor, arg);

  SirtVisitRoots(visitor, arg);

  // Visit roots on this thread's stack
  Context* context = GetLongJumpContext();
  RootCallbackVisitor visitorToCallback(visitor, arg);
  ReferenceMapVisitor<RootCallbackVisitor>
      mapper(this, context, visitorToCallback);
  mapper.WalkStack();
  ReleaseLongJumpContext(context);

  std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack = GetInstrumentationStack();
  typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It;
  for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end; ++it) {
    mirror::Object* this_object = (*it).this_object_;
    if (this_object != NULL) {
      visitor(this_object, arg);
    }
    mirror::ArtMethod* method = (*it).method_;
    visitor(method, arg);
  }
}

static void VerifyObject(const mirror::Object* root, void* arg) {
  gc::Heap* heap = reinterpret_cast<gc::Heap*>(arg);
  heap->VerifyObject(root);
}

void Thread::VerifyStackImpl() {
  UniquePtr<Context> context(Context::Create());
  RootCallbackVisitor visitorToCallback(VerifyObject, Runtime::Current()->GetHeap());
  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitorToCallback);
  mapper.WalkStack();
}

// Set the stack end to the value to be used during a stack overflow.
void Thread::SetStackEndForStackOverflow() {
  // During stack overflow we allow use of the full stack.
  if (stack_end_ == stack_begin_) {
    // However, we seem to have already extended to use the full stack.
    LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
               << kStackOverflowReservedBytes << ")?";
    DumpStack(LOG(ERROR));
    LOG(FATAL) << "Recursive stack overflow.";
  }

  stack_end_ = stack_begin_;
}

std::ostream& operator<<(std::ostream& os, const Thread& thread) {
  thread.ShortDump(os);
  return os;
}

}  // namespace art