thread.cc revision fb2802da02337309ac64970e06c90bb3b1b1de3f
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_DALVIK

#include "thread.h"

#include <cutils/trace.h>
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
#include <sys/time.h>

#include <algorithm>
#include <bitset>
#include <cerrno>
#include <iostream>
#include <list>

#include "base/mutex.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "cutils/atomic.h"
#include "cutils/atomic-inline.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "gc_map.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "gc/space/space.h"
#include "invoke_arg_array_builder.h"
#include "jni_internal.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "oat/runtime/context.h"
#include "object_utils.h"
#include "reflection.h"
#include "runtime.h"
#include "runtime_support.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
#include "sirt_ref.h"
#include "stack.h"
#include "stack_indirect_reference_table.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "utils.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"

namespace art {

bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = NULL;

static const char* kThreadNameDuringStartup = "<native thread without managed peer>";

void Thread::InitCardTable() {
  card_table_ = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
}

#if !defined(__APPLE__)
static void UnimplementedEntryPoint() {
  UNIMPLEMENTED(FATAL);
}
#endif

void Thread::InitFunctionPointers() {
#if !defined(__APPLE__)  // The Mac GCC is too old to accept this code.
  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
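  // (Any call through a slot that InitEntryPoints leaves unfilled then lands in
  // UnimplementedEntryPoint, which aborts via UNIMPLEMENTED(FATAL), instead of jumping to a
  // random address.)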
  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&entrypoints_);
  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(entrypoints_));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
  }
#endif
  InitEntryPoints(&entrypoints_);
}

void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
  deoptimization_shadow_frame_ = sf;
}

void Thread::SetDeoptimizationReturnValue(const JValue& ret_val) {
  deoptimization_return_value_.SetJ(ret_val.GetJ());
}

ShadowFrame* Thread::GetAndClearDeoptimizationShadowFrame(JValue* ret_val) {
  ShadowFrame* sf = deoptimization_shadow_frame_;
  deoptimization_shadow_frame_ = NULL;
  ret_val->SetJ(deoptimization_return_value_.GetJ());
  return sf;
}

void Thread::InitTid() {
  tid_ = ::art::GetTid();
}

void Thread::InitAfterFork() {
  // One thread (us) survived the fork, but we have a new tid so we need to
  // update the value stashed in this Thread*.
  InitTid();
}

void* Thread::CreateCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  Runtime* runtime = Runtime::Current();
  if (runtime == NULL) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
    return NULL;
  }
  {
    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
    // after self->Init().
    MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
    // Check that if we got here we cannot be shutting down (as shutdown should never have started
    // while threads are being born).
    CHECK(!runtime->IsShuttingDown());
    self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
    Runtime::Current()->EndThreadBirth();
  }
  {
    ScopedObjectAccess soa(self);

    // Copy peer into self, deleting global reference when done.
    CHECK(self->jpeer_ != NULL);
    self->opeer_ = soa.Decode<mirror::Object*>(self->jpeer_);
    self->GetJniEnv()->DeleteGlobalRef(self->jpeer_);
    self->jpeer_ = NULL;

    {
      SirtRef<mirror::String> thread_name(self, self->GetThreadName(soa));
      self->SetThreadName(thread_name->ToModifiedUtf8().c_str());
    }
    Dbg::PostThreadStart(self);

    // Invoke the 'run' method of our java.lang.Thread.
    mirror::Object* receiver = self->opeer_;
    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
    mirror::AbstractMethod* m =
        receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid));
    JValue result;
    ArgArray arg_array(NULL, 0);
    arg_array.Append(reinterpret_cast<uint32_t>(receiver));
    m->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
  }
  // Detach and delete self.
  Runtime::Current()->GetThreadList()->Unregister(self);

  return NULL;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa,
                                  mirror::Object* thread_peer) {
  mirror::Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetInt(thread_peer)));
  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
  // to stop it from going away.
  if (kIsDebugBuild) {
    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
    if (result != NULL && !result->IsSuspended()) {
      Locks::thread_list_lock_->AssertHeld(soa.Self());
    }
  }
  return result;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, jobject java_thread) {
  return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread));
}

static size_t FixStackSize(size_t stack_size) {
  // A stack size of zero means "use the default".
  if (stack_size == 0) {
    stack_size = Runtime::Current()->GetDefaultStackSize();
  }

  // Dalvik used the bionic pthread default stack size for native threads,
  // so include that here to support apps that expect large native stacks.
  stack_size += 1 * MB;

  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
  if (stack_size < PTHREAD_STACK_MIN) {
    stack_size = PTHREAD_STACK_MIN;
  }

  // It's likely that callers are trying to ensure they have at least a certain amount of
  // stack space, so we should add our reserved space on top of what they requested, rather
  // than implicitly take it away from them.
  stack_size += Thread::kStackOverflowReservedBytes;

  // Some systems require the stack size to be a multiple of the system page size, so round up.
  stack_size = RoundUp(stack_size, kPageSize);

  return stack_size;
}

void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
  CHECK(java_peer != NULL);
  Thread* self = static_cast<JNIEnvExt*>(env)->self;
  Runtime* runtime = Runtime::Current();

  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
  bool thread_start_during_shutdown = false;
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDown()) {
      thread_start_during_shutdown = true;
    } else {
      runtime->StartThreadBirth();
    }
  }
  if (thread_start_during_shutdown) {
    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
    return;
  }

  Thread* child_thread = new Thread(is_daemon);
  // Use global JNI ref to hold peer live while child thread starts.
  child_thread->jpeer_ = env->NewGlobalRef(java_peer);
  stack_size = FixStackSize(stack_size);

  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing
  // to assign it.
  env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
                   reinterpret_cast<jint>(child_thread));

  pthread_t new_pthread;
  pthread_attr_t attr;
  CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
  CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED");
  CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
  int pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, child_thread);
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");

  if (pthread_create_result != 0) {
    // pthread_create(3) failed, so clean up.
    {
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      runtime->EndThreadBirth();
    }
    // Manually delete the global reference since Thread::Init will not have been run.
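    // (In the normal path, CreateCallback copies jpeer_ into opeer_ and deletes the global
    // reference itself; since the child thread never ran, we do it here.)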
    env->DeleteGlobalRef(child_thread->jpeer_);
    child_thread->jpeer_ = NULL;
    delete child_thread;
    child_thread = NULL;
    // TODO: remove from thread group?
    env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
    {
      std::string msg(StringPrintf("pthread_create (%s stack) failed: %s",
                                   PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
      ScopedObjectAccess soa(env);
      soa.Self()->ThrowOutOfMemoryError(msg.c_str());
    }
  }
}

void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
  // This function does all the initialization that must be run by the native thread it applies to.
  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
  // we can handshake with the corresponding native thread when it's ready.) Check this native
  // thread hasn't been through here already...
  CHECK(Thread::Current() == NULL);
  SetUpAlternateSignalStack();
  InitCpu();
  InitFunctionPointers();
  InitCardTable();
  InitTid();
  // Set pthread_self_ before calling pthread_setspecific so that Thread::Current() works; this
  // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
  pthread_self_ = pthread_self();
  CHECK(is_started_);
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
  DCHECK_EQ(Thread::Current(), this);

  thin_lock_id_ = thread_list->AllocThreadId(this);
  InitStackHwm();

  jni_env_ = new JNIEnvExt(this, java_vm);
  thread_list->Register(this);
}

Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                       bool create_peer) {
  Thread* self;
  Runtime* runtime = Runtime::Current();
  if (runtime == NULL) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
    return NULL;
  }
  {
    MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDown()) {
      LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
      return NULL;
    } else {
      Runtime::Current()->StartThreadBirth();
      self = new Thread(as_daemon);
      self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
      Runtime::Current()->EndThreadBirth();
    }
  }

  CHECK_NE(self->GetState(), kRunnable);
  self->SetState(kNative);

  // If we're the main thread, ClassLinker won't be created until after we're attached,
  // so that thread needs a two-stage attach. Regular threads don't need this hack.
  // In the compiler, all threads need this hack, because no-one's going to be getting
  // a native peer!
  if (create_peer) {
    self->CreatePeer(thread_name, as_daemon, thread_group);
  } else {
    // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
    if (thread_name != NULL) {
      self->name_->assign(thread_name);
      ::art::SetThreadName(thread_name);
    }
  }

  return self;
}

void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->IsStarted());
  JNIEnv* env = jni_env_;

  if (thread_group == NULL) {
    thread_group = runtime->GetMainThreadGroup();
  }
  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
  jint thread_priority = GetNativePriority();
  jboolean thread_is_daemon = as_daemon;

  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
  if (peer.get() == NULL) {
    CHECK(IsExceptionPending());
    return;
  }
  {
    ScopedObjectAccess soa(this);
    opeer_ = soa.Decode<mirror::Object*>(peer.get());
  }
  env->CallNonvirtualVoidMethod(peer.get(),
                                WellKnownClasses::java_lang_Thread,
                                WellKnownClasses::java_lang_Thread_init,
                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
  AssertNoPendingException();

  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());
  jni_env_->SetIntField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
                        reinterpret_cast<jint>(self));

  ScopedObjectAccess soa(self);
  SirtRef<mirror::String> peer_thread_name(soa.Self(), GetThreadName(soa));
  if (peer_thread_name.get() == NULL) {
    // The Thread constructor should have set the Thread.name to a
    // non-null value. However, because we can run without code
    // available (in the compiler, in tests), we manually assign the
    // fields the constructor should have set.
    soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
        SetBoolean(opeer_, thread_is_daemon);
    soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
        SetObject(opeer_, soa.Decode<mirror::Object*>(thread_group));
    soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
        SetObject(opeer_, soa.Decode<mirror::Object*>(thread_name.get()));
    soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
        SetInt(opeer_, thread_priority);
    peer_thread_name.reset(GetThreadName(soa));
  }
  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
  if (peer_thread_name.get() != NULL) {
    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
  }
}

void Thread::SetThreadName(const char* name) {
  name_->assign(name);
  ::art::SetThreadName(name);
  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
}

void Thread::InitStackHwm() {
  void* stack_base;
  size_t stack_size;
  GetThreadStack(pthread_self_, stack_base, stack_size);

  // TODO: include this in the thread dumps; potentially useful in SIGQUIT output?
  VLOG(threads) << StringPrintf("Native stack is at %p (%s)", stack_base, PrettySize(stack_size).c_str());

  stack_begin_ = reinterpret_cast<byte*>(stack_base);
  stack_size_ = stack_size;

  if (stack_size_ <= kStackOverflowReservedBytes) {
    LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << stack_size_ << " bytes)";
  }

  // TODO: move this into the Linux GetThreadStack implementation.
#if !defined(__APPLE__)
  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
  // will be broken because we'll die long before we get close to 2GB.
  bool is_main_thread = (::art::GetTid() == getpid());
  if (is_main_thread) {
    rlimit stack_limit;
    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
    }
    if (stack_limit.rlim_cur == RLIM_INFINITY) {
      // Find the default stack size for new threads...
      pthread_attr_t default_attributes;
      size_t default_stack_size;
      CHECK_PTHREAD_CALL(pthread_attr_init, (&default_attributes), "default stack size query");
      CHECK_PTHREAD_CALL(pthread_attr_getstacksize, (&default_attributes, &default_stack_size),
                         "default stack size query");
      CHECK_PTHREAD_CALL(pthread_attr_destroy, (&default_attributes), "default stack size query");

      // ...and use that as our limit.
      size_t old_stack_size = stack_size_;
      stack_size_ = default_stack_size;
      stack_begin_ += (old_stack_size - stack_size_);
      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
                    << " to " << PrettySize(stack_size_)
                    << " with base " << reinterpret_cast<void*>(stack_begin_);
    }
  }
#endif

  // Set stack_end_ to the bottom of the stack, reserving space to detect stack overflows.
  ResetDefaultStackEnd();

  // Sanity check.
  int stack_variable;
  CHECK_GT(&stack_variable, reinterpret_cast<void*>(stack_end_));
}

void Thread::ShortDump(std::ostream& os) const {
  os << "Thread[";
  if (GetThinLockId() != 0) {
    // If we're in kStarting, we won't have a thin lock id or tid yet.
    os << GetThinLockId()
       << ",tid=" << GetTid() << ',';
  }
  os << GetState()
     << ",Thread*=" << this
     << ",peer=" << opeer_
     << ",\"" << *name_ << "\""
     << "]";
}

void Thread::Dump(std::ostream& os) const {
  DumpState(os);
  DumpStack(os);
}

mirror::String* Thread::GetThreadName(const ScopedObjectAccessUnchecked& soa) const {
  mirror::Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
  return (opeer_ != NULL) ? reinterpret_cast<mirror::String*>(f->GetObject(opeer_)) : NULL;
}

void Thread::GetThreadName(std::string& name) const {
  name.assign(*name_);
}

void Thread::AtomicSetFlag(ThreadFlag flag) {
  android_atomic_or(flag, &state_and_flags_.as_int);
}

void Thread::AtomicClearFlag(ThreadFlag flag) {
  android_atomic_and(-1 ^ flag, &state_and_flags_.as_int);
}

// Attempt to rectify locks so that we dump thread list with required locks before exiting.
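// The TryLock calls below are best-effort: if a lock can't be acquired we dump anyway and just
// log a warning that the dump may be unreliable.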
static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
  LOG(ERROR) << *thread << " suspend count already zero.";
  Locks::thread_suspend_count_lock_->Unlock(self);
  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
    Locks::mutator_lock_->SharedTryLock(self);
    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
    }
  }
  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
    Locks::thread_list_lock_->TryLock(self);
    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
    }
  }
  std::ostringstream ss;
  Runtime::Current()->GetThreadList()->DumpLocked(ss);
  LOG(FATAL) << ss.str();
}

void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
  DCHECK(delta == -1 || delta == +1 || delta == -debug_suspend_count_)
      << delta << " " << debug_suspend_count_ << " " << this;
  DCHECK_GE(suspend_count_, debug_suspend_count_) << this;
  Locks::thread_suspend_count_lock_->AssertHeld(self);
  if (this != self && !IsSuspended()) {
    Locks::thread_list_lock_->AssertHeld(self);
  }
  if (UNLIKELY(delta < 0 && suspend_count_ <= 0)) {
    UnsafeLogFatalForSuspendCount(self, this);
    return;
  }

  suspend_count_ += delta;
  if (for_debugger) {
    debug_suspend_count_ += delta;
  }

  if (suspend_count_ == 0) {
    AtomicClearFlag(kSuspendRequest);
  } else {
    AtomicSetFlag(kSuspendRequest);
  }
}

void Thread::RunCheckpointFunction() {
  CHECK(checkpoint_function_ != NULL);
  ATRACE_BEGIN("Checkpoint function");
  checkpoint_function_->Run(this);
  ATRACE_END();
}

bool Thread::RequestCheckpoint(Closure* function) {
  CHECK(!ReadFlag(kCheckpointRequest)) << "Already have a pending checkpoint request";
  checkpoint_function_ = function;
  union StateAndFlags old_state_and_flags = state_and_flags_;
  // We must be runnable to request a checkpoint.
  old_state_and_flags.as_struct.state = kRunnable;
  union StateAndFlags new_state_and_flags = old_state_and_flags;
  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
  int succeeded = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
                                         &state_and_flags_.as_int);
  return succeeded == 0;
}

void Thread::FullSuspendCheck() {
  VLOG(threads) << this << " self-suspending";
  ATRACE_BEGIN("Full suspend check");
  // Make thread appear suspended to other threads, release mutator_lock_.
  TransitionFromRunnableToSuspended(kSuspended);
  // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
  TransitionFromSuspendedToRunnable();
  ATRACE_END();
  VLOG(threads) << this << " self-reviving";
}

Thread* Thread::SuspendForDebugger(jobject peer, bool request_suspension, bool* timed_out) {
  static const useconds_t kTimeoutUs = 30 * 1000000;  // 30s.
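  // Retry with exponential back-off: first just yield, then sleep, doubling the sleep interval
  // (capped at 0.5s) until the thread is observed suspended or kTimeoutUs elapses.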
  useconds_t total_delay_us = 0;
  useconds_t delay_us = 0;
  bool did_suspend_request = false;
  *timed_out = false;
  while (true) {
    Thread* thread;
    {
      ScopedObjectAccess soa(Thread::Current());
      Thread* self = soa.Self();
      MutexLock mu(self, *Locks::thread_list_lock_);
      thread = Thread::FromManagedThread(soa, peer);
      if (thread == NULL) {
        JNIEnv* env = self->GetJniEnv();
        ScopedLocalRef<jstring> scoped_name_string(env,
            (jstring)env->GetObjectField(peer, WellKnownClasses::java_lang_Thread_name));
        ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
        if (scoped_name_chars.c_str() == NULL) {
          LOG(WARNING) << "No such thread for suspend: " << peer;
          env->ExceptionClear();
        } else {
          LOG(WARNING) << "No such thread for suspend: " << peer << ":" << scoped_name_chars.c_str();
        }

        return NULL;
      }
      {
        MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
        if (request_suspension) {
          thread->ModifySuspendCount(soa.Self(), +1, true /* for_debugger */);
          request_suspension = false;
          did_suspend_request = true;
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised if this is the current thread
        // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
        // to just explicitly handle the current thread in the callers to this code.
        CHECK_NE(thread, soa.Self()) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          return thread;
        }
        if (total_delay_us >= kTimeoutUs) {
          LOG(ERROR) << "Thread suspension timed out: " << peer;
          if (did_suspend_request) {
            thread->ModifySuspendCount(soa.Self(), -1, true /* for_debugger */);
          }
          *timed_out = true;
          return NULL;
        }
      }
      // Release locks and come out of runnable state.
    }
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      BaseMutex* held_mutex = Thread::Current()->GetHeldMutex(static_cast<LockLevel>(i));
      if (held_mutex != NULL) {
        LOG(FATAL) << "Holding " << held_mutex->GetName()
                   << " while sleeping for thread suspension";
      }
    }
    {
      useconds_t new_delay_us = delay_us * 2;
      CHECK_GE(new_delay_us, delay_us);
      if (new_delay_us < 500000) {  // Don't allow sleeping to be more than 0.5s.
        delay_us = new_delay_us;
      }
    }
    if (delay_us == 0) {
      sched_yield();
      // Default to 1 millisecond (note that this gets multiplied by 2 before the first sleep).
      delay_us = 500;
    } else {
      usleep(delay_us);
      total_delay_us += delay_us;
    }
  }
}

void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
  std::string group_name;
  int priority;
  bool is_daemon = false;
  Thread* self = Thread::Current();

  if (self != NULL && thread != NULL && thread->opeer_ != NULL) {
    ScopedObjectAccessUnchecked soa(self);
    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(thread->opeer_);
    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(thread->opeer_);

    mirror::Object* thread_group =
        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->opeer_);

    if (thread_group != NULL) {
      mirror::Field* group_name_field =
          soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
      mirror::String* group_name_string =
          reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
      group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : "<null>";
    }
  } else {
    priority = GetNativePriority();
  }

  std::string scheduler_group_name(GetSchedulerGroupName(tid));
  if (scheduler_group_name.empty()) {
    scheduler_group_name = "default";
  }

  if (thread != NULL) {
    os << '"' << *thread->name_ << '"';
    if (is_daemon) {
      os << " daemon";
    }
    os << " prio=" << priority
       << " tid=" << thread->GetThinLockId()
       << " " << thread->GetState();
    if (thread->IsStillStarting()) {
      os << " (still starting up)";
    }
    os << "\n";
  } else {
    os << '"' << ::art::GetThreadName(tid) << '"'
       << " prio=" << priority
       << " (not attached)\n";
  }

  if (thread != NULL) {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    os << "  | group=\"" << group_name << "\""
       << " sCount=" << thread->suspend_count_
       << " dsCount=" << thread->debug_suspend_count_
       << " obj=" << reinterpret_cast<void*>(thread->opeer_)
       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
  }

  os << "  | sysTid=" << tid
     << " nice=" << getpriority(PRIO_PROCESS, tid)
     << " cgrp=" << scheduler_group_name;
  if (thread != NULL) {
    int policy;
    sched_param sp;
    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->pthread_self_, &policy, &sp), __FUNCTION__);
    os << " sched=" << policy << "/" << sp.sched_priority
       << " handle=" << reinterpret_cast<void*>(thread->pthread_self_);
  }
  os << "\n";

  // Grab the scheduler stats for this thread.
  std::string scheduler_stats;
  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
  } else {
    scheduler_stats = "0 0 0";
  }

  char native_thread_state = '?';
  int utime = 0;
  int stime = 0;
  int task_cpu = 0;
  GetTaskStats(tid, native_thread_state, utime, stime, task_cpu);

  os << "  | state=" << native_thread_state
     << " schedstat=( " << scheduler_stats << " )"
     << " utm=" << utime
     << " stm=" << stime
     << " core=" << task_cpu
     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
  if (thread != NULL) {
    os << "  | stack=" << reinterpret_cast<void*>(thread->stack_begin_) << "-"
       << reinterpret_cast<void*>(thread->stack_end_)
       << " stackSize=" << PrettySize(thread->stack_size_) << "\n";
  }
}

void Thread::DumpState(std::ostream& os) const {
  Thread::DumpState(os, this, GetTid());
}

struct StackDumpVisitor : public StackVisitor {
  StackDumpVisitor(std::ostream& os, Thread* thread, Context* context, bool can_allocate)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), os(os), thread(thread), can_allocate(can_allocate),
        last_method(NULL), last_line_number(0), repetition_count(0), frame_count(0) {
  }

  virtual ~StackDumpVisitor() {
    if (frame_count == 0) {
      os << "  (no managed stack frames)\n";
    }
  }

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::AbstractMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }
    const int kMaxRepetition = 3;
    mirror::Class* c = m->GetDeclaringClass();
    const mirror::DexCache* dex_cache = c->GetDexCache();
    int line_number = -1;
    if (dex_cache != NULL) {  // be tolerant of bad input
      const DexFile& dex_file = *dex_cache->GetDexFile();
      line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
    }
    if (line_number == last_line_number && last_method == m) {
      repetition_count++;
    } else {
      if (repetition_count >= kMaxRepetition) {
        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
      }
      repetition_count = 0;
      last_line_number = line_number;
      last_method = m;
    }
    if (repetition_count < kMaxRepetition) {
      os << "  at " << PrettyMethod(m, false);
      if (m->IsNative()) {
        os << "(Native method)";
      } else {
        mh.ChangeMethod(m);
        const char* source_file(mh.GetDeclaringClassSourceFile());
        os << "(" << (source_file != NULL ? source_file : "unavailable")
           << ":" << line_number << ")";
      }
      os << "\n";
      if (frame_count == 0) {
        Monitor::DescribeWait(os, thread);
      }
      if (can_allocate) {
        Monitor::VisitLocks(this, DumpLockedObject, &os);
      }
    }

    ++frame_count;
    return true;
  }

  static void DumpLockedObject(mirror::Object* o, void* context)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    std::ostream& os = *reinterpret_cast<std::ostream*>(context);
    os << "  - locked <" << o << "> (a " << PrettyTypeOf(o) << ")\n";
  }

  std::ostream& os;
  const Thread* thread;
  const bool can_allocate;
  MethodHelper mh;
  mirror::AbstractMethod* last_method;
  int last_line_number;
  int repetition_count;
  int frame_count;
};

static bool ShouldShowNativeStack(const Thread* thread)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ThreadState state = thread->GetState();

  // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
  if (state > kWaiting && state < kStarting) {
    return true;
  }

  // In an Object.wait variant or Thread.sleep? That's not interesting.
  if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
    return false;
  }

  // In some other native method? That's interesting.
  // We don't just check kNative because native methods will be in state kSuspended if they're
  // calling back into the VM, or kBlocked if they're blocked on a monitor, or in one of the
  // thread-startup states if it's early enough in the thread's life cycle (http://b/7432159).
  mirror::AbstractMethod* current_method = thread->GetCurrentMethod(NULL);
  return current_method != NULL && current_method->IsNative();
}

void Thread::DumpStack(std::ostream& os) const {
  // TODO: we call this code when dying but may not have suspended the thread ourselves. The
  // IsSuspended check is therefore racy with the use for dumping (normally we inhibit
  // the race with the thread_suspend_count_lock_).
  bool dump_for_abort = (gAborting > 0);
  if (this == Thread::Current() || IsSuspended() || dump_for_abort) {
    // If we're currently in native code, dump that stack before dumping the managed stack.
    if (dump_for_abort || ShouldShowNativeStack(this)) {
      DumpKernelStack(os, GetTid(), "  kernel: ", false);
      DumpNativeStack(os, GetTid(), "  native: ", false);
    }
    UniquePtr<Context> context(Context::Create());
    StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(), !throwing_OutOfMemoryError_);
    dumper.WalkStack();
  } else {
    os << "Not able to dump stack of thread that isn't suspended";
  }
}

void Thread::ThreadExitCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  if (self->thread_exit_check_count_ == 0) {
    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's going to use a pthread_key_create destructor?): " << *self;
    CHECK(is_started_);
    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
    self->thread_exit_check_count_ = 1;
  } else {
    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
  }
}

void Thread::Startup() {
  CHECK(!is_started_);
  is_started_ = true;
  {
    // MutexLock to keep annotalysis happy.
    //
    // Note we use NULL for the thread because Thread::Current can
    // return garbage since (is_started_ == true) and
    // Thread::pthread_key_self_ is not yet initialized.
    // This was seen on glibc.
    MutexLock mu(NULL, *Locks::thread_suspend_count_lock_);
    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
                                         *Locks::thread_suspend_count_lock_);
  }

  // Allocate a TLS slot.
  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");

  // Double-check the TLS slot allocation.
  if (pthread_getspecific(pthread_key_self_) != NULL) {
    LOG(FATAL) << "Newly-created pthread TLS slot is not NULL";
  }
}

void Thread::FinishStartup() {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->IsStarted());

  // Finish attaching the main thread.
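  // (The peer couldn't be created during the main thread's initial Attach because creating it
  // needs the ClassLinker and a started runtime; this is the second half of the two-stage attach
  // described in Thread::Attach.)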
  ScopedObjectAccess soa(Thread::Current());
  Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());

  Runtime::Current()->GetClassLinker()->RunRootClinits();
}

void Thread::Shutdown() {
  CHECK(is_started_);
  is_started_ = false;
  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
  MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
  if (resume_cond_ != NULL) {
    delete resume_cond_;
    resume_cond_ = NULL;
  }
}

Thread::Thread(bool daemon)
    : suspend_count_(0),
      card_table_(NULL),
      exception_(NULL),
      stack_end_(NULL),
      managed_stack_(),
      jni_env_(NULL),
      self_(NULL),
      opeer_(NULL),
      jpeer_(NULL),
      stack_begin_(NULL),
      stack_size_(0),
      thin_lock_id_(0),
      tid_(0),
      wait_mutex_(new Mutex("a thread wait mutex")),
      wait_cond_(new ConditionVariable("a thread wait condition variable", *wait_mutex_)),
      wait_monitor_(NULL),
      interrupted_(false),
      wait_next_(NULL),
      monitor_enter_object_(NULL),
      top_sirt_(NULL),
      runtime_(NULL),
      class_loader_override_(NULL),
      long_jump_context_(NULL),
      throwing_OutOfMemoryError_(false),
      debug_suspend_count_(0),
      debug_invoke_req_(new DebugInvokeReq),
      deoptimization_shadow_frame_(NULL),
      instrumentation_stack_(new std::deque<instrumentation::InstrumentationStackFrame>),
      name_(new std::string(kThreadNameDuringStartup)),
      daemon_(daemon),
      pthread_self_(0),
      no_thread_suspension_(0),
      last_no_thread_suspension_cause_(NULL),
      checkpoint_function_(0),
      thread_exit_check_count_(0) {
  CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
  state_and_flags_.as_struct.flags = 0;
  state_and_flags_.as_struct.state = kNative;
  memset(&held_mutexes_[0], 0, sizeof(held_mutexes_));
}

bool Thread::IsStillStarting() const {
  // You might think you can check whether the state is kStarting, but for much of thread startup,
  // the thread is in kNative; it might also be in kVmWait.
  // You might think you can check whether the peer is NULL, but the peer is actually created and
  // assigned fairly early on, and needs to be.
  // It turns out that the last thing to change is the thread name; that's a good proxy for "has
  // this thread _ever_ entered kRunnable".
  return (jpeer_ == NULL && opeer_ == NULL) || (*name_ == kThreadNameDuringStartup);
}

void Thread::AssertNoPendingException() const {
  if (UNLIKELY(IsExceptionPending())) {
    ScopedObjectAccess soa(Thread::Current());
    mirror::Throwable* exception = GetException(NULL);
    LOG(FATAL) << "No pending exception expected: " << exception->Dump();
  }
}

static void MonitorExitVisitor(const mirror::Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS {
  Thread* self = reinterpret_cast<Thread*>(arg);
  mirror::Object* entered_monitor = const_cast<mirror::Object*>(object);
  if (self->HoldsLock(entered_monitor)) {
    LOG(WARNING) << "Calling MonitorExit on object "
                 << object << " (" << PrettyTypeOf(object) << ")"
                 << " left locked by native thread "
                 << *Thread::Current() << " which is detaching";
    entered_monitor->MonitorExit(self);
  }
}

void Thread::Destroy() {
  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());

  if (opeer_ != NULL) {
    ScopedObjectAccess soa(self);
    // We may need to call user-supplied managed code, do this before final clean-up.
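    // (The uncaught exception handler and ThreadGroup.removeThread both run arbitrary managed
    // code, so they have to happen while this thread is still able to execute it.)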
    HandleUncaughtExceptions(soa);
    RemoveFromThreadGroup(soa);

    // this.nativePeer = 0;
    soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)->SetInt(opeer_, 0);
    Dbg::PostThreadDeath(self);

    // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
    // who is waiting.
    mirror::Object* lock =
        soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(opeer_);
    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
    if (lock != NULL) {
      ObjectLock locker(self, lock);
      locker.Notify();
    }
  }

  // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
  if (jni_env_ != NULL) {
    jni_env_->monitors.VisitRoots(MonitorExitVisitor, self);
  }
}

Thread::~Thread() {
  if (jni_env_ != NULL && jpeer_ != NULL) {
    // If pthread_create fails we don't have a jni env here.
    jni_env_->DeleteGlobalRef(jpeer_);
    jpeer_ = NULL;
  }
  opeer_ = NULL;

  delete jni_env_;
  jni_env_ = NULL;

  CHECK_NE(GetState(), kRunnable);
  // We may be deleting a still-born thread.
  SetStateUnsafe(kTerminated);

  delete wait_cond_;
  delete wait_mutex_;

  if (long_jump_context_ != NULL) {
    delete long_jump_context_;
  }

  delete debug_invoke_req_;
  delete instrumentation_stack_;
  delete name_;

  TearDownAlternateSignalStack();
}

void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
  if (!IsExceptionPending()) {
    return;
  }
  ScopedLocalRef<jobject> peer(jni_env_, soa.AddLocalReference<jobject>(opeer_));
  ScopedThreadStateChange tsc(this, kNative);

  // Get and clear the exception.
  ScopedLocalRef<jthrowable> exception(jni_env_, jni_env_->ExceptionOccurred());
  jni_env_->ExceptionClear();

  // If the thread has its own handler, use that.
  ScopedLocalRef<jobject> handler(jni_env_,
                                  jni_env_->GetObjectField(peer.get(),
                                                           WellKnownClasses::java_lang_Thread_uncaughtHandler));
  if (handler.get() == NULL) {
    // Otherwise use the thread group's default handler.
    handler.reset(jni_env_->GetObjectField(peer.get(), WellKnownClasses::java_lang_Thread_group));
  }

  // Call the handler.
  jni_env_->CallVoidMethod(handler.get(),
                           WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException,
                           peer.get(), exception.get());

  // If the handler threw, clear that exception too.
  jni_env_->ExceptionClear();
}

void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
  // this.group.removeThread(this);
  // group can be null if we're in the compiler or a test.
  mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(opeer_);
  if (ogroup != NULL) {
    ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
    ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(opeer_));
    ScopedThreadStateChange tsc(soa.Self(), kNative);
    jni_env_->CallVoidMethod(group.get(), WellKnownClasses::java_lang_ThreadGroup_removeThread,
                             peer.get());
  }
}

size_t Thread::NumSirtReferences() {
  size_t count = 0;
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
    count += cur->NumberOfReferences();
  }
  return count;
}

bool Thread::SirtContains(jobject obj) const {
  mirror::Object** sirt_entry = reinterpret_cast<mirror::Object**>(obj);
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
    if (cur->Contains(sirt_entry)) {
      return true;
    }
  }
  // JNI code invoked from portable code uses shadow frames rather than the SIRT.
  return managed_stack_.ShadowFramesContain(sirt_entry);
}

void Thread::SirtVisitRoots(RootVisitor* visitor, void* arg) {
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
    size_t num_refs = cur->NumberOfReferences();
    for (size_t j = 0; j < num_refs; j++) {
      mirror::Object* object = cur->GetReference(j);
      if (object != NULL) {
        visitor(object, arg);
      }
    }
  }
}

mirror::Object* Thread::DecodeJObject(jobject obj) const {
  Locks::mutator_lock_->AssertSharedHeld(this);
  if (obj == NULL) {
    return NULL;
  }
  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
  IndirectRefKind kind = GetIndirectRefKind(ref);
  mirror::Object* result;
  // The "kinds" below are sorted by the frequency we expect to encounter them.
  if (kind == kLocal) {
    IndirectReferenceTable& locals = jni_env_->locals;
    result = const_cast<mirror::Object*>(locals.Get(ref));
  } else if (kind == kSirtOrInvalid) {
    // TODO: make stack indirect reference table lookup more efficient.
    // Check if this is a local reference in the SIRT.
    if (LIKELY(SirtContains(obj))) {
      result = *reinterpret_cast<mirror::Object**>(obj);  // Read from SIRT.
    } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) {
      // Assume an invalid local reference is actually a direct pointer.
      result = reinterpret_cast<mirror::Object*>(obj);
    } else {
      result = kInvalidIndirectRefObject;
    }
  } else if (kind == kGlobal) {
    JavaVMExt* vm = Runtime::Current()->GetJavaVM();
    IndirectReferenceTable& globals = vm->globals;
    MutexLock mu(const_cast<Thread*>(this), vm->globals_lock);
    result = const_cast<mirror::Object*>(globals.Get(ref));
  } else {
    DCHECK_EQ(kind, kWeakGlobal);
    JavaVMExt* vm = Runtime::Current()->GetJavaVM();
    IndirectReferenceTable& weak_globals = vm->weak_globals;
    MutexLock mu(const_cast<Thread*>(this), vm->weak_globals_lock);
    result = const_cast<mirror::Object*>(weak_globals.Get(ref));
    if (result == kClearedJniWeakGlobal) {
      // This is a special case where it's okay to return NULL.
      return NULL;
    }
  }

  if (UNLIKELY(result == NULL)) {
    JniAbortF(NULL, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
  } else {
    if (kIsDebugBuild && (result != kInvalidIndirectRefObject)) {
      Runtime::Current()->GetHeap()->VerifyObject(result);
    }
  }
  return result;
}

// Implements java.lang.Thread.interrupted.
bool Thread::Interrupted() {
  MutexLock mu(Thread::Current(), *wait_mutex_);
  bool interrupted = interrupted_;
  interrupted_ = false;
  return interrupted;
}

// Implements java.lang.Thread.isInterrupted.
bool Thread::IsInterrupted() {
  MutexLock mu(Thread::Current(), *wait_mutex_);
  return interrupted_;
}

void Thread::Interrupt() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *wait_mutex_);
  if (interrupted_) {
    return;
  }
  interrupted_ = true;
  NotifyLocked(self);
}

void Thread::Notify() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *wait_mutex_);
  NotifyLocked(self);
}

void Thread::NotifyLocked(Thread* self) {
  if (wait_monitor_ != NULL) {
    wait_cond_->Signal(self);
  }
}

class CountStackDepthVisitor : public StackVisitor {
 public:
  explicit CountStackDepthVisitor(Thread* thread)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, NULL),
        depth_(0), skip_depth_(0), skipping_(true) {}

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // We want to skip frames up to and including the exception's constructor.
    // Note we also skip the frame if it doesn't have a method (namely the callee
    // save frame).
    mirror::AbstractMethod* m = GetMethod();
    if (skipping_ && !m->IsRuntimeMethod() &&
        !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
      skipping_ = false;
    }
    if (!skipping_) {
      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
        ++depth_;
      }
    } else {
      ++skip_depth_;
    }
    return true;
  }

  int GetDepth() const {
    return depth_;
  }

  int GetSkipDepth() const {
    return skip_depth_;
  }

 private:
  uint32_t depth_;
  uint32_t skip_depth_;
  bool skipping_;
};

class BuildInternalStackTraceVisitor : public StackVisitor {
 public:
  explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
      : StackVisitor(thread, NULL), self_(self),
        skip_depth_(skip_depth), count_(0), dex_pc_trace_(NULL), method_trace_(NULL) {}

  bool Init(int depth)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Allocate method trace with an extra slot that will hold the PC trace.
    SirtRef<mirror::ObjectArray<mirror::Object> >
        method_trace(self_,
                     Runtime::Current()->GetClassLinker()->AllocObjectArray<mirror::Object>(self_,
                                                                                            depth + 1));
    if (method_trace.get() == NULL) {
      return false;
    }
    mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth);
    if (dex_pc_trace == NULL) {
      return false;
    }
    // Save PC trace in last element of method trace, which also places it into the
    // object graph.
    method_trace->Set(depth, dex_pc_trace);
    // Set the Object*s and assert that no thread suspension is now possible.
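    // (The raw method_trace_/dex_pc_trace_ pointers cached below are not GC roots, so a
    // suspension point, and with it a GC, must not occur while they are live.)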
    const char* last_no_suspend_cause =
        self_->StartAssertNoThreadSuspension("Building internal stack trace");
    CHECK(last_no_suspend_cause == NULL) << last_no_suspend_cause;
    method_trace_ = method_trace.get();
    dex_pc_trace_ = dex_pc_trace;
    return true;
  }

  virtual ~BuildInternalStackTraceVisitor() {
    if (method_trace_ != NULL) {
      self_->EndAssertNoThreadSuspension(NULL);
    }
  }

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method_trace_ == NULL || dex_pc_trace_ == NULL) {
      return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
    }
    if (skip_depth_ > 0) {
      skip_depth_--;
      return true;
    }
    mirror::AbstractMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;  // Ignore runtime frames (in particular callee save).
    }
    method_trace_->Set(count_, m);
    dex_pc_trace_->Set(count_, GetDexPc());
    ++count_;
    return true;
  }

  mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
    return method_trace_;
  }

 private:
  Thread* const self_;
  // How many more frames to skip.
  int32_t skip_depth_;
  // Current position down stack trace.
  uint32_t count_;
  // Array of dex PC values.
  mirror::IntArray* dex_pc_trace_;
  // An array of the methods on the stack, the last entry is a reference to the PC trace.
  mirror::ObjectArray<mirror::Object>* method_trace_;
};

jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const {
  // Compute depth of stack.
  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
  count_visitor.WalkStack();
  int32_t depth = count_visitor.GetDepth();
  int32_t skip_depth = count_visitor.GetSkipDepth();

  // Build internal stack trace.
  BuildInternalStackTraceVisitor build_trace_visitor(soa.Self(), const_cast<Thread*>(this),
                                                     skip_depth);
  if (!build_trace_visitor.Init(depth)) {
    return NULL;  // Allocation failed.
  }
  build_trace_visitor.WalkStack();
  mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
  if (kIsDebugBuild) {
    for (int32_t i = 0; i < trace->GetLength(); ++i) {
      CHECK(trace->Get(i) != NULL);
    }
  }
  return soa.AddLocalReference<jobjectArray>(trace);
}

jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
                                                                jobjectArray output_array, int* stack_depth) {
  // Transition into runnable state to work on Object*/Array*.
  ScopedObjectAccess soa(env);
  // Decode the internal stack trace into the depth, method trace and PC trace.
  mirror::ObjectArray<mirror::Object>* method_trace =
      soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
  int32_t depth = method_trace->GetLength() - 1;
  mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();

  jobjectArray result;
  mirror::ObjectArray<mirror::StackTraceElement>* java_traces;
  if (output_array != NULL) {
    // Reuse the array we were given.
    result = output_array;
    java_traces = soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(output_array);
    // ...adjusting the number of frames we'll write to not exceed the array length.
    depth = std::min(depth, java_traces->GetLength());
  } else {
    // Create java_traces array and place it in the local reference table.
    java_traces = class_linker->AllocStackTraceElementArray(soa.Self(), depth);
    if (java_traces == NULL) {
      return NULL;
    }
    result = soa.AddLocalReference<jobjectArray>(java_traces);
  }

  if (stack_depth != NULL) {
    *stack_depth = depth;
  }

  MethodHelper mh;
  for (int32_t i = 0; i < depth; ++i) {
    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line).
    mirror::AbstractMethod* method = down_cast<mirror::AbstractMethod*>(method_trace->Get(i));
    mh.ChangeMethod(method);
    uint32_t dex_pc = pc_trace->Get(i);
    int32_t line_number = mh.GetLineNumFromDexPC(dex_pc);
    // Allocate element, potentially triggering GC.
    // TODO: reuse class_name_object via Class::name_?
    const char* descriptor = mh.GetDeclaringClassDescriptor();
    CHECK(descriptor != NULL);
    std::string class_name(PrettyDescriptor(descriptor));
    SirtRef<mirror::String> class_name_object(soa.Self(),
                                              mirror::String::AllocFromModifiedUtf8(soa.Self(),
                                                                                    class_name.c_str()));
    if (class_name_object.get() == NULL) {
      return NULL;
    }
    const char* method_name = mh.GetName();
    CHECK(method_name != NULL);
    SirtRef<mirror::String> method_name_object(soa.Self(),
                                               mirror::String::AllocFromModifiedUtf8(soa.Self(),
                                                                                     method_name));
    if (method_name_object.get() == NULL) {
      return NULL;
    }
    const char* source_file = mh.GetDeclaringClassSourceFile();
    SirtRef<mirror::String> source_name_object(soa.Self(),
                                               mirror::String::AllocFromModifiedUtf8(soa.Self(),
                                                                                     source_file));
    mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(soa.Self(),
                                                                      class_name_object.get(),
                                                                      method_name_object.get(),
                                                                      source_name_object.get(),
                                                                      line_number);
    if (obj == NULL) {
      return NULL;
    }
#ifdef MOVING_GARBAGE_COLLECTOR
    // Re-read after potential GC.
    java_traces = Decode<ObjectArray<Object>*>(soa.Env(), result);
    method_trace = down_cast<ObjectArray<Object>*>(Decode<Object*>(soa.Env(), internal));
    pc_trace = down_cast<IntArray*>(method_trace->Get(depth));
#endif
    java_traces->Set(i, obj);
  }
  return result;
}

void Thread::ThrowNewExceptionF(const ThrowLocation& throw_location,
                                const char* exception_class_descriptor, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  ThrowNewExceptionV(throw_location, exception_class_descriptor, fmt, args);
  va_end(args);
}

void Thread::ThrowNewExceptionV(const ThrowLocation& throw_location,
                                const char* exception_class_descriptor,
                                const char* fmt, va_list ap) {
  std::string msg;
  StringAppendV(&msg, fmt, ap);
  ThrowNewException(throw_location, exception_class_descriptor, msg.c_str());
}

void Thread::ThrowNewException(const ThrowLocation& throw_location, const char* exception_class_descriptor,
                               const char* msg) {
  AssertNoPendingException();  // Callers should either clear or call ThrowNewWrappedException.
  ThrowNewWrappedException(throw_location, exception_class_descriptor, msg);
}

void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
                                      const char* exception_class_descriptor,
                                      const char* msg) {
  DCHECK_EQ(this, Thread::Current());
  // Ensure we don't forget arguments over object allocation.
  SirtRef<mirror::Object> saved_throw_this(this, throw_location.GetThis());
  SirtRef<mirror::AbstractMethod> saved_throw_method(this, throw_location.GetMethod());
  // Ignore the cause throw location. TODO: should we report this as a re-throw?
  SirtRef<mirror::Throwable> cause(this, GetException(NULL));
  ClearException();
  Runtime* runtime = Runtime::Current();

  mirror::ClassLoader* cl = NULL;
  if (throw_location.GetMethod() != NULL) {
    cl = throw_location.GetMethod()->GetDeclaringClass()->GetClassLoader();
  }
  SirtRef<mirror::Class>
      exception_class(this, runtime->GetClassLinker()->FindClass(exception_class_descriptor, cl));
  if (UNLIKELY(exception_class.get() == NULL)) {
    CHECK(IsExceptionPending());
    LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
    return;
  }

  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(exception_class.get(), true, true))) {
    DCHECK(IsExceptionPending());
    return;
  }
  DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
  SirtRef<mirror::Throwable> exception(this,
                                       down_cast<mirror::Throwable*>(exception_class->AllocObject(this)));

  // Choose an appropriate constructor and set up the arguments.
  const char* signature;
  SirtRef<mirror::String> msg_string(this, NULL);
  if (msg != NULL) {
    // Ensure we remember this and the method over the String allocation.
    msg_string.reset(mirror::String::AllocFromModifiedUtf8(this, msg));
    if (UNLIKELY(msg_string.get() == NULL)) {
      CHECK(IsExceptionPending());  // OOME.
      return;
    }
    if (cause.get() == NULL) {
      signature = "(Ljava/lang/String;)V";
    } else {
      signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
    }
  } else {
    if (cause.get() == NULL) {
      signature = "()V";
    } else {
      signature = "(Ljava/lang/Throwable;)V";
    }
  }
  mirror::AbstractMethod* exception_init_method =
      exception_class->FindDeclaredDirectMethod("<init>", signature);

  CHECK(exception_init_method != NULL) << "No <init>" << signature << " in "
                                       << PrettyDescriptor(exception_class_descriptor);

  if (UNLIKELY(!runtime->IsStarted())) {
    // Something is trying to throw an exception without a started runtime, which is the common
    // case in the compiler. We won't be able to invoke the constructor of the exception, so set
    // the exception fields directly.
    if (msg != NULL) {
      exception->SetDetailMessage(msg_string.get());
    }
    if (cause.get() != NULL) {
      exception->SetCause(cause.get());
    }
    ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
                                         throw_location.GetDexPc());
    SetException(gc_safe_throw_location, exception.get());
  } else {
    ArgArray args("VLL", 3);
    args.Append(reinterpret_cast<uint32_t>(exception.get()));
    if (msg != NULL) {
      args.Append(reinterpret_cast<uint32_t>(msg_string.get()));
    }
    if (cause.get() != NULL) {
      args.Append(reinterpret_cast<uint32_t>(cause.get()));
    }
    JValue result;
    exception_init_method->Invoke(this, args.GetArray(), args.GetNumBytes(), &result, 'V');
    if (LIKELY(!IsExceptionPending())) {
      ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
                                           throw_location.GetDexPc());
      SetException(gc_safe_throw_location, exception.get());
    }
  }
}

void Thread::ThrowOutOfMemoryError(const char* msg) {
  LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
                             msg, (throwing_OutOfMemoryError_ ? " (recursive case)" : ""));
  ThrowLocation throw_location = GetCurrentLocationForThrow();
  if (!throwing_OutOfMemoryError_) {
    throwing_OutOfMemoryError_ = true;
    ThrowNewException(throw_location, "Ljava/lang/OutOfMemoryError;", msg);
    throwing_OutOfMemoryError_ = false;
  } else {
    Dump(LOG(ERROR));  // The pre-allocated OOME has no stack, so help out and log one.
    SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
  }
}

Thread* Thread::CurrentFromGdb() {
  return Thread::Current();
}

void Thread::DumpFromGdb() const {
  std::ostringstream ss;
  Dump(ss);
  std::string str(ss.str());
  // log to stderr for debugging command line processes
  std::cerr << str;
#ifdef HAVE_ANDROID_OS
  // log to logcat for debugging frameworks processes
  LOG(INFO) << str;
#endif
}

struct EntryPointInfo {
  uint32_t offset;
  const char* name;
};
#define ENTRY_POINT_INFO(x) { ENTRYPOINT_OFFSET(x), #x }
static const EntryPointInfo gThreadEntryPointInfo[] = {
  ENTRY_POINT_INFO(pAllocArrayFromCode),
  ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck),
  ENTRY_POINT_INFO(pAllocObjectFromCode),
  ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck),
  ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode),
  ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck),
  ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode),
  ENTRY_POINT_INFO(pCanPutArrayElementFromCode),
  ENTRY_POINT_INFO(pCheckCastFromCode),
  ENTRY_POINT_INFO(pInitializeStaticStorage),
  ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode),
  ENTRY_POINT_INFO(pInitializeTypeFromCode),
  ENTRY_POINT_INFO(pResolveStringFromCode),
  ENTRY_POINT_INFO(pSet32Instance),
  ENTRY_POINT_INFO(pSet32Static),
  ENTRY_POINT_INFO(pSet64Instance),
  ENTRY_POINT_INFO(pSet64Static),
  ENTRY_POINT_INFO(pSetObjInstance),
  ENTRY_POINT_INFO(pSetObjStatic),
  ENTRY_POINT_INFO(pGet32Instance),
  ENTRY_POINT_INFO(pGet32Static),
  ENTRY_POINT_INFO(pGet64Instance),
  ENTRY_POINT_INFO(pGet64Static),
  ENTRY_POINT_INFO(pGetObjInstance),
  ENTRY_POINT_INFO(pGetObjStatic),
  ENTRY_POINT_INFO(pHandleFillArrayDataFromCode),
  ENTRY_POINT_INFO(pJniMethodStart),
struct EntryPointInfo {
  uint32_t offset;
  const char* name;
};
#define ENTRY_POINT_INFO(x) { ENTRYPOINT_OFFSET(x), #x }
static const EntryPointInfo gThreadEntryPointInfo[] = {
  ENTRY_POINT_INFO(pAllocArrayFromCode),
  ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck),
  ENTRY_POINT_INFO(pAllocObjectFromCode),
  ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck),
  ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode),
  ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck),
  ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode),
  ENTRY_POINT_INFO(pCanPutArrayElementFromCode),
  ENTRY_POINT_INFO(pCheckCastFromCode),
  ENTRY_POINT_INFO(pInitializeStaticStorage),
  ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode),
  ENTRY_POINT_INFO(pInitializeTypeFromCode),
  ENTRY_POINT_INFO(pResolveStringFromCode),
  ENTRY_POINT_INFO(pSet32Instance),
  ENTRY_POINT_INFO(pSet32Static),
  ENTRY_POINT_INFO(pSet64Instance),
  ENTRY_POINT_INFO(pSet64Static),
  ENTRY_POINT_INFO(pSetObjInstance),
  ENTRY_POINT_INFO(pSetObjStatic),
  ENTRY_POINT_INFO(pGet32Instance),
  ENTRY_POINT_INFO(pGet32Static),
  ENTRY_POINT_INFO(pGet64Instance),
  ENTRY_POINT_INFO(pGet64Static),
  ENTRY_POINT_INFO(pGetObjInstance),
  ENTRY_POINT_INFO(pGetObjStatic),
  ENTRY_POINT_INFO(pHandleFillArrayDataFromCode),
  ENTRY_POINT_INFO(pJniMethodStart),
  ENTRY_POINT_INFO(pJniMethodStartSynchronized),
  ENTRY_POINT_INFO(pJniMethodEnd),
  ENTRY_POINT_INFO(pJniMethodEndSynchronized),
  ENTRY_POINT_INFO(pJniMethodEndWithReference),
  ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
  ENTRY_POINT_INFO(pLockObjectFromCode),
  ENTRY_POINT_INFO(pUnlockObjectFromCode),
  ENTRY_POINT_INFO(pCmpgDouble),
  ENTRY_POINT_INFO(pCmpgFloat),
  ENTRY_POINT_INFO(pCmplDouble),
  ENTRY_POINT_INFO(pCmplFloat),
  ENTRY_POINT_INFO(pFmod),
  ENTRY_POINT_INFO(pSqrt),
  ENTRY_POINT_INFO(pL2d),
  ENTRY_POINT_INFO(pFmodf),
  ENTRY_POINT_INFO(pL2f),
  ENTRY_POINT_INFO(pD2iz),
  ENTRY_POINT_INFO(pF2iz),
  ENTRY_POINT_INFO(pIdivmod),
  ENTRY_POINT_INFO(pD2l),
  ENTRY_POINT_INFO(pF2l),
  ENTRY_POINT_INFO(pLdiv),
  ENTRY_POINT_INFO(pLdivmod),
  ENTRY_POINT_INFO(pLmul),
  ENTRY_POINT_INFO(pShlLong),
  ENTRY_POINT_INFO(pShrLong),
  ENTRY_POINT_INFO(pUshrLong),
  ENTRY_POINT_INFO(pInterpreterToInterpreterEntry),
  ENTRY_POINT_INFO(pInterpreterToQuickEntry),
  ENTRY_POINT_INFO(pIndexOf),
  ENTRY_POINT_INFO(pMemcmp16),
  ENTRY_POINT_INFO(pStringCompareTo),
  ENTRY_POINT_INFO(pMemcpy),
  ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode),
  ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode),
  ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
  ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
  ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
  ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
  ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
  ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
  ENTRY_POINT_INFO(pCheckSuspendFromCode),
  ENTRY_POINT_INFO(pTestSuspendFromCode),
  ENTRY_POINT_INFO(pDeliverException),
  ENTRY_POINT_INFO(pThrowArrayBoundsFromCode),
  ENTRY_POINT_INFO(pThrowDivZeroFromCode),
  ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode),
  ENTRY_POINT_INFO(pThrowNullPointerFromCode),
  ENTRY_POINT_INFO(pThrowStackOverflowFromCode),
};
#undef ENTRY_POINT_INFO

void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) {
  CHECK_EQ(size_of_pointers, 4U);  // TODO: support 64-bit targets.
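  // Try the named Thread fields first, then scan the entry point table; an offset that matches
  // neither is printed as a raw number. For example, the offset of entrypoints_ itself resolves
  // to the first table entry, "pAllocArrayFromCode".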

#define DO_THREAD_OFFSET(x) \
  if (offset == static_cast<uint32_t>(OFFSETOF_VOLATILE_MEMBER(Thread, x))) { \
    os << # x; \
    return; \
  }
  DO_THREAD_OFFSET(state_and_flags_);
  DO_THREAD_OFFSET(card_table_);
  DO_THREAD_OFFSET(exception_);
  DO_THREAD_OFFSET(opeer_);
  DO_THREAD_OFFSET(jni_env_);
  DO_THREAD_OFFSET(self_);
  DO_THREAD_OFFSET(stack_end_);
  DO_THREAD_OFFSET(suspend_count_);
  DO_THREAD_OFFSET(thin_lock_id_);
  //DO_THREAD_OFFSET(top_of_managed_stack_);
  //DO_THREAD_OFFSET(top_of_managed_stack_pc_);
  DO_THREAD_OFFSET(top_sirt_);
#undef DO_THREAD_OFFSET

  size_t entry_point_count = arraysize(gThreadEntryPointInfo);
  CHECK_EQ(entry_point_count * size_of_pointers, sizeof(EntryPoints));
  uint32_t expected_offset = OFFSETOF_MEMBER(Thread, entrypoints_);
  for (size_t i = 0; i < entry_point_count; ++i) {
    CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name;
    expected_offset += size_of_pointers;
    if (gThreadEntryPointInfo[i].offset == offset) {
      os << gThreadEntryPointInfo[i].name;
      return;
    }
  }
  os << offset;
}

static const bool kDebugExceptionDelivery = false;
class CatchBlockStackVisitor : public StackVisitor {
 public:
  CatchBlockStackVisitor(Thread* self, const ThrowLocation& throw_location,
                         mirror::Throwable* exception, bool is_deoptimization)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(self, self->GetLongJumpContext()),
        self_(self), exception_(exception), is_deoptimization_(is_deoptimization),
        to_find_(is_deoptimization ? NULL : exception->GetClass()), throw_location_(throw_location),
        handler_quick_frame_(NULL), handler_quick_frame_pc_(0), handler_dex_pc_(0),
        native_method_count_(0),
        method_tracing_active_(is_deoptimization ||
                               Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
        instrumentation_frames_to_pop_(0), top_shadow_frame_(NULL), prev_shadow_frame_(NULL) {
    // Exception not in root sets, can't allow GC.
    last_no_assert_suspension_cause_ = self->StartAssertNoThreadSuspension("Finding catch block");
  }

  ~CatchBlockStackVisitor() {
    LOG(FATAL) << "UNREACHABLE";  // Expected to take long jump.
  }

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::AbstractMethod* method = GetMethod();
    if (method == NULL) {
      // This is the upcall; we remember the frame and last pc so that we may long jump to them.
      handler_quick_frame_pc_ = GetCurrentQuickFramePc();
      handler_quick_frame_ = GetCurrentQuickFrame();
      return false;  // End stack walk.
    } else {
      if (UNLIKELY(method_tracing_active_ &&
                   GetInstrumentationExitPc() == GetReturnPc())) {
        // Keep count of the number of unwinds during instrumentation.
        instrumentation_frames_to_pop_++;
      }
      if (method->IsRuntimeMethod()) {
        // Ignore callee save method.
        DCHECK(method->IsCalleeSaveMethod());
        return true;
      } else if (is_deoptimization_) {
        return HandleDeoptimization(method);
      } else {
        return HandleTryItems(method);
      }
    }
  }
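
  // Checks this frame's method for a catch handler matching the thrown type; finding one
  // records the frame and ends the walk. Native frames have no dex pc, so they are only
  // counted (each one has a SIRT that will need popping).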
  bool HandleTryItems(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uint32_t dex_pc = DexFile::kDexNoIndex;
    if (method->IsNative()) {
      native_method_count_++;
    } else {
      dex_pc = GetDexPc();
    }
    if (dex_pc != DexFile::kDexNoIndex) {
      uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc);
      if (found_dex_pc != DexFile::kDexNoIndex) {
        handler_dex_pc_ = found_dex_pc;
        handler_quick_frame_pc_ = method->ToNativePc(found_dex_pc);
        handler_quick_frame_ = GetCurrentQuickFrame();
        return false;  // End stack walk.
      }
    }
    return true;  // Continue stack walk.
  }

  bool HandleDeoptimization(mirror::AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    MethodHelper mh(m);
    const DexFile::CodeItem* code_item = mh.GetCodeItem();
    CHECK(code_item != NULL);
    uint16_t num_regs = code_item->registers_size_;
    uint32_t dex_pc = GetDexPc();
    const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
    uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
    ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, m, new_dex_pc);
    verifier::MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(),
                                      mh.GetClassDefIndex(), code_item,
                                      m->GetDexMethodIndex(), m, m->GetAccessFlags(), false, true);
    verifier.Verify();
    std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
    for (uint16_t reg = 0; reg < num_regs; reg++) {
      VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
      switch (kind) {
        case kUndefined:
          new_frame->SetVReg(reg, 0xEBADDE09);
          break;
        case kConstant:
          new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
          break;
        case kReferenceVReg:
          new_frame->SetVRegReference(reg,
                                      reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kind)));
          break;
        default:
          new_frame->SetVReg(reg, GetVReg(m, reg, kind));
          break;
      }
    }
    if (prev_shadow_frame_ != NULL) {
      prev_shadow_frame_->SetLink(new_frame);
    } else {
      top_shadow_frame_ = new_frame;
    }
    prev_shadow_frame_ = new_frame;
    return true;
  }

  void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::AbstractMethod* catch_method = *handler_quick_frame_;
    if (catch_method == NULL) {
      if (kDebugExceptionDelivery) {
        LOG(INFO) << "Handler is upcall";
      }
    } else {
      CHECK(!is_deoptimization_);
      if (kDebugExceptionDelivery) {
        const DexFile& dex_file = *catch_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
        int line_number = dex_file.GetLineNumFromPC(catch_method, handler_dex_pc_);
        LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
      }
    }
    // Put exception back in root set and clear throw location.
    self_->SetException(ThrowLocation(), exception_);
    self_->EndAssertNoThreadSuspension(last_no_assert_suspension_cause_);
    // Do instrumentation events after allowing thread suspension again.
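    // (The stack walk ran under StartAssertNoThreadSuspension, so callbacks that may allocate
    // or suspend have to wait until this point.)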
    instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
    for (size_t i = 0; i < instrumentation_frames_to_pop_; ++i) {
      // We pop the instrumentation stack here so as not to corrupt it during the stack walk.
      if (i != instrumentation_frames_to_pop_ - 1 ||
          self_->GetInstrumentationStack()->front().method_ != catch_method) {
        // Don't pop the instrumentation frame of the catch handler.
        instrumentation->PopMethodForUnwind(self_, is_deoptimization_);
      }
    }
    if (!is_deoptimization_) {
      instrumentation->ExceptionCaughtEvent(self_, throw_location_, catch_method, handler_dex_pc_,
                                            exception_);
    } else {
      // TODO: proper return value.
      self_->SetDeoptimizationShadowFrame(top_shadow_frame_);
    }
    // Place context back on thread so it will be available when we continue.
    self_->ReleaseLongJumpContext(context_);
    context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
    CHECK_NE(handler_quick_frame_pc_, 0u);
    context_->SetPC(handler_quick_frame_pc_);
    context_->SmashCallerSaves();
    context_->DoLongJump();
  }

 private:
  Thread* const self_;
  mirror::Throwable* const exception_;
  const bool is_deoptimization_;
  // The type of the exception catch block to find.
  mirror::Class* const to_find_;
  // Location of the throw.
  const ThrowLocation& throw_location_;
  // Quick frame with found handler or last frame if no handler found.
  mirror::AbstractMethod** handler_quick_frame_;
  // PC to branch to for the handler.
  uintptr_t handler_quick_frame_pc_;
  // Associated dex PC.
  uint32_t handler_dex_pc_;
  // Number of native methods passed in crawl (equates to number of SIRTs to pop).
  uint32_t native_method_count_;
  // Is method tracing active?
  const bool method_tracing_active_;
  // Support for nesting no thread suspension checks.
  const char* last_no_assert_suspension_cause_;
  // Number of frames to pop in long jump.
  size_t instrumentation_frames_to_pop_;
  ShadowFrame* top_shadow_frame_;
  ShadowFrame* prev_shadow_frame_;
};

void Thread::QuickDeliverException() {
  // Get exception from thread.
  ThrowLocation throw_location;
  mirror::Throwable* exception = GetException(&throw_location);
  CHECK(exception != NULL);
  // Don't leave exception visible while we try to find the handler, which may cause class
  // resolution.
  ClearException();
  bool is_deoptimization = (exception == reinterpret_cast<mirror::Throwable*>(-1));
  if (kDebugExceptionDelivery) {
    if (!is_deoptimization) {
      mirror::String* msg = exception->GetDetailMessage();
      std::string str_msg(msg != NULL ? msg->ToModifiedUtf8() : "");
      DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
                << ": " << str_msg << "\n");
    } else {
      DumpStack(LOG(INFO) << "Deoptimizing: ");
    }
  }
  CatchBlockStackVisitor catch_finder(this, throw_location, exception, is_deoptimization);
  catch_finder.WalkStack(true);
  catch_finder.DoLongJump();
  LOG(FATAL) << "UNREACHABLE";
}

Context* Thread::GetLongJumpContext() {
  Context* result = long_jump_context_;
  if (result == NULL) {
    result = Context::Create();
  } else {
    long_jump_context_ = NULL;  // Avoid context being shared.
    result->Reset();
  }
  return result;
}
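
// Walks to the first non-runtime method on the stack, recording that method and its dex pc;
// the 'this' object is only fetched when the caller supplied a context.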
struct CurrentMethodVisitor : public StackVisitor {
  CurrentMethodVisitor(Thread* thread, Context* context)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), this_object_(NULL), method_(NULL), dex_pc_(0) {}
  virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::AbstractMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      // Continue if this is a runtime method.
      return true;
    }
    if (context_ != NULL) {
      this_object_ = GetThisObject();
    }
    method_ = m;
    dex_pc_ = GetDexPc();
    return false;
  }
  mirror::Object* this_object_;
  mirror::AbstractMethod* method_;
  uint32_t dex_pc_;
};

mirror::AbstractMethod* Thread::GetCurrentMethod(uint32_t* dex_pc) const {
  CurrentMethodVisitor visitor(const_cast<Thread*>(this), NULL);
  visitor.WalkStack(false);
  if (dex_pc != NULL) {
    *dex_pc = visitor.dex_pc_;
  }
  return visitor.method_;
}

ThrowLocation Thread::GetCurrentLocationForThrow() {
  Context* context = GetLongJumpContext();
  CurrentMethodVisitor visitor(this, context);
  visitor.WalkStack(false);
  ReleaseLongJumpContext(context);
  return ThrowLocation(visitor.this_object_, visitor.method_, visitor.dex_pc_);
}

bool Thread::HoldsLock(mirror::Object* object) {
  if (object == NULL) {
    return false;
  }
  return object->GetThinLockId() == thin_lock_id_;
}

// RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
template <typename RootVisitor>
class ReferenceMapVisitor : public StackVisitor {
 public:
  ReferenceMapVisitor(Thread* thread, Context* context, const RootVisitor& visitor)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), visitor_(visitor) {}

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (false) {
      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
                << StringPrintf("@ PC:%04x", GetDexPc());
    }
    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
    if (shadow_frame != NULL) {
      mirror::AbstractMethod* m = shadow_frame->GetMethod();
      size_t num_regs = shadow_frame->NumberOfVRegs();
      if (m->IsNative() || shadow_frame->HasReferenceArray()) {
        // SIRT for JNI or References for interpreter.
        for (size_t reg = 0; reg < num_regs; ++reg) {
          mirror::Object* ref = shadow_frame->GetVRegReference(reg);
          if (ref != NULL) {
            visitor_(ref, reg, this);
          }
        }
      } else {
        // Java method.
        // The portable path uses the DexGcMap stored in Method::native_gc_map_.
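        // The map begins with its length as a big-endian 32-bit value; the entries that follow
        // map each dex pc to a bitmap of the registers holding references at that point.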
        const uint8_t* gc_map = m->GetNativeGcMap();
        CHECK(gc_map != NULL) << PrettyMethod(m);
        uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
                                                       (gc_map[1] << 16) |
                                                       (gc_map[2] << 8) |
                                                       (gc_map[3] << 0));
        verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
        uint32_t dex_pc = GetDexPc();
        const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
        DCHECK(reg_bitmap != NULL);
        num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
        for (size_t reg = 0; reg < num_regs; ++reg) {
          if (TestBitmap(reg, reg_bitmap)) {
            mirror::Object* ref = shadow_frame->GetVRegReference(reg);
            if (ref != NULL) {
              visitor_(ref, reg, this);
            }
          }
        }
      }
    } else {
      mirror::AbstractMethod* m = GetMethod();
      // Process register map (which native and runtime methods don't have).
      if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
        const uint8_t* native_gc_map = m->GetNativeGcMap();
        CHECK(native_gc_map != NULL) << PrettyMethod(m);
        mh_.ChangeMethod(m);
        const DexFile::CodeItem* code_item = mh_.GetCodeItem();
        DCHECK(code_item != NULL) << PrettyMethod(m);  // Can't be NULL or how would we compile its instructions?
        NativePcOffsetToReferenceMap map(native_gc_map);
        size_t num_regs = std::min(map.RegWidth() * 8,
                                   static_cast<size_t>(code_item->registers_size_));
        if (num_regs > 0) {
          const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
          DCHECK(reg_bitmap != NULL);
          const VmapTable vmap_table(m->GetVmapTableRaw());
          uint32_t core_spills = m->GetCoreSpillMask();
          uint32_t fp_spills = m->GetFpSpillMask();
          size_t frame_size = m->GetFrameSizeInBytes();
          // For all dex registers in the bitmap.
          mirror::AbstractMethod** cur_quick_frame = GetCurrentQuickFrame();
          DCHECK(cur_quick_frame != NULL);
          for (size_t reg = 0; reg < num_regs; ++reg) {
            // Does this register hold a reference?
            if (TestBitmap(reg, reg_bitmap)) {
              uint32_t vmap_offset;
              mirror::Object* ref;
              if (vmap_table.IsInContext(reg, vmap_offset, kReferenceVReg)) {
                uintptr_t val = GetGPR(vmap_table.ComputeRegister(core_spills, vmap_offset,
                                                                  kReferenceVReg));
                ref = reinterpret_cast<mirror::Object*>(val);
              } else {
                ref = reinterpret_cast<mirror::Object*>(GetVReg(cur_quick_frame, code_item,
                                                                core_spills, fp_spills, frame_size,
                                                                reg));
              }

              if (ref != NULL) {
                visitor_(ref, reg, this);
              }
            }
          }
        }
      }
    }
    return true;
  }

 private:
  static bool TestBitmap(int reg, const uint8_t* reg_vector) {
    return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
  }

  // Visitor for when we visit a root.
  const RootVisitor& visitor_;

  // A method helper we keep around to avoid dex file/cache re-computations.
  MethodHelper mh_;
};
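
// Adapts a plain RootVisitor callback to the (object, vreg, stack visitor) shape that
// ReferenceMapVisitor expects, discarding the extra arguments.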
class RootCallbackVisitor {
 public:
  RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {}

  void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const {
    visitor_(obj, arg_);
  }

 private:
  RootVisitor* visitor_;
  void* arg_;
};

class VerifyCallbackVisitor {
 public:
  VerifyCallbackVisitor(VerifyRootVisitor* visitor, void* arg)
      : visitor_(visitor),
        arg_(arg) {
  }

  void operator()(const mirror::Object* obj, size_t vreg, const StackVisitor* visitor) const {
    visitor_(obj, arg_, vreg, visitor);
  }

 private:
  VerifyRootVisitor* const visitor_;
  void* const arg_;
};

struct VerifyRootWrapperArg {
  VerifyRootVisitor* visitor;
  void* arg;
};

static void VerifyRootWrapperCallback(const mirror::Object* root, void* arg) {
  VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
  wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
}

void Thread::VerifyRoots(VerifyRootVisitor* visitor, void* arg) {
  // We need to map from a RootVisitor to VerifyRootVisitor, so pass in nulls for arguments we
  // don't have.
  VerifyRootWrapperArg wrapperArg;
  wrapperArg.arg = arg;
  wrapperArg.visitor = visitor;

  if (opeer_ != NULL) {
    VerifyRootWrapperCallback(opeer_, &wrapperArg);
  }
  if (exception_ != NULL) {
    VerifyRootWrapperCallback(exception_, &wrapperArg);
  }
  throw_location_.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
  if (class_loader_override_ != NULL) {
    VerifyRootWrapperCallback(class_loader_override_, &wrapperArg);
  }
  jni_env_->locals.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
  jni_env_->monitors.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);

  SirtVisitRoots(VerifyRootWrapperCallback, &wrapperArg);

  // Visit roots on this thread's stack.
  Context* context = GetLongJumpContext();
  VerifyCallbackVisitor visitorToCallback(visitor, arg);
  ReferenceMapVisitor<VerifyCallbackVisitor> mapper(this, context, visitorToCallback);
  mapper.WalkStack();
  ReleaseLongJumpContext(context);

  std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack =
      GetInstrumentationStack();
  typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It;
  for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end;
       ++it) {
    mirror::Object* this_object = (*it).this_object_;
    if (this_object != NULL) {
      VerifyRootWrapperCallback(this_object, &wrapperArg);
    }
    mirror::AbstractMethod* method = (*it).method_;
    VerifyRootWrapperCallback(method, &wrapperArg);
  }
}
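
// Visits this thread's roots: the managed peer, pending exception, throw location, class
// loader override, JNI locals and monitors, SIRT references, everything live on the managed
// stack, and the objects held by instrumentation frames.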
void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
  if (opeer_ != NULL) {
    visitor(opeer_, arg);
  }
  if (exception_ != NULL) {
    visitor(exception_, arg);
  }
  throw_location_.VisitRoots(visitor, arg);
  if (class_loader_override_ != NULL) {
    visitor(class_loader_override_, arg);
  }
  jni_env_->locals.VisitRoots(visitor, arg);
  jni_env_->monitors.VisitRoots(visitor, arg);

  SirtVisitRoots(visitor, arg);

  // Visit roots on this thread's stack.
  Context* context = GetLongJumpContext();
  RootCallbackVisitor visitorToCallback(visitor, arg);
  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context, visitorToCallback);
  mapper.WalkStack();
  ReleaseLongJumpContext(context);

  std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack =
      GetInstrumentationStack();
  typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It;
  for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end;
       ++it) {
    mirror::Object* this_object = (*it).this_object_;
    if (this_object != NULL) {
      visitor(this_object, arg);
    }
    mirror::AbstractMethod* method = (*it).method_;
    visitor(method, arg);
  }
}

static void VerifyObject(const mirror::Object* root, void* arg) {
  gc::Heap* heap = reinterpret_cast<gc::Heap*>(arg);
  heap->VerifyObject(root);
}

void Thread::VerifyStackImpl() {
  UniquePtr<Context> context(Context::Create());
  RootCallbackVisitor visitorToCallback(VerifyObject, Runtime::Current()->GetHeap());
  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitorToCallback);
  mapper.WalkStack();
}

// Set the stack end to the value to be used while handling a stack overflow.
void Thread::SetStackEndForStackOverflow() {
  // During stack overflow we allow use of the full stack.
  if (stack_end_ == stack_begin_) {
    DumpStack(std::cerr);
    LOG(FATAL) << "Need to increase kStackOverflowReservedBytes (currently "
               << kStackOverflowReservedBytes << ")";
  }

  stack_end_ = stack_begin_;
}

std::ostream& operator<<(std::ostream& os, const Thread& thread) {
  thread.ShortDump(os);
  return os;
}

}  // namespace art