thread.cc revision 28c4a233681040de4b2785ab5beef0a6d150e46a
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread.h"

#if !defined(__APPLE__)
#include <sched.h>
#endif

#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
#include <sys/time.h>

#include <algorithm>
#include <bitset>
#include <cerrno>
#include <iostream>
#include <list>
#include <sstream>

#include "android-base/stringprintf.h"

#include "arch/context.h"
#include "arch/context-inl.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "base/memory_tool.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
#include "base/to_str.h"
#include "base/systrace.h"
#include "class_linker-inl.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "dex_file_annotations.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/allocator/rosalloc.h"
#include "gc/heap.h"
#include "gc/space/space-inl.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "indirect_reference_table-inl.h"
#include "interpreter/interpreter.h"
#include "interpreter/shadow_frame.h"
#include "java_frame_root_info.h"
#include "java_vm_ext.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "native_stack_dump.h"
#include "nth_caller_visitor.h"
#include "oat_quick_method_header.h"
#include "obj_ptr-inl.h"
#include "object_lock.h"
#include "quick_exception_handler.h"
#include "quick/quick_method_frame_info.h"
#include "read_barrier-inl.h"
#include "reflection.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
#include "stack.h"
#include "stack_map.h"
#include "thread_list.h"
#include "thread-inl.h"
#include "utils.h"
#include "verifier/method_verifier.h"
#include "verify_object.h"
#include "well_known_classes.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

namespace art {

using android::base::StringAppendV;
using android::base::StringPrintf;

extern "C" NO_RETURN void artDeoptimize(Thread* self);

bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = nullptr;
const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
Thread* Thread::jit_sensitive_thread_ = nullptr;

static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;

// For implicit overflow checks we reserve an extra piece of memory at the bottom
// of the stack (lowest memory). The higher portion of the memory
// is protected against reads and the lower is available for use while
// throwing the StackOverflow exception.
constexpr size_t kStackOverflowProtectedSize = 4 * kMemoryToolStackGuardSizeScale * KB;

static const char* kThreadNameDuringStartup = "<native thread without managed peer>";

void Thread::InitCardTable() {
  tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
}

static void UnimplementedEntryPoint() {
  UNIMPLEMENTED(FATAL);
}

void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints);
void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active);

void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
  CHECK(kUseReadBarrier);
  tls32_.is_gc_marking = is_marking;
  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active */ is_marking);
  ResetQuickAllocEntryPointsForThread(is_marking);
}

void Thread::InitTlsEntryPoints() {
  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints);
  uintptr_t* end = reinterpret_cast<uintptr_t*>(
      reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) + sizeof(tlsPtr_.quick_entrypoints));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
  }
  InitEntryPoints(&tlsPtr_.jni_entrypoints, &tlsPtr_.quick_entrypoints);
}

void Thread::ResetQuickAllocEntryPointsForThread(bool is_marking) {
  if (kUseReadBarrier && kRuntimeISA != kX86_64) {
    // Allocation entrypoint switching is currently only implemented for X86_64.
    is_marking = true;
  }
  ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints, is_marking);
}

class DeoptimizationContextRecord {
 public:
  DeoptimizationContextRecord(const JValue& ret_val,
                              bool is_reference,
                              bool from_code,
                              ObjPtr<mirror::Throwable> pending_exception,
                              DeoptimizationContextRecord* link)
      : ret_val_(ret_val),
        is_reference_(is_reference),
        from_code_(from_code),
        pending_exception_(pending_exception.Ptr()),
        link_(link) {}

  JValue GetReturnValue() const { return ret_val_; }
  bool IsReference() const { return is_reference_; }
  bool GetFromCode() const { return from_code_; }
  ObjPtr<mirror::Throwable> GetPendingException() const { return pending_exception_; }
  DeoptimizationContextRecord* GetLink() const { return link_; }
  mirror::Object** GetReturnValueAsGCRoot() {
    DCHECK(is_reference_);
    return ret_val_.GetGCRoot();
  }
  mirror::Object** GetPendingExceptionAsGCRoot() {
    return reinterpret_cast<mirror::Object**>(&pending_exception_);
  }

 private:
  // The value returned by the method at the top of the stack before deoptimization.
  JValue ret_val_;

  // Indicates whether the returned value is a reference. If so, the GC will visit it.
  const bool is_reference_;

  // Whether the context was created from an explicit deoptimization in the code.
  const bool from_code_;

  // The exception that was pending before deoptimization (or null if there was no pending
  // exception).
  mirror::Throwable* pending_exception_;

  // A link to the previous DeoptimizationContextRecord.
  DeoptimizationContextRecord* const link_;

  DISALLOW_COPY_AND_ASSIGN(DeoptimizationContextRecord);
};
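
// The records above and below form intrusive, singly linked LIFO stacks hanging off the
// owning Thread (tlsPtr_.deoptimization_context_stack and tlsPtr_.stacked_shadow_frame_record).
// A minimal push/pop sketch with hypothetical values (not a real call site), assuming a
// Thread* self that owns the records:
//
//   JValue result;
//   result.SetJ(0);
//   self->PushDeoptimizationContext(
//       result, /* is_reference */ false, /* from_code */ false, /* exception */ nullptr);
//   ...
//   ObjPtr<mirror::Throwable> pending;
//   bool from_code;
//   self->PopDeoptimizationContext(&result, &pending, &from_code);  // LIFO order.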

class StackedShadowFrameRecord {
 public:
  StackedShadowFrameRecord(ShadowFrame* shadow_frame,
                           StackedShadowFrameType type,
                           StackedShadowFrameRecord* link)
      : shadow_frame_(shadow_frame),
        type_(type),
        link_(link) {}

  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
  StackedShadowFrameType GetType() const { return type_; }
  StackedShadowFrameRecord* GetLink() const { return link_; }

 private:
  ShadowFrame* const shadow_frame_;
  const StackedShadowFrameType type_;
  StackedShadowFrameRecord* const link_;

  DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
};

void Thread::PushDeoptimizationContext(const JValue& return_value,
                                       bool is_reference,
                                       bool from_code,
                                       ObjPtr<mirror::Throwable> exception) {
  DeoptimizationContextRecord* record = new DeoptimizationContextRecord(
      return_value,
      is_reference,
      from_code,
      exception,
      tlsPtr_.deoptimization_context_stack);
  tlsPtr_.deoptimization_context_stack = record;
}

void Thread::PopDeoptimizationContext(JValue* result,
                                      ObjPtr<mirror::Throwable>* exception,
                                      bool* from_code) {
  AssertHasDeoptimizationContext();
  DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
  tlsPtr_.deoptimization_context_stack = record->GetLink();
  result->SetJ(record->GetReturnValue().GetJ());
  *exception = record->GetPendingException();
  *from_code = record->GetFromCode();
  delete record;
}

void Thread::AssertHasDeoptimizationContext() {
  CHECK(tlsPtr_.deoptimization_context_stack != nullptr)
      << "No deoptimization context for thread " << *this;
}

void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
  StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
      sf, type, tlsPtr_.stacked_shadow_frame_record);
  tlsPtr_.stacked_shadow_frame_record = record;
}

ShadowFrame* Thread::PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present) {
  StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
  if (must_be_present) {
    DCHECK(record != nullptr);
  } else {
    if (record == nullptr || record->GetType() != type) {
      return nullptr;
    }
  }
  tlsPtr_.stacked_shadow_frame_record = record->GetLink();
  ShadowFrame* shadow_frame = record->GetShadowFrame();
  delete record;
  return shadow_frame;
}

class FrameIdToShadowFrame {
 public:
  static FrameIdToShadowFrame* Create(size_t frame_id,
                                      ShadowFrame* shadow_frame,
                                      FrameIdToShadowFrame* next,
                                      size_t num_vregs) {
    // Append a bool array at the end to keep track of what vregs are updated by the debugger.
    uint8_t* memory = new uint8_t[sizeof(FrameIdToShadowFrame) + sizeof(bool) * num_vregs];
    return new (memory) FrameIdToShadowFrame(frame_id, shadow_frame, next);
  }

  static void Delete(FrameIdToShadowFrame* f) {
    uint8_t* memory = reinterpret_cast<uint8_t*>(f);
    delete[] memory;
  }

  size_t GetFrameId() const { return frame_id_; }
  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
  FrameIdToShadowFrame* GetNext() const { return next_; }
  void SetNext(FrameIdToShadowFrame* next) { next_ = next; }
  bool* GetUpdatedVRegFlags() {
    return updated_vreg_flags_;
  }

 private:
  FrameIdToShadowFrame(size_t frame_id,
                       ShadowFrame* shadow_frame,
                       FrameIdToShadowFrame* next)
      : frame_id_(frame_id),
        shadow_frame_(shadow_frame),
        next_(next) {}

  const size_t frame_id_;
  ShadowFrame* const shadow_frame_;
  FrameIdToShadowFrame* next_;
  bool updated_vreg_flags_[0];

  DISALLOW_COPY_AND_ASSIGN(FrameIdToShadowFrame);
};

static FrameIdToShadowFrame* FindFrameIdToShadowFrame(FrameIdToShadowFrame* head,
                                                      size_t frame_id) {
  FrameIdToShadowFrame* found = nullptr;
  for (FrameIdToShadowFrame* record = head; record != nullptr; record = record->GetNext()) {
    if (record->GetFrameId() == frame_id) {
      if (kIsDebugBuild) {
        // Sanity check we have at most one record for this frame.
        CHECK(found == nullptr) << "Multiple records for the frame " << frame_id;
        found = record;
      } else {
        return record;
      }
    }
  }
  return found;
}

ShadowFrame* Thread::FindDebuggerShadowFrame(size_t frame_id) {
  FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
      tlsPtr_.frame_id_to_shadow_frame, frame_id);
  if (record != nullptr) {
    return record->GetShadowFrame();
  }
  return nullptr;
}

// Must only be called when FindDebuggerShadowFrame(frame_id) returns non-nullptr.
bool* Thread::GetUpdatedVRegFlags(size_t frame_id) {
  FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
      tlsPtr_.frame_id_to_shadow_frame, frame_id);
  CHECK(record != nullptr);
  return record->GetUpdatedVRegFlags();
}

ShadowFrame* Thread::FindOrCreateDebuggerShadowFrame(size_t frame_id,
                                                     uint32_t num_vregs,
                                                     ArtMethod* method,
                                                     uint32_t dex_pc) {
  ShadowFrame* shadow_frame = FindDebuggerShadowFrame(frame_id);
  if (shadow_frame != nullptr) {
    return shadow_frame;
  }
  VLOG(deopt) << "Create pre-deopted ShadowFrame for " << ArtMethod::PrettyMethod(method);
  shadow_frame = ShadowFrame::CreateDeoptimizedFrame(num_vregs, nullptr, method, dex_pc);
  FrameIdToShadowFrame* record = FrameIdToShadowFrame::Create(frame_id,
                                                              shadow_frame,
                                                              tlsPtr_.frame_id_to_shadow_frame,
                                                              num_vregs);
  for (uint32_t i = 0; i < num_vregs; i++) {
    // Do this to clear all references for root visitors.
    shadow_frame->SetVRegReference(i, nullptr);
    // This flag will be changed to true if the debugger modifies the value.
    record->GetUpdatedVRegFlags()[i] = false;
  }
  tlsPtr_.frame_id_to_shadow_frame = record;
  return shadow_frame;
}
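
// Layout note for FrameIdToShadowFrame::Create/Delete above: the record is constructed with
// placement new inside a raw uint8_t buffer of sizeof(FrameIdToShadowFrame) + num_vregs bytes,
// so the trailing updated_vreg_flags_[0] array gets one bool per vreg. For example, with
// num_vregs == 4 the buffer is laid out roughly as:
//
//   [ frame_id_ | shadow_frame_ | next_ | flag0 flag1 flag2 flag3 ]
//
// Delete() correspondingly casts back to uint8_t* and uses delete[]; this is safe because the
// record has no non-trivial destructor to run.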

void Thread::RemoveDebuggerShadowFrameMapping(size_t frame_id) {
  FrameIdToShadowFrame* head = tlsPtr_.frame_id_to_shadow_frame;
  if (head->GetFrameId() == frame_id) {
    tlsPtr_.frame_id_to_shadow_frame = head->GetNext();
    FrameIdToShadowFrame::Delete(head);
    return;
  }
  FrameIdToShadowFrame* prev = head;
  for (FrameIdToShadowFrame* record = head->GetNext();
       record != nullptr;
       prev = record, record = record->GetNext()) {
    if (record->GetFrameId() == frame_id) {
      prev->SetNext(record->GetNext());
      FrameIdToShadowFrame::Delete(record);
      return;
    }
  }
  LOG(FATAL) << "No shadow frame for frame " << frame_id;
  UNREACHABLE();
}

void Thread::InitTid() {
  tls32_.tid = ::art::GetTid();
}

void Thread::InitAfterFork() {
  // One thread (us) survived the fork, but we have a new tid so we need to
  // update the value stashed in this Thread*.
  InitTid();
}

void* Thread::CreateCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
    return nullptr;
  }
  {
    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
    // after self->Init().
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    // Check that if we got here we cannot be shutting down (as shutdown should never have started
    // while threads are being born).
    CHECK(!runtime->IsShuttingDownLocked());
    // Note: given that the JNIEnv is created in the parent thread, the only failure point here is
    // a mess in InitStackHwm. We do not have a reasonable way to recover from that, so abort
    // the runtime in such a case. In case this ever changes, we need to make sure here to
    // delete the tmp_jni_env, as we own it at this point.
    CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM(), self->tlsPtr_.tmp_jni_env));
    self->tlsPtr_.tmp_jni_env = nullptr;
    Runtime::Current()->EndThreadBirth();
  }
  {
    ScopedObjectAccess soa(self);
    self->InitStringEntryPoints();

    // Copy peer into self, deleting global reference when done.
    CHECK(self->tlsPtr_.jpeer != nullptr);
    self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr();
    self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
    self->tlsPtr_.jpeer = nullptr;
    self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str());

    ArtField* priorityField = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority);
    self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));

    runtime->GetRuntimeCallbacks()->ThreadStart(self);

    // Invoke the 'run' method of our java.lang.Thread.
    ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer;
    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
    ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
    InvokeVirtualOrInterfaceWithJValues(soa, ref.get(), mid, nullptr);
  }
  // Detach and delete self.
  Runtime::Current()->GetThreadList()->Unregister(self);

  return nullptr;
}
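
// Lifecycle summary: a managed thread is born on the parent side in Thread::CreateNativeThread
// (below), which calls pthread_create with CreateCallback (above) as the start routine. The
// child then runs Init() for its native state, invokes the peer's run() method, and finally
// unregisters itself via ThreadList::Unregister, which deletes the Thread.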

Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
                                  ObjPtr<mirror::Object> thread_peer) {
  ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer);
  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
  // to stop it from going away.
  if (kIsDebugBuild) {
    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
    if (result != nullptr && !result->IsSuspended()) {
      Locks::thread_list_lock_->AssertHeld(soa.Self());
    }
  }
  return result;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
                                  jobject java_thread) {
  return FromManagedThread(soa, soa.Decode<mirror::Object>(java_thread).Ptr());
}

static size_t FixStackSize(size_t stack_size) {
  // A stack size of zero means "use the default".
  if (stack_size == 0) {
    stack_size = Runtime::Current()->GetDefaultStackSize();
  }

  // Dalvik used the bionic pthread default stack size for native threads,
  // so include that here to support apps that expect large native stacks.
  stack_size += 1 * MB;

  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
  if (stack_size < PTHREAD_STACK_MIN) {
    stack_size = PTHREAD_STACK_MIN;
  }

  if (Runtime::Current()->ExplicitStackOverflowChecks()) {
    // It's likely that callers are trying to ensure they have at least a certain amount of
    // stack space, so we should add our reserved space on top of what they requested, rather
    // than implicitly take it away from them.
    stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
  } else {
    // If we are going to use implicit stack checks, allocate space for the protected
    // region at the bottom of the stack.
    stack_size += Thread::kStackOverflowImplicitCheckSize +
        GetStackOverflowReservedBytes(kRuntimeISA);
  }

  // Some systems require the stack size to be a multiple of the system page size, so round up.
  stack_size = RoundUp(stack_size, kPageSize);

  return stack_size;
}

// Return the nearest page-aligned address below the current stack top.
NO_INLINE
static uint8_t* FindStackTop() {
  return reinterpret_cast<uint8_t*>(
      AlignDown(__builtin_frame_address(0), kPageSize));
}
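
// Worked example for FixStackSize above, using hypothetical numbers with implicit stack
// checks enabled and 4KB pages: a request of stack_size == 0 becomes the runtime default
// (say 1MB), plus 1MB for Dalvik compatibility, plus kStackOverflowImplicitCheckSize and
// GetStackOverflowReservedBytes(kRuntimeISA) (e.g. 8KB each), then rounded up to a page
// multiple: roughly 2MB + 16KB in total.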

// Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack
// overflow is detected. It is located right below stack_begin_.
ATTRIBUTE_NO_SANITIZE_ADDRESS
void Thread::InstallImplicitProtection() {
  uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
  // Page containing current top of stack.
  uint8_t* stack_top = FindStackTop();

  // Try to directly protect the stack.
  VLOG(threads) << "installing stack protected region at " << std::hex <<
      static_cast<void*>(pregion) << " to " <<
      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
  if (ProtectStack(/* fatal_on_error */ false)) {
    // Tell the kernel that we won't be needing these pages any more.
    // NB. madvise will probably write zeroes into the memory (on linux it does).
    uint32_t unwanted_size = stack_top - pregion - kPageSize;
    madvise(pregion, unwanted_size, MADV_DONTNEED);
    return;
  }

  // There is a little complexity here that deserves a special mention. On some
  // architectures, the stack is created using a VM_GROWSDOWN flag
  // to prevent memory being allocated when it's not needed. This flag makes the
  // kernel only allocate memory for the stack by growing down in memory. Because we
  // want to put an mprotected region far away from that at the stack top, we need
  // to make sure the pages for the stack are mapped in before we call mprotect.
  //
  // The failed mprotect in UnprotectStack is an indication of a thread with VM_GROWSDOWN
  // with a non-mapped stack (usually only the main thread).
  //
  // We map in the stack by reading every page from the stack bottom (highest address)
  // to the stack top. (We then madvise this away.) This must be done by reading from the
  // current stack pointer downwards. Any access more than a page below the current SP
  // might cause a segv.
  // TODO: This comment may be out of date. It seems possible to speed this up. As
  // this is normally done once in the zygote on startup, ignore for now.
  //
  // AddressSanitizer does not like the part of this function that reads every stack page.
  // It looks a lot like an out-of-bounds access.

  // (Defensively) first remove the protection on the protected region, as we will want to read
  // and write it. Ignore errors.
  UnprotectStack();

  VLOG(threads) << "Need to map in stack for thread at " << std::hex <<
      static_cast<void*>(pregion);

  // Read every page from the high address to the low.
  volatile uint8_t dont_optimize_this;
  UNUSED(dont_optimize_this);
  for (uint8_t* p = stack_top; p >= pregion; p -= kPageSize) {
    dont_optimize_this = *p;
  }

  VLOG(threads) << "(again) installing stack protected region at " << std::hex <<
      static_cast<void*>(pregion) << " to " <<
      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);

  // Protect the bottom of the stack to prevent read/write to it.
  ProtectStack(/* fatal_on_error */ true);

  // Tell the kernel that we won't be needing these pages any more.
  // NB. madvise will probably write zeroes into the memory (on linux it does).
  uint32_t unwanted_size = stack_top - pregion - kPageSize;
  madvise(pregion, unwanted_size, MADV_DONTNEED);
}
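
// Resulting stack layout once InstallImplicitProtection has run (sketch; addresses grow
// upward, the stack grows downward):
//
//   stack_begin_ + stack_size_  ->  +---------------------------+  highest address
//                                   |       usable stack        |
//   stack_end_                  ->  +---------------------------+
//                                   | reserved for the overflow |
//                                   | (StackOverflowError) path |
//   stack_begin_                ->  +---------------------------+
//                                   |  mprotect'ed guard region |  kStackOverflowProtectedSize
//   pregion                     ->  +---------------------------+  lowest address
//
// A write into the guard region faults, and the fault handler raises StackOverflowError
// using the reserved bytes between stack_begin_ and stack_end_.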

void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
  CHECK(java_peer != nullptr);
  Thread* self = static_cast<JNIEnvExt*>(env)->self;

  if (VLOG_IS_ON(threads)) {
    ScopedObjectAccess soa(env);

    ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
    ObjPtr<mirror::String> java_name =
        f->GetObject(soa.Decode<mirror::Object>(java_peer))->AsString();
    std::string thread_name;
    if (java_name != nullptr) {
      thread_name = java_name->ToModifiedUtf8();
    } else {
      thread_name = "(Unnamed)";
    }

    VLOG(threads) << "Creating native thread for " << thread_name;
    self->Dump(LOG_STREAM(INFO));
  }

  Runtime* runtime = Runtime::Current();

  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
  bool thread_start_during_shutdown = false;
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDownLocked()) {
      thread_start_during_shutdown = true;
    } else {
      runtime->StartThreadBirth();
    }
  }
  if (thread_start_during_shutdown) {
    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
    return;
  }

  Thread* child_thread = new Thread(is_daemon);
  // Use global JNI ref to hold peer live while child thread starts.
  child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
  stack_size = FixStackSize(stack_size);

  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not
  // racing to assign it.
  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
                    reinterpret_cast<jlong>(child_thread));

  // Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and
  // do not have a good way to report this on the child's side.
  std::string error_msg;
  std::unique_ptr<JNIEnvExt> child_jni_env_ext(
      JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM(), &error_msg));

  int pthread_create_result = 0;
  if (child_jni_env_ext.get() != nullptr) {
    pthread_t new_pthread;
    pthread_attr_t attr;
    child_thread->tlsPtr_.tmp_jni_env = child_jni_env_ext.get();
    CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
    CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
                       "PTHREAD_CREATE_DETACHED");
    CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
    pthread_create_result = pthread_create(&new_pthread,
                                           &attr,
                                           Thread::CreateCallback,
                                           child_thread);
    CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");

    if (pthread_create_result == 0) {
      // pthread_create started the new thread. The child is now responsible for managing the
      // JNIEnvExt we created.
      // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
      // between the threads.
      child_jni_env_ext.release();
      return;
    }
  }

  // Either JNIEnvExt::Create or pthread_create(3) failed, so clean up.
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    runtime->EndThreadBirth();
  }
  // Manually delete the global reference since Thread::Init will not have been run.
  env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer);
  child_thread->tlsPtr_.jpeer = nullptr;
  delete child_thread;
  child_thread = nullptr;
  // TODO: remove from thread group?
  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
  {
    std::string msg(child_jni_env_ext.get() == nullptr ?
        StringPrintf("Could not allocate JNI Env: %s", error_msg.c_str()) :
        StringPrintf("pthread_create (%s stack) failed: %s",
                     PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
    ScopedObjectAccess soa(env);
    soa.Self()->ThrowOutOfMemoryError(msg.c_str());
  }
}
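
// Ownership summary for CreateNativeThread above: tlsPtr_.jpeer (a global ref) and
// tlsPtr_.tmp_jni_env are handed from parent to child. On success the child adopts the
// JNIEnvExt in Init() and CreateCallback deletes the global ref after copying it into
// opeer; on failure the parent deletes both, resets Thread.nativePeer to 0, and throws
// OutOfMemoryError.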

bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_env_ext) {
  // This function does all the initialization that must be run by the native thread it applies to.
  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
  // we can handshake with the corresponding native thread when it's ready.) Check this native
  // thread hasn't been through here already...
  CHECK(Thread::Current() == nullptr);

  // Set pthread_self_ ahead of pthread_setspecific, which makes Thread::Current() work; this
  // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
  tlsPtr_.pthread_self = pthread_self();
  CHECK(is_started_);

  SetUpAlternateSignalStack();
  if (!InitStackHwm()) {
    return false;
  }
  InitCpu();
  InitTlsEntryPoints();
  RemoveSuspendTrigger();
  InitCardTable();
  InitTid();
  interpreter::InitInterpreterTls(this);

#ifdef ART_TARGET_ANDROID
  __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
#else
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
#endif
  DCHECK_EQ(Thread::Current(), this);

  tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);

  if (jni_env_ext != nullptr) {
    DCHECK_EQ(jni_env_ext->vm, java_vm);
    DCHECK_EQ(jni_env_ext->self, this);
    tlsPtr_.jni_env = jni_env_ext;
  } else {
    std::string error_msg;
    tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm, &error_msg);
    if (tlsPtr_.jni_env == nullptr) {
      LOG(ERROR) << "Failed to create JNIEnvExt: " << error_msg;
      return false;
    }
  }

  thread_list->Register(this);
  return true;
}

template <typename PeerAction>
Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_action) {
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
    return nullptr;
  }
  Thread* self;
  {
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDownLocked()) {
      LOG(WARNING) << "Thread attaching while runtime is shutting down: " << thread_name;
      return nullptr;
    } else {
      Runtime::Current()->StartThreadBirth();
      self = new Thread(as_daemon);
      bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
      Runtime::Current()->EndThreadBirth();
      if (!init_success) {
        delete self;
        return nullptr;
      }
    }
  }

  self->InitStringEntryPoints();

  CHECK_NE(self->GetState(), kRunnable);
  self->SetState(kNative);

  // Run the action that is acting on the peer.
  if (!peer_action(self)) {
    runtime->GetThreadList()->Unregister(self);
    // Unregister deletes self, no need to do this here.
    return nullptr;
  }

  if (VLOG_IS_ON(threads)) {
    if (thread_name != nullptr) {
      VLOG(threads) << "Attaching thread " << thread_name;
    } else {
      VLOG(threads) << "Attaching unnamed thread.";
    }
    ScopedObjectAccess soa(self);
    self->Dump(LOG_STREAM(INFO));
  }

  {
    ScopedObjectAccess soa(self);
    runtime->GetRuntimeCallbacks()->ThreadStart(self);
  }

  return self;
}
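
// Sketch of the PeerAction contract (hypothetical action, for illustration only): the
// template above runs peer_action(self) once the thread is registered; returning false
// unregisters and deletes self. Both overloads below pass lambdas, conceptually like:
//
//   Thread* t = Thread::Attach("worker", /* as_daemon */ true,
//                              [](Thread*) { return true; /* no peer set up */ });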

Thread* Thread::Attach(const char* thread_name,
                       bool as_daemon,
                       jobject thread_group,
                       bool create_peer) {
  auto create_peer_action = [&](Thread* self) {
    // If we're the main thread, ClassLinker won't be created until after we're attached,
    // so that thread needs a two-stage attach. Regular threads don't need this hack.
    // In the compiler, all threads need this hack, because no-one's going to be getting
    // a native peer!
    if (create_peer) {
      self->CreatePeer(thread_name, as_daemon, thread_group);
      if (self->IsExceptionPending()) {
        // We cannot keep the exception around, as we're deleting self. Try to be helpful and
        // log it.
        {
          ScopedObjectAccess soa(self);
          LOG(ERROR) << "Exception creating thread peer:";
          LOG(ERROR) << self->GetException()->Dump();
          self->ClearException();
        }
        return false;
      }
    } else {
      // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
      if (thread_name != nullptr) {
        self->tlsPtr_.name->assign(thread_name);
        ::art::SetThreadName(thread_name);
      } else if (self->GetJniEnv()->check_jni) {
        LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
      }
    }
    return true;
  };
  return Attach(thread_name, as_daemon, create_peer_action);
}

Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_peer) {
  auto set_peer_action = [&](Thread* self) {
    // Install the given peer.
    {
      DCHECK(self == Thread::Current());
      ScopedObjectAccess soa(self);
      self->tlsPtr_.opeer = soa.Decode<mirror::Object>(thread_peer).Ptr();
    }
    self->GetJniEnv()->SetLongField(thread_peer,
                                    WellKnownClasses::java_lang_Thread_nativePeer,
                                    reinterpret_cast<jlong>(self));
    return true;
  };
  return Attach(thread_name, as_daemon, set_peer_action);
}

void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->IsStarted());
  JNIEnv* env = tlsPtr_.jni_env;

  if (thread_group == nullptr) {
    thread_group = runtime->GetMainThreadGroup();
  }
  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
  // Add missing null check in case of OOM b/18297817
  if (name != nullptr && thread_name.get() == nullptr) {
    CHECK(IsExceptionPending());
    return;
  }
  jint thread_priority = GetNativePriority();
  jboolean thread_is_daemon = as_daemon;

  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
  if (peer.get() == nullptr) {
    CHECK(IsExceptionPending());
    return;
  }
  {
    ScopedObjectAccess soa(this);
    tlsPtr_.opeer = soa.Decode<mirror::Object>(peer.get()).Ptr();
  }
  env->CallNonvirtualVoidMethod(peer.get(),
                                WellKnownClasses::java_lang_Thread,
                                WellKnownClasses::java_lang_Thread_init,
                                thread_group, thread_name.get(), thread_priority,
                                thread_is_daemon);
  if (IsExceptionPending()) {
    return;
  }

  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());
  env->SetLongField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
                    reinterpret_cast<jlong>(self));

  ScopedObjectAccess soa(self);
  StackHandleScope<1> hs(self);
  MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName()));
  if (peer_thread_name == nullptr) {
    // The Thread constructor should have set the Thread.name to a
    // non-null value. However, because we can run without code
    // available (in the compiler, in tests), we manually assign the
    // fields the constructor should have set.
    if (runtime->IsActiveTransaction()) {
      InitPeer<true>(soa,
                     tlsPtr_.opeer,
                     thread_is_daemon,
                     thread_group,
                     thread_name.get(),
                     thread_priority);
    } else {
      InitPeer<false>(soa,
                      tlsPtr_.opeer,
                      thread_is_daemon,
                      thread_group,
                      thread_name.get(),
                      thread_priority);
    }
    peer_thread_name.Assign(GetThreadName());
  }
  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
  if (peer_thread_name != nullptr) {
    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
  }
}

jobject Thread::CreateCompileTimePeer(JNIEnv* env,
                                      const char* name,
                                      bool as_daemon,
                                      jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(!runtime->IsStarted());

  if (thread_group == nullptr) {
    thread_group = runtime->GetMainThreadGroup();
  }
  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
  // Add missing null check in case of OOM b/18297817
  if (name != nullptr && thread_name.get() == nullptr) {
    CHECK(Thread::Current()->IsExceptionPending());
    return nullptr;
  }
  jint thread_priority = GetNativePriority();
  jboolean thread_is_daemon = as_daemon;

  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
  if (peer.get() == nullptr) {
    CHECK(Thread::Current()->IsExceptionPending());
    return nullptr;
  }

  // We cannot call Thread.init, as it will recursively ask for currentThread.

  // The Thread constructor should have set the Thread.name to a
  // non-null value. However, because we can run without code
  // available (in the compiler, in tests), we manually assign the
  // fields the constructor should have set.
  ScopedObjectAccessUnchecked soa(Thread::Current());
  if (runtime->IsActiveTransaction()) {
    InitPeer<true>(soa,
                   soa.Decode<mirror::Object>(peer.get()),
                   thread_is_daemon,
                   thread_group,
                   thread_name.get(),
                   thread_priority);
  } else {
    InitPeer<false>(soa,
                    soa.Decode<mirror::Object>(peer.get()),
                    thread_is_daemon,
                    thread_group,
                    thread_name.get(),
                    thread_priority);
  }

  return peer.release();
}

template<bool kTransactionActive>
void Thread::InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
                      ObjPtr<mirror::Object> peer,
                      jboolean thread_is_daemon,
                      jobject thread_group,
                      jobject thread_name,
                      jint thread_priority) {
  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon)->
      SetBoolean<kTransactionActive>(peer, thread_is_daemon);
  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)->
      SetObject<kTransactionActive>(peer, soa.Decode<mirror::Object>(thread_group));
  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name)->
      SetObject<kTransactionActive>(peer, soa.Decode<mirror::Object>(thread_name));
  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority)->
      SetInt<kTransactionActive>(peer, thread_priority);
}

void Thread::SetThreadName(const char* name) {
  tlsPtr_.name->assign(name);
  ::art::SetThreadName(name);
  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
}

static void GetThreadStack(pthread_t thread,
                           void** stack_base,
                           size_t* stack_size,
                           size_t* guard_size) {
#if defined(__APPLE__)
  *stack_size = pthread_get_stacksize_np(thread);
  void* stack_addr = pthread_get_stackaddr_np(thread);

  // Check whether stack_addr is the base or end of the stack.
  // (On Mac OS 10.7, it's the end.)
  int stack_variable;
  if (stack_addr > &stack_variable) {
    *stack_base = reinterpret_cast<uint8_t*>(stack_addr) - *stack_size;
  } else {
    *stack_base = stack_addr;
  }

  // This is wrong, but there doesn't seem to be a way to get the actual value on the Mac.
  pthread_attr_t attributes;
  CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
#else
  pthread_attr_t attributes;
  CHECK_PTHREAD_CALL(pthread_getattr_np, (thread, &attributes), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, stack_base, stack_size), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);

#if defined(__GLIBC__)
  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
  // will be broken because we'll die long before we get close to 2GB.
  bool is_main_thread = (::art::GetTid() == getpid());
  if (is_main_thread) {
    rlimit stack_limit;
    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
    }
    if (stack_limit.rlim_cur == RLIM_INFINITY) {
      size_t old_stack_size = *stack_size;

      // Use the kernel default limit as our size, and adjust the base to match.
      *stack_size = 8 * MB;
      *stack_base = reinterpret_cast<uint8_t*>(*stack_base) + (old_stack_size - *stack_size);

      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
                    << " to " << PrettySize(*stack_size)
                    << " with base " << *stack_base;
    }
  }
#endif

#endif
}
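
// Worked example for the glibc main-thread clamp above: with RLIMIT_STACK unlimited, a
// 32-bit process may see old_stack_size == 2GB starting at some base B. We instead report
// an 8MB stack based at B + (2GB - 8MB), i.e. the 8MB directly below the stack top, which
// is the region the kernel will actually back.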

bool Thread::InitStackHwm() {
  void* read_stack_base;
  size_t read_stack_size;
  size_t read_guard_size;
  GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size);

  tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base);
  tlsPtr_.stack_size = read_stack_size;

  // The minimum stack size we can cope with is the overflow reserved bytes (typically
  // 8K) + the protected region size (4K) + another page (4K). Typically this will
  // be 8+4+4 = 16K. The thread won't be able to do much with this stack: even the GC takes
  // between 8K and 12K.
  uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
      + 4 * KB;
  if (read_stack_size <= min_stack) {
    // Note, as we know the stack is small, avoid operations that could use a lot of stack.
    LogHelper::LogLineLowStack(__PRETTY_FUNCTION__,
                               __LINE__,
                               ::android::base::ERROR,
                               "Attempt to attach a thread with a too-small stack");
    return false;
  }

  // This is included in the SIGQUIT output, but it's useful here for thread debugging.
  VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)",
                                read_stack_base,
                                PrettySize(read_stack_size).c_str(),
                                PrettySize(read_guard_size).c_str());

  // Set stack_end_ to the bottom of the stack, saving space for stack overflow handling.

  Runtime* runtime = Runtime::Current();
  bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();

  // Valgrind on arm doesn't give the right values here. Do not install the guard page, and
  // effectively disable stack overflow checks (we'll get segfaults, potentially) by setting
  // stack_begin to 0.
  const bool valgrind_on_arm =
      (kRuntimeISA == kArm || kRuntimeISA == kArm64) &&
      kMemoryToolIsValgrind &&
      RUNNING_ON_MEMORY_TOOL != 0;
  if (valgrind_on_arm) {
    tlsPtr_.stack_begin = nullptr;
  }

  ResetDefaultStackEnd();

  // Install the protected region if we are doing implicit overflow checks.
  if (implicit_stack_check && !valgrind_on_arm) {
    // The thread might have a protected region at the bottom. We need
    // to install our own region, so we move the limits
    // of the stack to make room for it.

    tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
    tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
    tlsPtr_.stack_size -= read_guard_size;

    InstallImplicitProtection();
  }

  // Sanity check.
  CHECK_GT(FindStackTop(), reinterpret_cast<void*>(tlsPtr_.stack_end));

  return true;
}

void Thread::ShortDump(std::ostream& os) const {
  os << "Thread[";
  if (GetThreadId() != 0) {
    // If we're in kStarting, we won't have a thin lock id or tid yet.
    os << GetThreadId()
       << ",tid=" << GetTid() << ',';
  }
  os << GetState()
     << ",Thread*=" << this
     << ",peer=" << tlsPtr_.opeer
     << ",\"" << (tlsPtr_.name != nullptr ? *tlsPtr_.name : "null") << "\""
     << "]";
}

void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map,
                  bool force_dump_stack) const {
  DumpState(os);
  DumpStack(os, dump_native_stack, backtrace_map, force_dump_stack);
}

mirror::String* Thread::GetThreadName() const {
  ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
  if (tlsPtr_.opeer == nullptr) {
    return nullptr;
  }
  ObjPtr<mirror::Object> name = f->GetObject(tlsPtr_.opeer);
  return name == nullptr ? nullptr : name->AsString();
}

void Thread::GetThreadName(std::string& name) const {
  name.assign(*tlsPtr_.name);
}

uint64_t Thread::GetCpuMicroTime() const {
#if defined(__linux__)
  clockid_t cpu_clock_id;
  pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
  timespec now;
  clock_gettime(cpu_clock_id, &now);
  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
#else  // __APPLE__
  UNIMPLEMENTED(WARNING);
  return -1;
#endif
}
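
// Suspension sketch (simplified and hypothetical; the real driver of this machinery is
// ThreadList, e.g. its SuspendAllInternal): a requester increments the target's suspend
// count together with a barrier, then waits for the target to decrement the barrier at a
// suspend point:
//
//   AtomicInteger pending_threads(1);
//   {
//     MutexLock mu(self, *Locks::thread_list_lock_);
//     MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
//     target->ModifySuspendCount(self, +1, &pending_threads, /* for_debugger */ false);
//   }
//   // ... wait (futex or yield) until pending_threads reaches 0; the target side is
//   // PassActiveSuspendBarriers below ...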

// Attempt to rectify locks so that we dump thread list with required locks before exiting.
static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
  LOG(ERROR) << *thread << " suspend count already zero.";
  Locks::thread_suspend_count_lock_->Unlock(self);
  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
    Locks::mutator_lock_->SharedTryLock(self);
    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
    }
  }
  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
    Locks::thread_list_lock_->TryLock(self);
    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
    }
  }
  std::ostringstream ss;
  Runtime::Current()->GetThreadList()->Dump(ss);
  LOG(FATAL) << ss.str();
}

bool Thread::ModifySuspendCountInternal(Thread* self,
                                        int delta,
                                        AtomicInteger* suspend_barrier,
                                        bool for_debugger) {
  if (kIsDebugBuild) {
    DCHECK(delta == -1 || delta == +1 || delta == -tls32_.debug_suspend_count)
        << delta << " " << tls32_.debug_suspend_count << " " << this;
    DCHECK_GE(tls32_.suspend_count, tls32_.debug_suspend_count) << this;
    Locks::thread_suspend_count_lock_->AssertHeld(self);
    if (this != self && !IsSuspended()) {
      Locks::thread_list_lock_->AssertHeld(self);
    }
  }
  if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
    UnsafeLogFatalForSuspendCount(self, this);
    return false;
  }

  if (kUseReadBarrier && delta > 0 && this != self && tlsPtr_.flip_function != nullptr) {
    // Force retry of a suspend request if it's in the middle of a thread flip to avoid a
    // deadlock. b/31683379.
    return false;
  }

  uint16_t flags = kSuspendRequest;
  if (delta > 0 && suspend_barrier != nullptr) {
    uint32_t available_barrier = kMaxSuspendBarriers;
    for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
      if (tlsPtr_.active_suspend_barriers[i] == nullptr) {
        available_barrier = i;
        break;
      }
    }
    if (available_barrier == kMaxSuspendBarriers) {
      // No barrier spaces available, we can't add another.
      return false;
    }
    tlsPtr_.active_suspend_barriers[available_barrier] = suspend_barrier;
    flags |= kActiveSuspendBarrier;
  }

  tls32_.suspend_count += delta;
  if (for_debugger) {
    tls32_.debug_suspend_count += delta;
  }

  if (tls32_.suspend_count == 0) {
    AtomicClearFlag(kSuspendRequest);
  } else {
    // Two bits might be set simultaneously.
    tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flags);
    TriggerSuspend();
  }
  return true;
}

bool Thread::PassActiveSuspendBarriers(Thread* self) {
  // Grab the suspend_count lock and copy the current set of
  // barriers. Then clear the list and the flag. The ModifySuspendCount
  // function requires the lock so we prevent a race between setting
  // the kActiveSuspendBarrier flag and clearing it.
  AtomicInteger* pass_barriers[kMaxSuspendBarriers];
  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    if (!ReadFlag(kActiveSuspendBarrier)) {
      // Quick exit test: the barriers have already been claimed. This is possible as there may
      // be a race to claim them, and it doesn't matter who wins.
      // All of the callers of this function (except SuspendAllInternal) will first test the
      // kActiveSuspendBarrier flag without the lock. Here we double-check, while holding the
      // suspend_count lock, whether the barrier has already been passed.
      return false;
    }

    for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
      pass_barriers[i] = tlsPtr_.active_suspend_barriers[i];
      tlsPtr_.active_suspend_barriers[i] = nullptr;
    }
    AtomicClearFlag(kActiveSuspendBarrier);
  }

  uint32_t barrier_count = 0;
  for (uint32_t i = 0; i < kMaxSuspendBarriers; i++) {
    AtomicInteger* pending_threads = pass_barriers[i];
    if (pending_threads != nullptr) {
      bool done = false;
      do {
        int32_t cur_val = pending_threads->LoadRelaxed();
        CHECK_GT(cur_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << cur_val;
        // Reduce value by 1.
        done = pending_threads->CompareExchangeWeakRelaxed(cur_val, cur_val - 1);
#if ART_USE_FUTEXES
        if (done && (cur_val - 1) == 0) {  // Weak CAS may fail spuriously.
          futex(pending_threads->Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
        }
#endif
      } while (!done);
      ++barrier_count;
    }
  }
  CHECK_GT(barrier_count, 0U);
  return true;
}

void Thread::ClearSuspendBarrier(AtomicInteger* target) {
  CHECK(ReadFlag(kActiveSuspendBarrier));
  bool clear_flag = true;
  for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
    AtomicInteger* ptr = tlsPtr_.active_suspend_barriers[i];
    if (ptr == target) {
      tlsPtr_.active_suspend_barriers[i] = nullptr;
    } else if (ptr != nullptr) {
      clear_flag = false;
    }
  }
  if (LIKELY(clear_flag)) {
    AtomicClearFlag(kActiveSuspendBarrier);
  }
}

void Thread::RunCheckpointFunction() {
  bool done = false;
  do {
    // Grab the suspend_count lock and copy the checkpoints one by one. When the last checkpoint is
    // copied, clear the list and the flag. The RequestCheckpoint function will also grab this lock
    // to prevent a race between setting the kCheckpointRequest flag and clearing it.
    Closure* checkpoint = nullptr;
    {
      MutexLock mu(this, *Locks::thread_suspend_count_lock_);
      if (tlsPtr_.checkpoint_function != nullptr) {
        checkpoint = tlsPtr_.checkpoint_function;
        if (!checkpoint_overflow_.empty()) {
          // Overflow list not empty, copy the first one out and continue.
          tlsPtr_.checkpoint_function = checkpoint_overflow_.front();
          checkpoint_overflow_.pop_front();
        } else {
          // No overflow checkpoints, this means that we are on the last pending checkpoint.
          tlsPtr_.checkpoint_function = nullptr;
          AtomicClearFlag(kCheckpointRequest);
          done = true;
        }
      } else {
        LOG(FATAL) << "Checkpoint flag set without pending checkpoint";
      }
    }

    // Outside the lock, run the checkpoint function that we collected.
    ScopedTrace trace("Run checkpoint function");
    DCHECK(checkpoint != nullptr);
    checkpoint->Run(this);
  } while (!done);
}

void Thread::RunEmptyCheckpoint() {
  DCHECK_EQ(Thread::Current(), this);
  AtomicClearFlag(kEmptyCheckpointRequest);
  Runtime::Current()->GetThreadList()->EmptyCheckpointBarrier()->Pass(this);
}

bool Thread::RequestCheckpoint(Closure* function) {
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  if (old_state_and_flags.as_struct.state != kRunnable) {
    return false;  // Fail, thread is suspended and so can't run a checkpoint.
  }

  // We must be runnable to request a checkpoint.
  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
  union StateAndFlags new_state_and_flags;
  new_state_and_flags.as_int = old_state_and_flags.as_int;
  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
      old_state_and_flags.as_int, new_state_and_flags.as_int);
  if (success) {
    // Succeeded setting checkpoint flag, now insert the actual checkpoint.
    if (tlsPtr_.checkpoint_function == nullptr) {
      tlsPtr_.checkpoint_function = function;
    } else {
      checkpoint_overflow_.push_back(function);
    }
    CHECK_EQ(ReadFlag(kCheckpointRequest), true);
    TriggerSuspend();
  }
  return success;
}

bool Thread::RequestEmptyCheckpoint() {
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  if (old_state_and_flags.as_struct.state != kRunnable) {
    // If it's not runnable, we don't need to do anything because it won't be in the middle of a
    // heap access (eg. the read barrier).
    return false;
  }

  // We must be runnable to request a checkpoint.
  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
  union StateAndFlags new_state_and_flags;
  new_state_and_flags.as_int = old_state_and_flags.as_int;
  new_state_and_flags.as_struct.flags |= kEmptyCheckpointRequest;
  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
      old_state_and_flags.as_int, new_state_and_flags.as_int);
  if (success) {
    TriggerSuspend();
  }
  return success;
}

class BarrierClosure : public Closure {
 public:
  explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}

  void Run(Thread* self) OVERRIDE {
    wrapped_->Run(self);
    barrier_.Pass(self);
  }

  void Wait(Thread* self) {
    barrier_.Increment(self, 1);
  }

 private:
  Closure* wrapped_;
  Barrier barrier_;
};
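
// Usage sketch for BarrierClosure (hypothetical requester, simplified; see
// RequestSynchronousCheckpoint below for the real pattern, including the fallback for
// non-runnable targets):
//
//   BarrierClosure barrier_closure(function);
//   if (target->RequestCheckpoint(&barrier_closure)) {
//     barrier_closure.Wait(self);  // Blocks until the target thread has run 'function'.
//   }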

bool Thread::RequestSynchronousCheckpoint(Closure* function) {
  if (this == Thread::Current()) {
    // Asked to run on this thread. Just run.
    function->Run(this);
    return true;
  }
  Thread* self = Thread::Current();

  // The current thread is not this thread.

  if (GetState() == ThreadState::kTerminated) {
    return false;
  }

  // Note: we're holding the thread-list lock. The thread cannot die at this point.
  struct ScopedThreadListLockUnlock {
    explicit ScopedThreadListLockUnlock(Thread* self_in) RELEASE(*Locks::thread_list_lock_)
        : self_thread(self_in) {
      Locks::thread_list_lock_->AssertHeld(self_thread);
      Locks::thread_list_lock_->Unlock(self_thread);
    }

    ~ScopedThreadListLockUnlock() ACQUIRE(*Locks::thread_list_lock_) {
      Locks::thread_list_lock_->AssertNotHeld(self_thread);
      Locks::thread_list_lock_->Lock(self_thread);
    }

    Thread* self_thread;
  };

  for (;;) {
    // If this thread is runnable, try to schedule a checkpoint. Do some gymnastics to not hold the
    // suspend-count lock for too long.
    if (GetState() == ThreadState::kRunnable) {
      BarrierClosure barrier_closure(function);
      bool installed = false;
      {
        MutexLock mu(self, *Locks::thread_suspend_count_lock_);
        installed = RequestCheckpoint(&barrier_closure);
      }
      if (installed) {
        // Relinquish the thread-list lock, temporarily. We should not wait holding any locks.
        ScopedThreadListLockUnlock stllu(self);
        ScopedThreadSuspension sts(self, ThreadState::kWaiting);
        barrier_closure.Wait(self);
        return true;
      }
      // Fall-through.
    }

    // This thread is not runnable, make sure we stay suspended, then run the checkpoint.
    // Note: ModifySuspendCountInternal also expects the thread_list_lock to be held in
    // certain situations.
    {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);

      if (!ModifySuspendCount(self, +1, nullptr, false)) {
        // Just retry the loop.
        sched_yield();
        continue;
      }
    }

    {
      ScopedThreadListLockUnlock stllu(self);
      ScopedThreadSuspension sts(self, ThreadState::kWaiting);
      while (GetState() == ThreadState::kRunnable) {
        // We became runnable again. Wait till the suspend triggered in ModifySuspendCount
        // moves us to suspended.
        sched_yield();
      }

      function->Run(this);
    }

    {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);

      DCHECK_NE(GetState(), ThreadState::kRunnable);
      bool updated = ModifySuspendCount(self, -1, nullptr, false);
      DCHECK(updated);
    }

    return true;  // We're done, break out of the loop.
  }
}

Closure* Thread::GetFlipFunction() {
  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
  Closure* func;
  do {
    func = atomic_func->LoadRelaxed();
    if (func == nullptr) {
      return nullptr;
    }
  } while (!atomic_func->CompareExchangeWeakSequentiallyConsistent(func, nullptr));
  DCHECK(func != nullptr);
  return func;
}

void Thread::SetFlipFunction(Closure* function) {
  CHECK(function != nullptr);
  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
  atomic_func->StoreSequentiallyConsistent(function);
}

void Thread::FullSuspendCheck() {
  ScopedTrace trace(__FUNCTION__);
  VLOG(threads) << this << " self-suspending";
  // Make thread appear suspended to other threads, release mutator_lock_.
  // Transition to suspended and back to runnable, re-acquire share on mutator_lock_.
  ScopedThreadSuspension(this, kSuspended);
  VLOG(threads) << this << " self-reviving";
}

static std::string GetSchedulerGroupName(pid_t tid) {
  // /proc/<pid>/cgroup looks like this:
  // 2:devices:/
  // 1:cpuacct,cpu:/
  // We want the third field from the line whose second field contains the "cpu" token.
  std::string cgroup_file;
  if (!ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid), &cgroup_file)) {
    return "";
  }
  std::vector<std::string> cgroup_lines;
  Split(cgroup_file, '\n', &cgroup_lines);
  for (size_t i = 0; i < cgroup_lines.size(); ++i) {
    std::vector<std::string> cgroup_fields;
    Split(cgroup_lines[i], ':', &cgroup_fields);
    std::vector<std::string> cgroups;
    Split(cgroup_fields[1], ',', &cgroups);
    for (size_t j = 0; j < cgroups.size(); ++j) {
      if (cgroups[j] == "cpu") {
        return cgroup_fields[2].substr(1);  // Skip the leading slash.
      }
    }
  }
  return "";
}
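
// Example (illustrative values): if /proc/self/task/<tid>/cgroup contains the line
//   3:cpuacct,cpu:/bg_non_interactive
// then its second field contains the "cpu" token and this returns "bg_non_interactive"
// (leading '/' stripped). A thread in the root cgroup ("1:cpuacct,cpu:/") yields "",
// which DumpState below replaces with "default".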
          group_name_string->ToModifiedUtf8() : "<null>";
    }
  } else {
    priority = GetNativePriority();
  }

  std::string scheduler_group_name(GetSchedulerGroupName(tid));
  if (scheduler_group_name.empty()) {
    scheduler_group_name = "default";
  }

  if (thread != nullptr) {
    os << '"' << *thread->tlsPtr_.name << '"';
    if (is_daemon) {
      os << " daemon";
    }
    os << " prio=" << priority
       << " tid=" << thread->GetThreadId()
       << " " << thread->GetState();
    if (thread->IsStillStarting()) {
      os << " (still starting up)";
    }
    os << "\n";
  } else {
    os << '"' << ::art::GetThreadName(tid) << '"'
       << " prio=" << priority
       << " (not attached)\n";
  }

  if (thread != nullptr) {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    os << "  | group=\"" << group_name << "\""
       << " sCount=" << thread->tls32_.suspend_count
       << " dsCount=" << thread->tls32_.debug_suspend_count
       << " flags=" << thread->tls32_.state_and_flags.as_struct.flags
       << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer)
       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
  }

  os << "  | sysTid=" << tid
     << " nice=" << getpriority(PRIO_PROCESS, tid)
     << " cgrp=" << scheduler_group_name;
  if (thread != nullptr) {
    int policy;
    sched_param sp;
#if !defined(__APPLE__)
    // b/36445592 Don't use pthread_getschedparam since pthread may have exited.
    policy = sched_getscheduler(tid);
    if (policy == -1) {
      PLOG(WARNING) << "sched_getscheduler(" << tid << ")";
    }
    int sched_getparam_result = sched_getparam(tid, &sp);
    if (sched_getparam_result == -1) {
      PLOG(WARNING) << "sched_getparam(" << tid << ", &sp)";
      sp.sched_priority = -1;
    }
#else
    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp),
                       __FUNCTION__);
#endif
    os << " sched=" << policy << "/" << sp.sched_priority
       << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self);
  }
  os << "\n";

  // Grab the scheduler stats for this thread.
  std::string scheduler_stats;
  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
  } else {
    scheduler_stats = "0 0 0";
  }

  char native_thread_state = '?';
  int utime = 0;
  int stime = 0;
  int task_cpu = 0;
  GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu);

  os << "  | state=" << native_thread_state
     << " schedstat=( " << scheduler_stats << " )"
     << " utm=" << utime
     << " stm=" << stime
     << " core=" << task_cpu
     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
  if (thread != nullptr) {
    os << "  | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-"
       << reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize="
       << PrettySize(thread->tlsPtr_.stack_size) << "\n";
    // Dump the held mutexes.
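    // Note: kMonitorLock is deliberately skipped below; held object monitors are reported as
    // part of the Java stack dump (see Monitor::VisitLocks) rather than on this line.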
    os << "  | held mutexes=";
    for (size_t i = 0; i < kLockLevelCount; ++i) {
      if (i != kMonitorLock) {
        BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i));
        if (mutex != nullptr) {
          os << " \"" << mutex->GetName() << "\"";
          if (mutex->IsReaderWriterMutex()) {
            ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex);
            if (rw_mutex->GetExclusiveOwnerTid() == static_cast<uint64_t>(tid)) {
              os << "(exclusive held)";
            } else {
              os << "(shared held)";
            }
          }
        }
      }
    }
    os << "\n";
  }
}

void Thread::DumpState(std::ostream& os) const {
  Thread::DumpState(os, this, GetTid());
}

struct StackDumpVisitor : public StackVisitor {
  StackDumpVisitor(std::ostream& os_in,
                   Thread* thread_in,
                   Context* context,
                   bool can_allocate_in,
                   bool check_suspended = true,
                   bool dump_locks_in = true)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread_in,
                     context,
                     StackVisitor::StackWalkKind::kIncludeInlinedFrames,
                     check_suspended),
        os(os_in),
        can_allocate(can_allocate_in),
        last_method(nullptr),
        last_line_number(0),
        repetition_count(0),
        frame_count(0),
        dump_locks(dump_locks_in) {}

  virtual ~StackDumpVisitor() {
    if (frame_count == 0) {
      os << "  (no managed stack frames)\n";
    }
  }

  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }
    m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize);
    const int kMaxRepetition = 3;
    ObjPtr<mirror::Class> c = m->GetDeclaringClass();
    ObjPtr<mirror::DexCache> dex_cache = c->GetDexCache();
    int line_number = -1;
    if (dex_cache != nullptr) {  // Be tolerant of bad input.
      const DexFile* dex_file = dex_cache->GetDexFile();
      line_number = annotations::GetLineNumFromPC(dex_file, m, GetDexPc(false));
    }
    if (line_number == last_line_number && last_method == m) {
      ++repetition_count;
    } else {
      if (repetition_count >= kMaxRepetition) {
        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
      }
      repetition_count = 0;
      last_line_number = line_number;
      last_method = m;
    }
    if (repetition_count < kMaxRepetition) {
      os << "  at " << m->PrettyMethod(false);
      if (m->IsNative()) {
        os << "(Native method)";
      } else {
        const char* source_file(m->GetDeclaringClassSourceFile());
        os << "(" << (source_file != nullptr ? source_file : "unavailable")
           << ":" << line_number << ")";
      }
      os << "\n";
      if (frame_count == 0) {
        Monitor::DescribeWait(os, GetThread());
      }
      if (can_allocate && dump_locks) {
        // Visit locks, but do not abort on errors; aborting on an error here would trigger a
        // nested abort. Skip visiting locks if dump_locks is false, as it would cause a
        // bad_mutexes_held in RegTypeCache::RegTypeCache due to thread_list_lock.
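        // Each owned monitor is printed by DumpLockedObject below, e.g.
        // "  - locked <0x12345678> (a java.lang.Object)" (illustrative values).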
        Monitor::VisitLocks(this, DumpLockedObject, &os, false);
      }
    }

    ++frame_count;
    return true;
  }

  static void DumpLockedObject(mirror::Object* o, void* context)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    std::ostream& os = *reinterpret_cast<std::ostream*>(context);
    os << "  - locked ";
    if (o == nullptr) {
      os << "an unknown object";
    } else {
      if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
        // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
        // may not have been flipped yet and "o" may be a from-space (stale) ref, in which case
        // the IdentityHashCode call below will crash. So explicitly mark/forward it here.
        o = ReadBarrier::Mark(o);
      }
      if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
          Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
        // Getting the identity hashcode here would result in lock inflation and suspension of the
        // current thread, which isn't safe if this is the only runnable thread.
        os << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)", reinterpret_cast<intptr_t>(o),
                           o->PrettyTypeOf().c_str());
      } else {
        // IdentityHashCode can cause thread suspension, which would invalidate o if it moved. So
        // we get the pretty type before we call IdentityHashCode.
        const std::string pretty_type(o->PrettyTypeOf());
        os << StringPrintf("<0x%08x> (a %s)", o->IdentityHashCode(), pretty_type.c_str());
      }
    }
    os << "\n";
  }

  std::ostream& os;
  const bool can_allocate;
  ArtMethod* last_method;
  int last_line_number;
  int repetition_count;
  int frame_count;
  const bool dump_locks;
};

static bool ShouldShowNativeStack(const Thread* thread)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ThreadState state = thread->GetState();

  // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
  if (state > kWaiting && state < kStarting) {
    return true;
  }

  // In an Object.wait variant or Thread.sleep? That's not interesting.
  if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
    return false;
  }

  // Threads with no managed stack frames should be shown.
  const ManagedStack* managed_stack = thread->GetManagedStack();
  if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
      managed_stack->GetTopShadowFrame() == nullptr)) {
    return true;
  }

  // In some other native method? That's interesting.
  // We don't just check kNative because native methods will be in state kSuspended if they're
  // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
  // thread-startup states if it's early enough in their life cycle (http://b/7432159).
  ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
  return current_method != nullptr && current_method->IsNative();
}

void Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_locks) const {
  // If flip_function is not null, a checkpoint has run before the thread woke up to execute
  // the flip function, so the thread roots haven't been forwarded yet. Accessing the roots
  // below (locks or methods in the frames) would therefore be unsafe. Run the flip function
  // here first. TODO: clean up.
  {
    Thread* this_thread = const_cast<Thread*>(this);
    Closure* flip_func = this_thread->GetFlipFunction();
    if (flip_func != nullptr) {
      flip_func->Run(this_thread);
    }
  }

  // Dumping the Java stack involves the verifier for locks. The verifier operates under the
  // assumption that there is no exception pending on entry. Thus, stash any pending exception.
  // Use Thread::Current() instead of this in case a thread is dumping the stack of another
  // suspended thread.
  StackHandleScope<1> scope(Thread::Current());
  Handle<mirror::Throwable> exc;
  bool have_exception = false;
  if (IsExceptionPending()) {
    exc = scope.NewHandle(GetException());
    const_cast<Thread*>(this)->ClearException();
    have_exception = true;
  }

  std::unique_ptr<Context> context(Context::Create());
  StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
                          !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks);
  dumper.WalkStack();

  if (have_exception) {
    const_cast<Thread*>(this)->SetException(exc.Get());
  }
}

void Thread::DumpStack(std::ostream& os,
                       bool dump_native_stack,
                       BacktraceMap* backtrace_map,
                       bool force_dump_stack) const {
  // TODO: we call this code when dying but may not have suspended the thread ourselves. The
  // IsSuspended check is therefore racy with the use for dumping (normally we inhibit
  // the race with the thread_suspend_count_lock_).
  bool dump_for_abort = (gAborting > 0);
  bool safe_to_dump = (this == Thread::Current() || IsSuspended());
  if (!kIsDebugBuild) {
    // We always want to dump the stack for an abort; however, there is no point dumping another
    // thread's stack in debug builds where we'll hit the not-suspended check in the stack walk.
    safe_to_dump = (safe_to_dump || dump_for_abort);
  }
  if (safe_to_dump || force_dump_stack) {
    // If we're currently in native code, dump that stack before dumping the managed stack.
    if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) {
      DumpKernelStack(os, GetTid(), "  kernel: ", false);
      ArtMethod* method =
          GetCurrentMethod(nullptr,
                           /*check_suspended*/ !force_dump_stack,
                           /*abort_on_error*/ !(dump_for_abort || force_dump_stack));
      DumpNativeStack(os, GetTid(), backtrace_map, "  native: ", method);
    }
    DumpJavaStack(os,
                  /*check_suspended*/ !force_dump_stack,
                  /*dump_locks*/ !force_dump_stack);
  } else {
    os << "Not able to dump stack of thread that isn't suspended";
  }
}

void Thread::ThreadExitCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  if (self->tls32_.thread_exit_check_count == 0) {
    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
        "going to use a pthread_key_create destructor?): " << *self;
    CHECK(is_started_);
#ifdef ART_TARGET_ANDROID
    __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self;
#else
    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
#endif
    self->tls32_.thread_exit_check_count = 1;
  } else {
    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
  }
}

void Thread::Startup() {
  CHECK(!is_started_);
  is_started_ = true;
  {
    // MutexLock to keep annotalysis happy.
    //
    // Note we use null for the thread because Thread::Current can
    // return garbage at this point: is_started_ is already true but
    // Thread::pthread_key_self_ is not yet initialized.
    // This was seen on glibc.
    MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_);
    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
                                         *Locks::thread_suspend_count_lock_);
  }

  // Allocate a TLS slot.
  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback),
                     "self key");

  // Double-check the TLS slot allocation.
  if (pthread_getspecific(pthread_key_self_) != nullptr) {
    LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
  }
}

void Thread::FinishStartup() {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->IsStarted());

  // Finish attaching the main thread.
  ScopedObjectAccess soa(Thread::Current());
  Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
  Thread::Current()->AssertNoPendingException();

  Runtime::Current()->GetClassLinker()->RunRootClinits();

  // The thread counts as started from now on. We need to add it to the ThreadGroup. For regular
  // threads, this is done in Thread.start() on the Java side.
  {
    // This is only ever done once. There's no benefit in caching the method.
    jmethodID thread_group_add = soa.Env()->GetMethodID(WellKnownClasses::java_lang_ThreadGroup,
                                                        "add",
                                                        "(Ljava/lang/Thread;)V");
    CHECK(thread_group_add != nullptr);
    ScopedLocalRef<jobject> thread_jobject(
        soa.Env(), soa.Env()->AddLocalReference<jobject>(Thread::Current()->GetPeer()));
    soa.Env()->CallNonvirtualVoidMethod(runtime->GetMainThreadGroup(),
                                        WellKnownClasses::java_lang_ThreadGroup,
                                        thread_group_add,
                                        thread_jobject.get());
    Thread::Current()->AssertNoPendingException();
  }
}

void Thread::Shutdown() {
  CHECK(is_started_);
  is_started_ = false;
  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
  MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
  if (resume_cond_ != nullptr) {
    delete resume_cond_;
    resume_cond_ = nullptr;
  }
}

Thread::Thread(bool daemon)
    : tls32_(daemon),
      wait_monitor_(nullptr),
      custom_tls_(nullptr),
      can_call_into_java_(true) {
  wait_mutex_ = new Mutex("a thread wait mutex");
  wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
  tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
  tlsPtr_.name = new std::string(kThreadNameDuringStartup);

  static_assert((sizeof(Thread) % 4) == 0U,
                "art::Thread has a size which is not a multiple of 4.");
  tls32_.state_and_flags.as_struct.flags = 0;
  tls32_.state_and_flags.as_struct.state = kNative;
  tls32_.interrupted.StoreRelaxed(false);
  memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
  std::fill(tlsPtr_.rosalloc_runs,
            tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread,
            gc::allocator::RosAlloc::GetDedicatedFullRun());
  tlsPtr_.checkpoint_function = nullptr;
  for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
    tlsPtr_.active_suspend_barriers[i] = nullptr;
  }
  tlsPtr_.flip_function = nullptr;
  tlsPtr_.thread_local_mark_stack = nullptr;
  tls32_.is_transitioning_to_runnable = false;
}

bool Thread::IsStillStarting() const {
  // You might think you can check whether the state is kStarting, but for much of thread startup,
  // the thread is in kNative; it might also be in kVmWait.
  // You might think you can check whether the peer is null, but the peer is actually created and
  // assigned fairly early on, and needs to be.
  // It turns out that the last thing to change is the thread name; that's a good proxy for "has
  // this thread _ever_ entered kRunnable".
  return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) ||
      (*tlsPtr_.name == kThreadNameDuringStartup);
}

void Thread::AssertPendingException() const {
  CHECK(IsExceptionPending()) << "Pending exception expected.";
}

void Thread::AssertPendingOOMException() const {
  AssertPendingException();
  auto* e = GetException();
  CHECK_EQ(e->GetClass(), DecodeJObject(WellKnownClasses::java_lang_OutOfMemoryError)->AsClass())
      << e->Dump();
}

void Thread::AssertNoPendingException() const {
  if (UNLIKELY(IsExceptionPending())) {
    ScopedObjectAccess soa(Thread::Current());
    LOG(FATAL) << "No pending exception expected: " << GetException()->Dump();
  }
}

void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
  if (UNLIKELY(IsExceptionPending())) {
    ScopedObjectAccess soa(Thread::Current());
    LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
        << GetException()->Dump();
  }
}

class MonitorExitVisitor : public SingleRootVisitor {
 public:
  explicit MonitorExitVisitor(Thread* self) : self_(self) { }

  // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
  void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
    if (self_->HoldsLock(entered_monitor)) {
      LOG(WARNING) << "Calling MonitorExit on object "
                   << entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")"
                   << " left locked by native thread "
                   << *Thread::Current() << " which is detaching";
      entered_monitor->MonitorExit(self_);
    }
  }

 private:
  Thread* const self_;
};

void Thread::Destroy() {
  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());

  if (tlsPtr_.jni_env != nullptr) {
    {
      ScopedObjectAccess soa(self);
      MonitorExitVisitor visitor(self);
      // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
      tlsPtr_.jni_env->monitors.VisitRoots(&visitor, RootInfo(kRootVMInternal));
    }
    // Release locally held global references; releasing them may require the mutator lock.
    if (tlsPtr_.jpeer != nullptr) {
      // If pthread_create fails we don't have a jni env here.
      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
      tlsPtr_.jpeer = nullptr;
    }
    if (tlsPtr_.class_loader_override != nullptr) {
      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override);
      tlsPtr_.class_loader_override = nullptr;
    }
  }

  if (tlsPtr_.opeer != nullptr) {
    ScopedObjectAccess soa(self);
    // We may need to call user-supplied managed code, do this before final clean-up.
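    // (Specifically Thread.dispatchUncaughtException() via HandleUncaughtExceptions and
    // ThreadGroup.removeThread() via RemoveFromThreadGroup, both invoked below.)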
    HandleUncaughtExceptions(soa);
    RemoveFromThreadGroup(soa);

    // this.nativePeer = 0;
    if (Runtime::Current()->IsActiveTransaction()) {
      jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer)
          ->SetLong<true>(tlsPtr_.opeer, 0);
    } else {
      jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer)
          ->SetLong<false>(tlsPtr_.opeer, 0);
    }
    Runtime* runtime = Runtime::Current();
    if (runtime != nullptr) {
      runtime->GetRuntimeCallbacks()->ThreadDeath(self);
    }

    // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
    // who is waiting.
    ObjPtr<mirror::Object> lock =
        jni::DecodeArtField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
    if (lock != nullptr) {
      StackHandleScope<1> hs(self);
      Handle<mirror::Object> h_obj(hs.NewHandle(lock));
      ObjectLock<mirror::Object> locker(self, h_obj);
      locker.NotifyAll();
    }
    tlsPtr_.opeer = nullptr;
  }

  {
    ScopedObjectAccess soa(self);
    Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
    if (kUseReadBarrier) {
      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
    }
  }
}

Thread::~Thread() {
  CHECK(tlsPtr_.class_loader_override == nullptr);
  CHECK(tlsPtr_.jpeer == nullptr);
  CHECK(tlsPtr_.opeer == nullptr);
  bool initialized = (tlsPtr_.jni_env != nullptr);  // Did Thread::Init run?
  if (initialized) {
    delete tlsPtr_.jni_env;
    tlsPtr_.jni_env = nullptr;
  }
  CHECK_NE(GetState(), kRunnable);
  CHECK(!ReadFlag(kCheckpointRequest));
  CHECK(!ReadFlag(kEmptyCheckpointRequest));
  CHECK(tlsPtr_.checkpoint_function == nullptr);
  CHECK_EQ(checkpoint_overflow_.size(), 0u);
  CHECK(tlsPtr_.flip_function == nullptr);
  CHECK_EQ(tls32_.is_transitioning_to_runnable, false);

  // Make sure we processed all deoptimization requests.
  CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";
  CHECK(tlsPtr_.frame_id_to_shadow_frame == nullptr) <<
      "Not all deoptimized frames have been consumed by the debugger.";

  // We may be deleting a stillborn thread.
  SetStateUnsafe(kTerminated);

  delete wait_cond_;
  delete wait_mutex_;

  if (tlsPtr_.long_jump_context != nullptr) {
    delete tlsPtr_.long_jump_context;
  }

  if (initialized) {
    CleanupCpu();
  }

  if (tlsPtr_.single_step_control != nullptr) {
    delete tlsPtr_.single_step_control;
  }
  delete tlsPtr_.instrumentation_stack;
  delete tlsPtr_.name;
  delete tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;

  Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);

  TearDownAlternateSignalStack();
}

void Thread::HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa) {
  if (!IsExceptionPending()) {
    return;
  }
  ScopedLocalRef<jobject> peer(tlsPtr_.jni_env, soa.AddLocalReference<jobject>(tlsPtr_.opeer));
  ScopedThreadStateChange tsc(this, kNative);

  // Get and clear the exception.
  ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred());
  tlsPtr_.jni_env->ExceptionClear();

  // Call the Thread instance's dispatchUncaughtException(Throwable).
  tlsPtr_.jni_env->CallVoidMethod(peer.get(),
                                  WellKnownClasses::java_lang_Thread_dispatchUncaughtException,
                                  exception.get());

  // If the dispatchUncaughtException threw, clear that exception too.
  tlsPtr_.jni_env->ExceptionClear();
}

void Thread::RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa) {
  // this.group.removeThread(this);
  // group can be null if we're in the compiler or a test.
  ObjPtr<mirror::Object> ogroup = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)
      ->GetObject(tlsPtr_.opeer);
  if (ogroup != nullptr) {
    ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
    ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(tlsPtr_.opeer));
    ScopedThreadStateChange tsc(soa.Self(), kNative);
    tlsPtr_.jni_env->CallVoidMethod(group.get(),
                                    WellKnownClasses::java_lang_ThreadGroup_removeThread,
                                    peer.get());
  }
}

bool Thread::HandleScopeContains(jobject obj) const {
  StackReference<mirror::Object>* hs_entry =
      reinterpret_cast<StackReference<mirror::Object>*>(obj);
  for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
    if (cur->Contains(hs_entry)) {
      return true;
    }
  }
  // JNI code invoked from portable code uses shadow frames rather than the handle scope.
  return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
}

void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
      visitor, RootInfo(kRootNativeStack, thread_id));
  for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
    cur->VisitRoots(buffered_visitor);
  }
}

ObjPtr<mirror::Object> Thread::DecodeJObject(jobject obj) const {
  if (obj == nullptr) {
    return nullptr;
  }
  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
  IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
  ObjPtr<mirror::Object> result;
  bool expect_null = false;
  // The "kinds" below are sorted by the frequency we expect to encounter them.
  if (kind == kLocal) {
    IndirectReferenceTable& locals = tlsPtr_.jni_env->locals;
    // Local references do not need a read barrier.
    result = locals.Get<kWithoutReadBarrier>(ref);
  } else if (kind == kHandleScopeOrInvalid) {
    // TODO: make stack indirect reference table lookup more efficient.
    // Check if this is a local reference in the handle scope.
    if (LIKELY(HandleScopeContains(obj))) {
      // Read from handle scope.
      result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
      VerifyObject(result);
    } else {
      tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of invalid jobject %p", obj);
      expect_null = true;
      result = nullptr;
    }
  } else if (kind == kGlobal) {
    result = tlsPtr_.jni_env->vm->DecodeGlobal(ref);
  } else {
    DCHECK_EQ(kind, kWeakGlobal);
    result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
    if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
      // This is a special case where it's okay to return null.
      expect_null = true;
      result = nullptr;
    }
  }

  if (UNLIKELY(!expect_null && result == nullptr)) {
    tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of deleted %s %p",
                                   ToStr<IndirectRefKind>(kind).c_str(), obj);
  }
  return result;
}

bool Thread::IsJWeakCleared(jweak obj) const {
  CHECK(obj != nullptr);
  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
  IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref);
  CHECK_EQ(kind, kWeakGlobal);
  return tlsPtr_.jni_env->vm->IsWeakGlobalCleared(const_cast<Thread*>(this), ref);
}

// Implements java.lang.Thread.interrupted.
bool Thread::Interrupted() {
  DCHECK_EQ(Thread::Current(), this);
  // No other thread can concurrently reset the interrupted flag.
  bool interrupted = tls32_.interrupted.LoadSequentiallyConsistent();
  if (interrupted) {
    tls32_.interrupted.StoreSequentiallyConsistent(false);
  }
  return interrupted;
}

// Implements java.lang.Thread.isInterrupted.
bool Thread::IsInterrupted() {
  return tls32_.interrupted.LoadSequentiallyConsistent();
}

void Thread::Interrupt(Thread* self) {
  MutexLock mu(self, *wait_mutex_);
  if (tls32_.interrupted.LoadSequentiallyConsistent()) {
    return;
  }
  tls32_.interrupted.StoreSequentiallyConsistent(true);
  NotifyLocked(self);
}

void Thread::Notify() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *wait_mutex_);
  NotifyLocked(self);
}

void Thread::NotifyLocked(Thread* self) {
  if (wait_monitor_ != nullptr) {
    wait_cond_->Signal(self);
  }
}

void Thread::SetClassLoaderOverride(jobject class_loader_override) {
  if (tlsPtr_.class_loader_override != nullptr) {
    GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override);
  }
  tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
}

using ArtMethodDexPcPair = std::pair<ArtMethod*, uint32_t>;

// Counts the stack trace depth and also fetches the first max_saved_frames frames.
class FetchStackTraceVisitor : public StackVisitor {
 public:
  explicit FetchStackTraceVisitor(Thread* thread,
                                  ArtMethodDexPcPair* saved_frames = nullptr,
                                  size_t max_saved_frames = 0)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        saved_frames_(saved_frames),
        max_saved_frames_(max_saved_frames) {}

  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
    // We want to skip frames up to and including the exception's constructor.
    // Note we also skip the frame if it doesn't have a method (namely the callee
    // save frame).
    ArtMethod* m = GetMethod();
    if (skipping_ && !m->IsRuntimeMethod() &&
        !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
      skipping_ = false;
    }
    if (!skipping_) {
      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
        if (depth_ < max_saved_frames_) {
          saved_frames_[depth_].first = m;
          saved_frames_[depth_].second = m->IsProxyMethod() ?
              DexFile::kDexNoIndex : GetDexPc();
        }
        ++depth_;
      }
    } else {
      ++skip_depth_;
    }
    return true;
  }

  uint32_t GetDepth() const {
    return depth_;
  }

  uint32_t GetSkipDepth() const {
    return skip_depth_;
  }

 private:
  uint32_t depth_ = 0;
  uint32_t skip_depth_ = 0;
  bool skipping_ = true;
  ArtMethodDexPcPair* saved_frames_;
  const size_t max_saved_frames_;

  DISALLOW_COPY_AND_ASSIGN(FetchStackTraceVisitor);
};

template<bool kTransactionActive>
class BuildInternalStackTraceVisitor : public StackVisitor {
 public:
  BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
        self_(self),
        skip_depth_(skip_depth),
        pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}

  bool Init(int depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
    // Allocate method trace as an object array where the first element is a pointer array that
    // contains the ArtMethod pointers and dex PCs. The rest of the elements are the declaring
    // class of the ArtMethod pointers.
    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
    StackHandleScope<1> hs(self_);
    ObjPtr<mirror::Class> array_class = class_linker->GetClassRoot(ClassLinker::kObjectArrayClass);
    // The first element is the methods and dex pc array, the other elements are declaring classes
    // for the methods to ensure classes in the stack trace don't get unloaded.
    Handle<mirror::ObjectArray<mirror::Object>> trace(
        hs.NewHandle(
            mirror::ObjectArray<mirror::Object>::Alloc(hs.Self(), array_class, depth + 1)));
    if (trace == nullptr) {
      // Acquire uninterruptible_ in all paths.
      self_->StartAssertNoThreadSuspension("Building internal stack trace");
      self_->AssertPendingOOMException();
      return false;
    }
    ObjPtr<mirror::PointerArray> methods_and_pcs =
        class_linker->AllocPointerArray(self_, depth * 2);
    const char* last_no_suspend_cause =
        self_->StartAssertNoThreadSuspension("Building internal stack trace");
    if (methods_and_pcs == nullptr) {
      self_->AssertPendingOOMException();
      return false;
    }
    trace->Set(0, methods_and_pcs);
    trace_ = trace.Get();
    // If we are called from native, use non-transactional mode.
    CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
    return true;
  }

  virtual ~BuildInternalStackTraceVisitor() RELEASE(Roles::uninterruptible_) {
    self_->EndAssertNoThreadSuspension(nullptr);
  }

  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (trace_ == nullptr) {
      return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
    }
    if (skip_depth_ > 0) {
      skip_depth_--;
      return true;
    }
    ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;  // Ignore runtime frames (in particular callee save).
    }
    AddFrame(m, m->IsProxyMethod() ?
        DexFile::kDexNoIndex : GetDexPc());
    return true;
  }

  void AddFrame(ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) {
    ObjPtr<mirror::PointerArray> trace_methods_and_pcs = GetTraceMethodsAndPCs();
    trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(count_, method, pointer_size_);
    trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(
        trace_methods_and_pcs->GetLength() / 2 + count_,
        dex_pc,
        pointer_size_);
    // Save the declaring class of the method to ensure that the declaring classes of the methods
    // do not get unloaded while the stack trace is live.
    trace_->Set(count_ + 1, method->GetDeclaringClass());
    ++count_;
  }

  ObjPtr<mirror::PointerArray> GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) {
    return ObjPtr<mirror::PointerArray>::DownCast(MakeObjPtr(trace_->Get(0)));
  }

  mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
    return trace_;
  }

 private:
  Thread* const self_;
  // How many more frames to skip.
  int32_t skip_depth_;
  // Current position down stack trace.
  uint32_t count_ = 0;
  // An object array where the first element is a pointer array that contains the ArtMethod
  // pointers on the stack and dex PCs. The rest of the elements are the declaring
  // class of the ArtMethod pointers. trace_[i+1] contains the declaring class of the ArtMethod of
  // the i'th frame.
  mirror::ObjectArray<mirror::Object>* trace_ = nullptr;
  // For cross compilation.
  const PointerSize pointer_size_;

  DISALLOW_COPY_AND_ASSIGN(BuildInternalStackTraceVisitor);
};

template<bool kTransactionActive>
jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
  // Compute depth of stack; save frames if possible to avoid needing to recompute many.
  constexpr size_t kMaxSavedFrames = 256;
  std::unique_ptr<ArtMethodDexPcPair[]> saved_frames(new ArtMethodDexPcPair[kMaxSavedFrames]);
  FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this),
                                       &saved_frames[0],
                                       kMaxSavedFrames);
  count_visitor.WalkStack();
  const uint32_t depth = count_visitor.GetDepth();
  const uint32_t skip_depth = count_visitor.GetSkipDepth();

  // Build internal stack trace.
  BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(),
                                                                         const_cast<Thread*>(this),
                                                                         skip_depth);
  if (!build_trace_visitor.Init(depth)) {
    return nullptr;  // Allocation failed.
  }
  // If we saved all of the frames we don't even need to do the actual stack walk. This is faster
  // than doing the stack walk twice.
  if (depth < kMaxSavedFrames) {
    for (size_t i = 0; i < depth; ++i) {
      build_trace_visitor.AddFrame(saved_frames[i].first, saved_frames[i].second);
    }
  } else {
    build_trace_visitor.WalkStack();
  }

  mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
  if (kIsDebugBuild) {
    ObjPtr<mirror::PointerArray> trace_methods = build_trace_visitor.GetTraceMethodsAndPCs();
    // Second half of trace_methods is dex PCs.
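    // Debug-build sanity check: every ArtMethod* slot in the first half of the pointer array
    // should have been filled in by AddFrame or the stack walk above.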
    for (uint32_t i = 0; i < static_cast<uint32_t>(trace_methods->GetLength() / 2); ++i) {
      auto* method = trace_methods->GetElementPtrSize<ArtMethod*>(
          i, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
      CHECK(method != nullptr);
    }
  }
  return soa.AddLocalReference<jobject>(trace);
}
template jobject Thread::CreateInternalStackTrace<false>(
    const ScopedObjectAccessAlreadyRunnable& soa) const;
template jobject Thread::CreateInternalStackTrace<true>(
    const ScopedObjectAccessAlreadyRunnable& soa) const;

bool Thread::IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const {
  // Only count the depth since we do not pass a stack frame array as an argument.
  FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this));
  count_visitor.WalkStack();
  return count_visitor.GetDepth() == static_cast<uint32_t>(exception->GetStackDepth());
}

jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
    const ScopedObjectAccessAlreadyRunnable& soa,
    jobject internal,
    jobjectArray output_array,
    int* stack_depth) {
  // Decode the internal stack trace into the depth, method trace and PC trace.
  // Subtract one for the methods and PC trace.
  int32_t depth = soa.Decode<mirror::Array>(internal)->GetLength() - 1;
  DCHECK_GE(depth, 0);

  ClassLinker* const class_linker = Runtime::Current()->GetClassLinker();

  jobjectArray result;

  if (output_array != nullptr) {
    // Reuse the array we were given.
    result = output_array;
    // ...adjusting the number of frames we'll write to not exceed the array length.
    const int32_t traces_length =
        soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->GetLength();
    depth = std::min(depth, traces_length);
  } else {
    // Create java_trace array and place in local reference table.
    mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
        class_linker->AllocStackTraceElementArray(soa.Self(), depth);
    if (java_traces == nullptr) {
      return nullptr;
    }
    result = soa.AddLocalReference<jobjectArray>(java_traces);
  }

  if (stack_depth != nullptr) {
    *stack_depth = depth;
  }

  for (int32_t i = 0; i < depth; ++i) {
    ObjPtr<mirror::ObjectArray<mirror::Object>> decoded_traces =
        soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>();
    // Methods and dex PC trace is element 0.
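    // (The pointer array is backed by an int array on 32-bit targets and by a long array on
    // 64-bit targets, hence the two cases accepted below.)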
    DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray());
    ObjPtr<mirror::PointerArray> const method_trace =
        ObjPtr<mirror::PointerArray>::DownCast(MakeObjPtr(decoded_traces->Get(0)));
    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line).
    ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize);
    uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
        i + method_trace->GetLength() / 2, kRuntimePointerSize);
    int32_t line_number;
    StackHandleScope<3> hs(soa.Self());
    auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
    auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
    if (method->IsProxyMethod()) {
      line_number = -1;
      class_name_object.Assign(method->GetDeclaringClass()->GetName());
      // source_name_object intentionally left null for proxy methods.
    } else {
      line_number = method->GetLineNumFromDexPC(dex_pc);
      // Allocate element, potentially triggering GC.
      // TODO: reuse class_name_object via Class::name_?
      const char* descriptor = method->GetDeclaringClassDescriptor();
      CHECK(descriptor != nullptr);
      std::string class_name(PrettyDescriptor(descriptor));
      class_name_object.Assign(
          mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
      if (class_name_object == nullptr) {
        soa.Self()->AssertPendingOOMException();
        return nullptr;
      }
      const char* source_file = method->GetDeclaringClassSourceFile();
      if (line_number == -1) {
        // Make the line_number field of StackTraceElement hold the dex pc.
        // source_name_object is intentionally left null if we failed to map the dex pc to
        // a line number (most probably because there is no debug info). See b/30183883.
        line_number = dex_pc;
      } else {
        if (source_file != nullptr) {
          source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
          if (source_name_object == nullptr) {
            soa.Self()->AssertPendingOOMException();
            return nullptr;
          }
        }
      }
    }
    const char* method_name = method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName();
    CHECK(method_name != nullptr);
    Handle<mirror::String> method_name_object(
        hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
    if (method_name_object == nullptr) {
      return nullptr;
    }
    ObjPtr<mirror::StackTraceElement> obj = mirror::StackTraceElement::Alloc(soa.Self(),
                                                                             class_name_object,
                                                                             method_name_object,
                                                                             source_name_object,
                                                                             line_number);
    if (obj == nullptr) {
      return nullptr;
    }
    // We are called from native: use non-transactional mode.
    soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->Set<false>(i, obj);
  }
  return result;
}

void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
  va_list args;
  va_start(args, fmt);
  ThrowNewExceptionV(exception_class_descriptor, fmt, args);
  va_end(args);
}

void Thread::ThrowNewExceptionV(const char* exception_class_descriptor,
                                const char* fmt, va_list ap) {
  std::string msg;
  StringAppendV(&msg, fmt, ap);
  ThrowNewException(exception_class_descriptor, msg.c_str());
}

void Thread::ThrowNewException(const char* exception_class_descriptor,
                               const char* msg) {
  // Callers should either clear the pending exception or call ThrowNewWrappedException instead.
  AssertNoPendingExceptionForNewException(msg);
  ThrowNewWrappedException(exception_class_descriptor, msg);
}

static ObjPtr<mirror::ClassLoader> GetCurrentClassLoader(Thread* self)
    REQUIRES_SHARED(Locks::mutator_lock_) {
  ArtMethod* method = self->GetCurrentMethod(nullptr);
  return method != nullptr
      ? method->GetDeclaringClass()->GetClassLoader()
      : nullptr;
}

void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
                                      const char* msg) {
  DCHECK_EQ(this, Thread::Current());
  ScopedObjectAccessUnchecked soa(this);
  StackHandleScope<3> hs(soa.Self());
  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self())));
  ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException()));
  ClearException();
  Runtime* runtime = Runtime::Current();
  auto* cl = runtime->GetClassLinker();
  Handle<mirror::Class> exception_class(
      hs.NewHandle(cl->FindClass(this, exception_class_descriptor, class_loader)));
  if (UNLIKELY(exception_class == nullptr)) {
    CHECK(IsExceptionPending());
    LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
    return;
  }

  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true,
                                                             true))) {
    DCHECK(IsExceptionPending());
    return;
  }
  DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
  Handle<mirror::Throwable> exception(
      hs.NewHandle(ObjPtr<mirror::Throwable>::DownCast(exception_class->AllocObject(this))));

  // If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
  if (exception == nullptr) {
    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
    return;
  }

  // Choose an appropriate constructor and set up the arguments.
  const char* signature;
  ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr);
  if (msg != nullptr) {
    // Ensure we remember this and the method over the String allocation.
    msg_string.reset(
        soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg)));
    if (UNLIKELY(msg_string.get() == nullptr)) {
      CHECK(IsExceptionPending());  // OOME.
      return;
    }
    if (cause.get() == nullptr) {
      signature = "(Ljava/lang/String;)V";
    } else {
      signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
    }
  } else {
    if (cause.get() == nullptr) {
      signature = "()V";
    } else {
      signature = "(Ljava/lang/Throwable;)V";
    }
  }
  ArtMethod* exception_init_method =
      exception_class->FindDeclaredDirectMethod("<init>", signature, cl->GetImagePointerSize());

  CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
      << PrettyDescriptor(exception_class_descriptor);

  if (UNLIKELY(!runtime->IsStarted())) {
    // Something is trying to throw an exception without a started runtime, which is the common
    // case in the compiler. We won't be able to invoke the constructor of the exception, so set
    // the exception fields directly.
    if (msg != nullptr) {
      exception->SetDetailMessage(DecodeJObject(msg_string.get())->AsString());
    }
    if (cause.get() != nullptr) {
      exception->SetCause(DecodeJObject(cause.get())->AsThrowable());
    }
    ScopedLocalRef<jobject> trace(GetJniEnv(),
                                  Runtime::Current()->IsActiveTransaction()
                                      ? CreateInternalStackTrace<true>(soa)
                                      : CreateInternalStackTrace<false>(soa));
    if (trace.get() != nullptr) {
      exception->SetStackState(DecodeJObject(trace.get()).Ptr());
    }
    SetException(exception.Get());
  } else {
    jvalue jv_args[2];
    size_t i = 0;

    if (msg != nullptr) {
      jv_args[i].l = msg_string.get();
      ++i;
    }
    if (cause.get() != nullptr) {
      jv_args[i].l = cause.get();
      ++i;
    }
    ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get()));
    InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(exception_init_method), jv_args);
    if (LIKELY(!IsExceptionPending())) {
      SetException(exception.Get());
    }
  }
}

void Thread::ThrowOutOfMemoryError(const char* msg) {
  LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
      msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : ""));
  if (!tls32_.throwing_OutOfMemoryError) {
    tls32_.throwing_OutOfMemoryError = true;
    ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
    tls32_.throwing_OutOfMemoryError = false;
  } else {
    Dump(LOG_STREAM(WARNING));  // The pre-allocated OOME has no stack, so help out and log one.
    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
  }
}

Thread* Thread::CurrentFromGdb() {
  return Thread::Current();
}

void Thread::DumpFromGdb() const {
  std::ostringstream ss;
  Dump(ss);
  std::string str(ss.str());
  // Log to stderr for debugging command line processes.
  std::cerr << str;
#ifdef ART_TARGET_ANDROID
  // Log to logcat for debugging frameworks processes.
  LOG(INFO) << str;
#endif
}

// Explicitly instantiate 32-bit and 64-bit thread offset dumping support.
template
void Thread::DumpThreadOffset<PointerSize::k32>(std::ostream& os, uint32_t offset);
template
void Thread::DumpThreadOffset<PointerSize::k64>(std::ostream& os, uint32_t offset);

template<PointerSize ptr_size>
void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
#define DO_THREAD_OFFSET(x, y) \
    if (offset == (x).Uint32Value()) { \
      os << (y); \
      return; \
    }
  DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags")
  DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table")
  DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception")
  DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer")
  DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env")
  DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self")
  DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end")
  DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id")
  DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method")
  DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame")
  DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope")
  DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
#undef DO_THREAD_OFFSET

#define JNI_ENTRY_POINT_INFO(x) \
    if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
      os << #x; \
      return; \
    }
  JNI_ENTRY_POINT_INFO(pDlsymLookup)
#undef JNI_ENTRY_POINT_INFO

#define QUICK_ENTRY_POINT_INFO(x) \
    if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
      os << #x; \
      return; \
    }
  QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
  QUICK_ENTRY_POINT_INFO(pAllocArrayResolved8)
  QUICK_ENTRY_POINT_INFO(pAllocArrayResolved16)
  QUICK_ENTRY_POINT_INFO(pAllocArrayResolved32)
  QUICK_ENTRY_POINT_INFO(pAllocArrayResolved64)
  QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
  QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
  QUICK_ENTRY_POINT_INFO(pAllocObjectWithChecks)
  QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes)
  QUICK_ENTRY_POINT_INFO(pAllocStringFromChars)
  QUICK_ENTRY_POINT_INFO(pAllocStringFromString)
  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
  QUICK_ENTRY_POINT_INFO(pCheckInstanceOf)
  QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess)
  QUICK_ENTRY_POINT_INFO(pInitializeType)
  QUICK_ENTRY_POINT_INFO(pResolveString)
  QUICK_ENTRY_POINT_INFO(pSet8Instance)
  QUICK_ENTRY_POINT_INFO(pSet8Static)
  QUICK_ENTRY_POINT_INFO(pSet16Instance)
  QUICK_ENTRY_POINT_INFO(pSet16Static)
  QUICK_ENTRY_POINT_INFO(pSet32Instance)
  QUICK_ENTRY_POINT_INFO(pSet32Static)
  QUICK_ENTRY_POINT_INFO(pSet64Instance)
  QUICK_ENTRY_POINT_INFO(pSet64Static)
  QUICK_ENTRY_POINT_INFO(pSetObjInstance)
  QUICK_ENTRY_POINT_INFO(pSetObjStatic)
  QUICK_ENTRY_POINT_INFO(pGetByteInstance)
  QUICK_ENTRY_POINT_INFO(pGetBooleanInstance)
  QUICK_ENTRY_POINT_INFO(pGetByteStatic)
  QUICK_ENTRY_POINT_INFO(pGetBooleanStatic)
  QUICK_ENTRY_POINT_INFO(pGetShortInstance)
  QUICK_ENTRY_POINT_INFO(pGetCharInstance)
  QUICK_ENTRY_POINT_INFO(pGetShortStatic)
  QUICK_ENTRY_POINT_INFO(pGetCharStatic)
  QUICK_ENTRY_POINT_INFO(pGet32Instance)
  QUICK_ENTRY_POINT_INFO(pGet32Static)
  QUICK_ENTRY_POINT_INFO(pGet64Instance)
  QUICK_ENTRY_POINT_INFO(pGet64Static)
  QUICK_ENTRY_POINT_INFO(pGetObjInstance)
  QUICK_ENTRY_POINT_INFO(pGetObjStatic)
  QUICK_ENTRY_POINT_INFO(pAputObject)
  QUICK_ENTRY_POINT_INFO(pJniMethodStart)
  QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized)
  QUICK_ENTRY_POINT_INFO(pJniMethodEnd)
  QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized)
  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference)
  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized)
  QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline)
  QUICK_ENTRY_POINT_INFO(pLockObject)
  QUICK_ENTRY_POINT_INFO(pUnlockObject)
  QUICK_ENTRY_POINT_INFO(pCmpgDouble)
  QUICK_ENTRY_POINT_INFO(pCmpgFloat)
  QUICK_ENTRY_POINT_INFO(pCmplDouble)
  QUICK_ENTRY_POINT_INFO(pCmplFloat)
  QUICK_ENTRY_POINT_INFO(pCos)
  QUICK_ENTRY_POINT_INFO(pSin)
  QUICK_ENTRY_POINT_INFO(pAcos)
  QUICK_ENTRY_POINT_INFO(pAsin)
  QUICK_ENTRY_POINT_INFO(pAtan)
  QUICK_ENTRY_POINT_INFO(pAtan2)
  QUICK_ENTRY_POINT_INFO(pCbrt)
  QUICK_ENTRY_POINT_INFO(pCosh)
  QUICK_ENTRY_POINT_INFO(pExp)
  QUICK_ENTRY_POINT_INFO(pExpm1)
  QUICK_ENTRY_POINT_INFO(pHypot)
  QUICK_ENTRY_POINT_INFO(pLog)
  QUICK_ENTRY_POINT_INFO(pLog10)
  QUICK_ENTRY_POINT_INFO(pNextAfter)
  QUICK_ENTRY_POINT_INFO(pSinh)
  QUICK_ENTRY_POINT_INFO(pTan)
  QUICK_ENTRY_POINT_INFO(pTanh)
  QUICK_ENTRY_POINT_INFO(pFmod)
  QUICK_ENTRY_POINT_INFO(pL2d)
  QUICK_ENTRY_POINT_INFO(pFmodf)
  QUICK_ENTRY_POINT_INFO(pL2f)
  QUICK_ENTRY_POINT_INFO(pD2iz)
  QUICK_ENTRY_POINT_INFO(pF2iz)
  QUICK_ENTRY_POINT_INFO(pIdivmod)
  QUICK_ENTRY_POINT_INFO(pD2l)
  QUICK_ENTRY_POINT_INFO(pF2l)
  QUICK_ENTRY_POINT_INFO(pLdiv)
  QUICK_ENTRY_POINT_INFO(pLmod)
  QUICK_ENTRY_POINT_INFO(pLmul)
  QUICK_ENTRY_POINT_INFO(pShlLong)
  QUICK_ENTRY_POINT_INFO(pShrLong)
  QUICK_ENTRY_POINT_INFO(pUshrLong)
  QUICK_ENTRY_POINT_INFO(pIndexOf)
  QUICK_ENTRY_POINT_INFO(pStringCompareTo)
  QUICK_ENTRY_POINT_INFO(pMemcpy)
  QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline)
  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline)
  QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge)
  QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck)
  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck)
  QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck)
  QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck)
  QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck)
  QUICK_ENTRY_POINT_INFO(pInvokePolymorphic)
  QUICK_ENTRY_POINT_INFO(pTestSuspend)
  QUICK_ENTRY_POINT_INFO(pDeliverException)
  QUICK_ENTRY_POINT_INFO(pThrowArrayBounds)
  QUICK_ENTRY_POINT_INFO(pThrowDivZero)
  QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
  QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
  QUICK_ENTRY_POINT_INFO(pDeoptimize)
  QUICK_ENTRY_POINT_INFO(pA64Load)
  QUICK_ENTRY_POINT_INFO(pA64Store)
  QUICK_ENTRY_POINT_INFO(pNewEmptyString)
  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B)
  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI)
  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII)
  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII)
  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIIString)
  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BString)
  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIICharset)
  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BCharset)
  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_C)
  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_CII)
  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_IIC)
  QUICK_ENTRY_POINT_INFO(pNewStringFromCodePoints)
  QUICK_ENTRY_POINT_INFO(pNewStringFromString)
  QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer)
  QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
  QUICK_ENTRY_POINT_INFO(pReadBarrierJni)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg00)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg01)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg02)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg03)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg04)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg05)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg06)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg07)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg08)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg09)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg10)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg11)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg12)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg13)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg14)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg15)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg16)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg17)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg18)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg19)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg20)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg21)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg22)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg23)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg24)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg25)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg26)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg27)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg28)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg29)
  QUICK_ENTRY_POINT_INFO(pReadBarrierSlow)
  QUICK_ENTRY_POINT_INFO(pReadBarrierForRootSlow)

  QUICK_ENTRY_POINT_INFO(pJniMethodFastStart)
  QUICK_ENTRY_POINT_INFO(pJniMethodFastEnd)
#undef QUICK_ENTRY_POINT_INFO

  os << offset;
}

void Thread::QuickDeliverException() {
  // Get exception from thread.
  ObjPtr<mirror::Throwable> exception = GetException();
  CHECK(exception != nullptr);
  if (exception == GetDeoptimizationException()) {
    artDeoptimize(this);
    UNREACHABLE();
  }

  // This is a real exception: let the instrumentation know about it.
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  if (instrumentation->HasExceptionCaughtListeners() &&
      IsExceptionThrownByCurrentMethod(exception)) {
    // Instrumentation may cause GC so keep the exception object safe.
    StackHandleScope<1> hs(this);
    HandleWrapperObjPtr<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
    instrumentation->ExceptionCaughtEvent(this, exception.Ptr());
  }
  // Does instrumentation need to deoptimize the stack?
  // Note: we do this *after* reporting the exception to instrumentation in case it
  // now requires deoptimization. It may happen if a debugger is attached and requests
  // new events (single-step, breakpoint, ...) when the exception is reported.
  if (Dbg::IsForcedInterpreterNeededForException(this)) {
    NthCallerVisitor visitor(this, 0, false);
    visitor.WalkStack();
    if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) {
      // Save the exception into the deoptimization context so it can be restored
      // before entering the interpreter.
      PushDeoptimizationContext(
          JValue(), /* is_reference */ false, /* from_code */ false, exception);
      artDeoptimize(this);
      UNREACHABLE();
    } else {
      LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
                   << visitor.caller->PrettyMethod();
    }
  }

  // Don't leave the exception visible while we try to find the handler, which may cause class
  // resolution.
  ClearException();
  QuickExceptionHandler exception_handler(this, false);
  exception_handler.FindCatch(exception);
  exception_handler.UpdateInstrumentationStack();
  exception_handler.DoLongJump();
}

Context* Thread::GetLongJumpContext() {
  Context* result = tlsPtr_.long_jump_context;
  if (result == nullptr) {
    result = Context::Create();
  } else {
    tlsPtr_.long_jump_context = nullptr;  // Avoid context being shared.
    result->Reset();
  }
  return result;
}

// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
struct CurrentMethodVisitor FINAL : public StackVisitor {
  CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread,
                     context,
                     StackVisitor::StackWalkKind::kIncludeInlinedFrames,
                     check_suspended),
        this_object_(nullptr),
        method_(nullptr),
        dex_pc_(0),
        abort_on_error_(abort_on_error) {}
  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      // Continue if this is a runtime method.
      return true;
    }
    if (context_ != nullptr) {
      this_object_ = GetThisObject();
    }
    method_ = m;
    dex_pc_ = GetDexPc(abort_on_error_);
    return false;
  }
  ObjPtr<mirror::Object> this_object_;
  ArtMethod* method_;
  uint32_t dex_pc_;
  const bool abort_on_error_;
};

ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc,
                                    bool check_suspended,
                                    bool abort_on_error) const {
  CurrentMethodVisitor visitor(const_cast<Thread*>(this),
                               nullptr,
                               check_suspended,
                               abort_on_error);
  visitor.WalkStack(false);
  if (dex_pc != nullptr) {
    *dex_pc = visitor.dex_pc_;
  }
  return visitor.method_;
}

bool Thread::HoldsLock(ObjPtr<mirror::Object> object) const {
  return object != nullptr && object->GetLockOwnerThreadId() == GetThreadId();
}
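// ReferenceMapVisitor walks the stack and reports every object reference held by each frame
// (shadow frames for interpreted code, quick frames for compiled code) to the given RootVisitor.
// The visitor may update a reference in place, which is how a moving collector fixes up stack
// roots.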
// RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
template <typename RootVisitor, bool kPrecise = false>
class ReferenceMapVisitor : public StackVisitor {
 public:
  ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
        // We are visiting the references in compiled frames, so we do not need
        // to know the inlined frames.
      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        visitor_(visitor) {}

  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (false) {  // Flip to true to log every frame visited during stack-root walks.
      LOG(INFO) << "Visiting stack roots in " << ArtMethod::PrettyMethod(GetMethod())
                << StringPrintf("@ PC:%04x", GetDexPc());
    }
    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
    if (shadow_frame != nullptr) {
      VisitShadowFrame(shadow_frame);
    } else {
      VisitQuickFrame();
    }
    return true;
  }

  void VisitShadowFrame(ShadowFrame* shadow_frame) REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod* m = shadow_frame->GetMethod();
    DCHECK(m != nullptr);  // Check before the method is dereferenced below.
    VisitDeclaringClass(m);
    size_t num_regs = shadow_frame->NumberOfVRegs();
    DCHECK(m->IsNative() || shadow_frame->HasReferenceArray());
    // Handle scope for JNI, or references for the interpreter.
    for (size_t reg = 0; reg < num_regs; ++reg) {
      mirror::Object* ref = shadow_frame->GetVRegReference(reg);
      if (ref != nullptr) {
        mirror::Object* new_ref = ref;
        visitor_(&new_ref, reg, this);
        if (new_ref != ref) {
          shadow_frame->SetVRegReference(reg, new_ref);
        }
      }
    }
    // Mark lock count map required for structured locking checks.
    shadow_frame->GetLockCountData().VisitMonitors(visitor_, -1, this);
  }

 private:
  // Visiting the declaring class is necessary so that we don't unload the class of a method that
  // is executing. We need to ensure that the code stays mapped. NO_THREAD_SAFETY_ANALYSIS since
  // the threads do not all hold the heap bitmap lock for parallel GC.
  void VisitDeclaringClass(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_)
      NO_THREAD_SAFETY_ANALYSIS {
    ObjPtr<mirror::Class> klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
    // klass can be null for runtime methods.
    if (klass != nullptr) {
      if (kVerifyImageObjectsMarked) {
        gc::Heap* const heap = Runtime::Current()->GetHeap();
        gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass,
                                                                                /* fail_ok */ true);
        if (space != nullptr && space->IsImageSpace()) {
          bool failed = false;
          if (!space->GetLiveBitmap()->Test(klass.Ptr())) {
            failed = true;
            LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image " << *space;
          } else if (!heap->GetLiveBitmap()->Test(klass.Ptr())) {
            failed = true;
            LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image through live bitmap " << *space;
          }
          if (failed) {
            GetThread()->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
            space->AsImageSpace()->DumpSections(LOG_STREAM(FATAL_WITHOUT_ABORT));
            LOG(FATAL_WITHOUT_ABORT) << "Method@" << method->GetDexMethodIndex() << ":" << method
                                     << " klass@" << klass.Ptr();
            // Pretty info last in case it crashes.
3199 LOG(FATAL) << "Method " << method->PrettyMethod() << " klass " 3200 << klass->PrettyClass(); 3201 } 3202 } 3203 } 3204 mirror::Object* new_ref = klass.Ptr(); 3205 visitor_(&new_ref, -1, this); 3206 if (new_ref != klass) { 3207 method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass()); 3208 } 3209 } 3210 } 3211 3212 template <typename T> 3213 ALWAYS_INLINE 3214 inline void VisitQuickFrameWithVregCallback() REQUIRES_SHARED(Locks::mutator_lock_) { 3215 ArtMethod** cur_quick_frame = GetCurrentQuickFrame(); 3216 DCHECK(cur_quick_frame != nullptr); 3217 ArtMethod* m = *cur_quick_frame; 3218 VisitDeclaringClass(m); 3219 3220 // Process register map (which native and runtime methods don't have) 3221 if (!m->IsNative() && !m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) { 3222 const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); 3223 DCHECK(method_header->IsOptimized()); 3224 auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>( 3225 reinterpret_cast<uintptr_t>(cur_quick_frame)); 3226 uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc()); 3227 CodeInfo code_info = method_header->GetOptimizedCodeInfo(); 3228 CodeInfoEncoding encoding = code_info.ExtractEncoding(); 3229 StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding); 3230 DCHECK(map.IsValid()); 3231 3232 T vreg_info(m, code_info, encoding, map, visitor_); 3233 3234 // Visit stack entries that hold pointers. 3235 const size_t number_of_bits = code_info.GetNumberOfStackMaskBits(encoding); 3236 BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, map); 3237 for (size_t i = 0; i < number_of_bits; ++i) { 3238 if (stack_mask.LoadBit(i)) { 3239 auto* ref_addr = vreg_base + i; 3240 mirror::Object* ref = ref_addr->AsMirrorPtr(); 3241 if (ref != nullptr) { 3242 mirror::Object* new_ref = ref; 3243 vreg_info.VisitStack(&new_ref, i, this); 3244 if (ref != new_ref) { 3245 ref_addr->Assign(new_ref); 3246 } 3247 } 3248 } 3249 } 3250 // Visit callee-save registers that hold pointers. 
      // Visit callee-save registers that hold pointers.
      uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, map);
      for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
        if (register_mask & (1 << i)) {
          mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
          if (kIsDebugBuild && ref_addr == nullptr) {
            std::string thread_name;
            GetThread()->GetThreadName(thread_name);
            LOG(FATAL_WITHOUT_ABORT) << "On thread " << thread_name;
            DescribeStack(GetThread());
            LOG(FATAL) << "Found an unsaved callee-save register " << i << " (null GPRAddress) "
                       << "set in register_mask=" << register_mask << " at " << DescribeLocation();
          }
          if (*ref_addr != nullptr) {
            vreg_info.VisitRegister(ref_addr, i, this);
          }
        }
      }
    }
  }

  void VisitQuickFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kPrecise) {
      VisitQuickFramePrecise();
    } else {
      VisitQuickFrameNonPrecise();
    }
  }

  void VisitQuickFrameNonPrecise() REQUIRES_SHARED(Locks::mutator_lock_) {
    struct UndefinedVRegInfo {
      UndefinedVRegInfo(ArtMethod* method ATTRIBUTE_UNUSED,
                        const CodeInfo& code_info ATTRIBUTE_UNUSED,
                        const CodeInfoEncoding& encoding ATTRIBUTE_UNUSED,
                        const StackMap& map ATTRIBUTE_UNUSED,
                        RootVisitor& _visitor)
          : visitor(_visitor) {
      }

      ALWAYS_INLINE
      void VisitStack(mirror::Object** ref,
                      size_t stack_index ATTRIBUTE_UNUSED,
                      const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        visitor(ref, -1, stack_visitor);
      }

      ALWAYS_INLINE
      void VisitRegister(mirror::Object** ref,
                         size_t register_index ATTRIBUTE_UNUSED,
                         const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        visitor(ref, -1, stack_visitor);
      }

      RootVisitor& visitor;
    };
    VisitQuickFrameWithVregCallback<UndefinedVRegInfo>();
  }

  void VisitQuickFramePrecise() REQUIRES_SHARED(Locks::mutator_lock_) {
    struct StackMapVRegInfo {
      StackMapVRegInfo(ArtMethod* method,
                       const CodeInfo& _code_info,
                       const CodeInfoEncoding& _encoding,
                       const StackMap& map,
                       RootVisitor& _visitor)
          : number_of_dex_registers(method->GetCodeItem()->registers_size_),
            code_info(_code_info),
            encoding(_encoding),
            dex_register_map(code_info.GetDexRegisterMapOf(map,
                                                           encoding,
                                                           number_of_dex_registers)),
            visitor(_visitor) {
      }

      // TODO: If necessary, we should consider caching a reverse map instead of the linear
      // lookups for each location.
      void FindWithType(const size_t index,
                        const DexRegisterLocation::Kind kind,
                        mirror::Object** ref,
                        const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        bool found = false;
        for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) {
          DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(
              dex_reg, number_of_dex_registers, code_info, encoding);
          if (location.GetKind() == kind && static_cast<size_t>(location.GetValue()) == index) {
            visitor(ref, dex_reg, stack_visitor);
            found = true;
          }
        }
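        // Note: several dex registers may map to the same physical location, so the visitor can
        // be invoked more than once for a single slot or register.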
        if (!found) {
          // If nothing found, report with -1.
          visitor(ref, -1, stack_visitor);
        }
      }

      void VisitStack(mirror::Object** ref, size_t stack_index, const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        const size_t stack_offset = stack_index * kFrameSlotSize;
        FindWithType(stack_offset,
                     DexRegisterLocation::Kind::kInStack,
                     ref,
                     stack_visitor);
      }

      void VisitRegister(mirror::Object** ref,
                         size_t register_index,
                         const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        FindWithType(register_index,
                     DexRegisterLocation::Kind::kInRegister,
                     ref,
                     stack_visitor);
      }

      size_t number_of_dex_registers;
      const CodeInfo& code_info;
      const CodeInfoEncoding& encoding;
      DexRegisterMap dex_register_map;
      RootVisitor& visitor;
    };
    VisitQuickFrameWithVregCallback<StackMapVRegInfo>();
  }

  // Visitor for when we visit a root.
  RootVisitor& visitor_;
};

class RootCallbackVisitor {
 public:
  RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}

  void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
  }

 private:
  RootVisitor* const visitor_;
  const uint32_t tid_;
};

template <bool kPrecise>
void Thread::VisitRoots(RootVisitor* visitor) {
  const uint32_t thread_id = GetThreadId();
  visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
  if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
                       RootInfo(kRootNativeStack, thread_id));
  }
  visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
  tlsPtr_.jni_env->locals.VisitRoots(visitor, RootInfo(kRootJNILocal, thread_id));
  tlsPtr_.jni_env->monitors.VisitRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
  HandleScopeVisitRoots(visitor, thread_id);
  if (tlsPtr_.debug_invoke_req != nullptr) {
    tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
  }
  // Visit roots for deoptimization.
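  // Shadow frames stacked for deoptimization hang off the thread rather than the active stack,
  // so the normal stack walk further below would miss them; visit them explicitly here.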
  if (tlsPtr_.stacked_shadow_frame_record != nullptr) {
    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
    ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback);
    for (StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
         record != nullptr;
         record = record->GetLink()) {
      for (ShadowFrame* shadow_frame = record->GetShadowFrame();
           shadow_frame != nullptr;
           shadow_frame = shadow_frame->GetLink()) {
        mapper.VisitShadowFrame(shadow_frame);
      }
    }
  }
  for (DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
       record != nullptr;
       record = record->GetLink()) {
    if (record->IsReference()) {
      visitor->VisitRootIfNonNull(record->GetReturnValueAsGCRoot(),
                                  RootInfo(kRootThreadObject, thread_id));
    }
    visitor->VisitRootIfNonNull(record->GetPendingExceptionAsGCRoot(),
                                RootInfo(kRootThreadObject, thread_id));
  }
  if (tlsPtr_.frame_id_to_shadow_frame != nullptr) {
    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
    ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback);
    for (FrameIdToShadowFrame* record = tlsPtr_.frame_id_to_shadow_frame;
         record != nullptr;
         record = record->GetNext()) {
      mapper.VisitShadowFrame(record->GetShadowFrame());
    }
  }
  for (auto* verifier = tlsPtr_.method_verifier; verifier != nullptr; verifier = verifier->link_) {
    verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id));
  }
  // Visit roots on this thread's stack.
  RuntimeContextType context;
  RootCallbackVisitor visitor_to_callback(visitor, thread_id);
  ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, &context, visitor_to_callback);
  mapper.template WalkStack<StackVisitor::CountTransitions::kNo>(false);
  for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
    visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
  }
}

void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  if ((flags & VisitRootFlags::kVisitRootFlagPrecise) != 0) {
    VisitRoots<true>(visitor);
  } else {
    VisitRoots<false>(visitor);
  }
}

class VerifyRootVisitor : public SingleRootVisitor {
 public:
  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    VerifyObject(root);
  }
};

void Thread::VerifyStackImpl() {
  if (Runtime::Current()->GetHeap()->IsObjectValidationEnabled()) {
    VerifyRootVisitor visitor;
    std::unique_ptr<Context> context(Context::Create());
    RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId());
    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback);
    mapper.WalkStack();
  }
}
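// Note: called while a stack overflow is already being handled. Hitting the recursive case below
// means the reserved region had already been released, i.e. kStackOverflowReservedBytes was too
// small for the handling code itself.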
3487 LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently " 3488 << GetStackOverflowReservedBytes(kRuntimeISA) << ")?"; 3489 DumpStack(LOG_STREAM(ERROR)); 3490 LOG(FATAL) << "Recursive stack overflow."; 3491 } 3492 3493 tlsPtr_.stack_end = tlsPtr_.stack_begin; 3494 3495 // Remove the stack overflow protection if is it set up. 3496 bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks(); 3497 if (implicit_stack_check) { 3498 if (!UnprotectStack()) { 3499 LOG(ERROR) << "Unable to remove stack protection for stack overflow"; 3500 } 3501 } 3502} 3503 3504void Thread::SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit) { 3505 DCHECK_LE(start, end); 3506 DCHECK_LE(end, limit); 3507 tlsPtr_.thread_local_start = start; 3508 tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start; 3509 tlsPtr_.thread_local_end = end; 3510 tlsPtr_.thread_local_limit = limit; 3511 tlsPtr_.thread_local_objects = 0; 3512} 3513 3514bool Thread::HasTlab() const { 3515 bool has_tlab = tlsPtr_.thread_local_pos != nullptr; 3516 if (has_tlab) { 3517 DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr); 3518 } else { 3519 DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr); 3520 } 3521 return has_tlab; 3522} 3523 3524std::ostream& operator<<(std::ostream& os, const Thread& thread) { 3525 thread.ShortDump(os); 3526 return os; 3527} 3528 3529bool Thread::ProtectStack(bool fatal_on_error) { 3530 void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize; 3531 VLOG(threads) << "Protecting stack at " << pregion; 3532 if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) { 3533 if (fatal_on_error) { 3534 LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. 
" 3535 "Reason: " 3536 << strerror(errno) << " size: " << kStackOverflowProtectedSize; 3537 } 3538 return false; 3539 } 3540 return true; 3541} 3542 3543bool Thread::UnprotectStack() { 3544 void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize; 3545 VLOG(threads) << "Unprotecting stack at " << pregion; 3546 return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0; 3547} 3548 3549void Thread::ActivateSingleStepControl(SingleStepControl* ssc) { 3550 CHECK(Dbg::IsDebuggerActive()); 3551 CHECK(GetSingleStepControl() == nullptr) << "Single step already active in thread " << *this; 3552 CHECK(ssc != nullptr); 3553 tlsPtr_.single_step_control = ssc; 3554} 3555 3556void Thread::DeactivateSingleStepControl() { 3557 CHECK(Dbg::IsDebuggerActive()); 3558 CHECK(GetSingleStepControl() != nullptr) << "Single step not active in thread " << *this; 3559 SingleStepControl* ssc = GetSingleStepControl(); 3560 tlsPtr_.single_step_control = nullptr; 3561 delete ssc; 3562} 3563 3564void Thread::SetDebugInvokeReq(DebugInvokeReq* req) { 3565 CHECK(Dbg::IsDebuggerActive()); 3566 CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this; 3567 CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself"; 3568 CHECK(req != nullptr); 3569 tlsPtr_.debug_invoke_req = req; 3570} 3571 3572void Thread::ClearDebugInvokeReq() { 3573 CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this; 3574 CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself"; 3575 DebugInvokeReq* req = tlsPtr_.debug_invoke_req; 3576 tlsPtr_.debug_invoke_req = nullptr; 3577 delete req; 3578} 3579 3580void Thread::PushVerifier(verifier::MethodVerifier* verifier) { 3581 verifier->link_ = tlsPtr_.method_verifier; 3582 tlsPtr_.method_verifier = verifier; 3583} 3584 3585void Thread::PopVerifier(verifier::MethodVerifier* verifier) { 3586 CHECK_EQ(tlsPtr_.method_verifier, verifier); 3587 tlsPtr_.method_verifier = verifier->link_; 3588} 3589 3590size_t Thread::NumberOfHeldMutexes() const { 3591 size_t count = 0; 3592 for (BaseMutex* mu : tlsPtr_.held_mutexes) { 3593 count += mu != nullptr ? 1 : 0; 3594 } 3595 return count; 3596} 3597 3598void Thread::DeoptimizeWithDeoptimizationException(JValue* result) { 3599 DCHECK_EQ(GetException(), Thread::GetDeoptimizationException()); 3600 ClearException(); 3601 ShadowFrame* shadow_frame = 3602 PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame); 3603 ObjPtr<mirror::Throwable> pending_exception; 3604 bool from_code = false; 3605 PopDeoptimizationContext(result, &pending_exception, &from_code); 3606 SetTopOfStack(nullptr); 3607 SetTopOfShadowStack(shadow_frame); 3608 3609 // Restore the exception that was pending before deoptimization then interpret the 3610 // deoptimized frames. 
void Thread::DeoptimizeWithDeoptimizationException(JValue* result) {
  DCHECK_EQ(GetException(), Thread::GetDeoptimizationException());
  ClearException();
  ShadowFrame* shadow_frame =
      PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame);
  ObjPtr<mirror::Throwable> pending_exception;
  bool from_code = false;
  PopDeoptimizationContext(result, &pending_exception, &from_code);
  SetTopOfStack(nullptr);
  SetTopOfShadowStack(shadow_frame);

  // Restore the exception that was pending before deoptimization, then interpret the
  // deoptimized frames.
  if (pending_exception != nullptr) {
    SetException(pending_exception);
  }
  interpreter::EnterInterpreterFromDeoptimize(this, shadow_frame, from_code, result);
}

void Thread::SetException(ObjPtr<mirror::Throwable> new_exception) {
  CHECK(new_exception != nullptr);
  // TODO: DCHECK(!IsExceptionPending());
  tlsPtr_.exception = new_exception.Ptr();
}

bool Thread::IsAotCompiler() {
  return Runtime::Current()->IsAotCompiler();
}

mirror::Object* Thread::GetPeerFromOtherThread() const {
  DCHECK(tlsPtr_.jpeer == nullptr);
  mirror::Object* peer = tlsPtr_.opeer;
  if (kUseReadBarrier && Current()->GetIsGcMarking()) {
    // We may call Thread::Dump() in the middle of the CC thread flip, in which case this
    // thread's stack may not have been flipped yet and peer may be a from-space (stale) ref.
    // So explicitly mark/forward it here.
    peer = art::ReadBarrier::Mark(peer);
  }
  return peer;
}

void Thread::SetReadBarrierEntrypoints() {
  // Make sure entrypoints aren't null.
  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active */ true);
}

}  // namespace art