thread.cc revision a3856d0d801f066b9b09649b3a17bdbb747f012d
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread.h"

#if !defined(__APPLE__)
#include <sched.h>
#endif

#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
#include <sys/time.h>

#include <algorithm>
#include <bitset>
#include <cerrno>
#include <iostream>
#include <list>
#include <sstream>

#include "android-base/stringprintf.h"

#include "arch/context.h"
#include "arch/context-inl.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "base/memory_tool.h"
#include "base/mutex.h"
#include "base/timing_logger.h"
#include "base/to_str.h"
#include "base/systrace.h"
#include "class_linker-inl.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "dex_file_annotations.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/allocator/rosalloc.h"
#include "gc/heap.h"
#include "gc/space/space-inl.h"
#include "handle_scope-inl.h"
#include "indirect_reference_table-inl.h"
#include "java_vm_ext.h"
#include "jni_internal.h"
#include "mirror/class_loader.h"
#include "mirror/class-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "native_stack_dump.h"
#include "nth_caller_visitor.h"
#include "oat_quick_method_header.h"
#include "obj_ptr-inl.h"
#include "object_lock.h"
#include "quick_exception_handler.h"
#include "quick/quick_method_frame_info.h"
#include "read_barrier-inl.h"
#include "reflection.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
#include "stack.h"
#include "stack_map.h"
#include "thread_list.h"
#include "thread-inl.h"
#include "utils.h"
#include "verifier/method_verifier.h"
#include "verify_object.h"
#include "well_known_classes.h"
#include "interpreter/interpreter.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

namespace art {

using android::base::StringAppendV;
using android::base::StringPrintf;

extern "C" NO_RETURN void artDeoptimize(Thread* self);

bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = nullptr;
const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
Thread* Thread::jit_sensitive_thread_ = nullptr;

static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;

// For implicit overflow checks we reserve an extra piece of memory at the bottom
// of the stack (lowest memory). The higher portion of the memory
// is protected against reads and the lower is available for use while
// throwing the StackOverflow exception.
constexpr size_t kStackOverflowProtectedSize = 4 * kMemoryToolStackGuardSizeScale * KB;

static const char* kThreadNameDuringStartup = "<native thread without managed peer>";

void Thread::InitCardTable() {
  tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
}

static void UnimplementedEntryPoint() {
  UNIMPLEMENTED(FATAL);
}

void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints);
void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_marking);

void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
  CHECK(kUseReadBarrier);
  tls32_.is_gc_marking = is_marking;
  if (kUseReadBarrier && (kRuntimeISA == kX86_64 || kRuntimeISA == kX86)) {
    // Disable entrypoint switching for X86 since we don't always check is_marking with the gray
    // bit. This causes a race between GrayAllDirtyImmuneObjects and FlipThreadRoots where
    // we may try to go slow path with a null entrypoint. The fix is to never do entrypoint
    // switching for x86.
    is_marking = true;
  }
  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, is_marking);
  ResetQuickAllocEntryPointsForThread(is_marking);
}

void Thread::InitTlsEntryPoints() {
  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints);
  uintptr_t* end = reinterpret_cast<uintptr_t*>(
      reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) + sizeof(tlsPtr_.quick_entrypoints));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
  }
  InitEntryPoints(&tlsPtr_.jni_entrypoints, &tlsPtr_.quick_entrypoints);
}

void Thread::ResetQuickAllocEntryPointsForThread(bool is_marking) {
  if (kUseReadBarrier && kRuntimeISA != kX86_64) {
    // Allocation entrypoint switching is currently only implemented for X86_64.
    is_marking = true;
  }
  ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints, is_marking);
}

class DeoptimizationContextRecord {
 public:
  DeoptimizationContextRecord(const JValue& ret_val,
                              bool is_reference,
                              bool from_code,
                              ObjPtr<mirror::Throwable> pending_exception,
                              DeoptimizationContextRecord* link)
      : ret_val_(ret_val),
        is_reference_(is_reference),
        from_code_(from_code),
        pending_exception_(pending_exception.Ptr()),
        link_(link) {}

  JValue GetReturnValue() const { return ret_val_; }
  bool IsReference() const { return is_reference_; }
  bool GetFromCode() const { return from_code_; }
  ObjPtr<mirror::Throwable> GetPendingException() const { return pending_exception_; }
  DeoptimizationContextRecord* GetLink() const { return link_; }
  mirror::Object** GetReturnValueAsGCRoot() {
    DCHECK(is_reference_);
    return ret_val_.GetGCRoot();
  }
  mirror::Object** GetPendingExceptionAsGCRoot() {
    return reinterpret_cast<mirror::Object**>(&pending_exception_);
  }

 private:
  // The value returned by the method at the top of the stack before deoptimization.
  JValue ret_val_;

  // Indicates whether the returned value is a reference. If so, the GC will visit it.
  const bool is_reference_;

  // Whether the context was created from an explicit deoptimization in the code.
  const bool from_code_;

  // The exception that was pending before deoptimization (or null if there was no pending
  // exception).
  mirror::Throwable* pending_exception_;

  // A link to the previous DeoptimizationContextRecord.
  DeoptimizationContextRecord* const link_;

  DISALLOW_COPY_AND_ASSIGN(DeoptimizationContextRecord);
};

class StackedShadowFrameRecord {
 public:
  StackedShadowFrameRecord(ShadowFrame* shadow_frame,
                           StackedShadowFrameType type,
                           StackedShadowFrameRecord* link)
      : shadow_frame_(shadow_frame),
        type_(type),
        link_(link) {}

  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
  StackedShadowFrameType GetType() const { return type_; }
  StackedShadowFrameRecord* GetLink() const { return link_; }

 private:
  ShadowFrame* const shadow_frame_;
  const StackedShadowFrameType type_;
  StackedShadowFrameRecord* const link_;

  DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
};

void Thread::PushDeoptimizationContext(const JValue& return_value,
                                       bool is_reference,
                                       bool from_code,
                                       ObjPtr<mirror::Throwable> exception) {
  DeoptimizationContextRecord* record = new DeoptimizationContextRecord(
      return_value,
      is_reference,
      from_code,
      exception,
      tlsPtr_.deoptimization_context_stack);
  tlsPtr_.deoptimization_context_stack = record;
}

void Thread::PopDeoptimizationContext(JValue* result,
                                      ObjPtr<mirror::Throwable>* exception,
                                      bool* from_code) {
  AssertHasDeoptimizationContext();
  DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
  tlsPtr_.deoptimization_context_stack = record->GetLink();
  result->SetJ(record->GetReturnValue().GetJ());
  *exception = record->GetPendingException();
  *from_code = record->GetFromCode();
  delete record;
}

void Thread::AssertHasDeoptimizationContext() {
  CHECK(tlsPtr_.deoptimization_context_stack != nullptr)
      << "No deoptimization context for thread " << *this;
}

void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
  StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
      sf, type, tlsPtr_.stacked_shadow_frame_record);
  tlsPtr_.stacked_shadow_frame_record = record;
}

ShadowFrame* Thread::PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present) {
  StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
  if (must_be_present) {
    DCHECK(record != nullptr);
  } else {
    if (record == nullptr || record->GetType() != type) {
      return nullptr;
    }
  }
  tlsPtr_.stacked_shadow_frame_record = record->GetLink();
  ShadowFrame* shadow_frame = record->GetShadowFrame();
  delete record;
  return shadow_frame;
}

class FrameIdToShadowFrame {
 public:
  static FrameIdToShadowFrame* Create(size_t frame_id,
                                      ShadowFrame* shadow_frame,
                                      FrameIdToShadowFrame* next,
                                      size_t num_vregs) {
    // Append a bool array at the end to keep track of what vregs are updated by the debugger.
    uint8_t* memory = new uint8_t[sizeof(FrameIdToShadowFrame) + sizeof(bool) * num_vregs];
    return new (memory) FrameIdToShadowFrame(frame_id, shadow_frame, next);
  }

  static void Delete(FrameIdToShadowFrame* f) {
    uint8_t* memory = reinterpret_cast<uint8_t*>(f);
    delete[] memory;
  }

  size_t GetFrameId() const { return frame_id_; }
  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
  FrameIdToShadowFrame* GetNext() const { return next_; }
  void SetNext(FrameIdToShadowFrame* next) { next_ = next; }
  bool* GetUpdatedVRegFlags() {
    return updated_vreg_flags_;
  }

 private:
  FrameIdToShadowFrame(size_t frame_id,
                       ShadowFrame* shadow_frame,
                       FrameIdToShadowFrame* next)
      : frame_id_(frame_id),
        shadow_frame_(shadow_frame),
        next_(next) {}

  const size_t frame_id_;
  ShadowFrame* const shadow_frame_;
  FrameIdToShadowFrame* next_;
  bool updated_vreg_flags_[0];

  DISALLOW_COPY_AND_ASSIGN(FrameIdToShadowFrame);
};

static FrameIdToShadowFrame* FindFrameIdToShadowFrame(FrameIdToShadowFrame* head,
                                                      size_t frame_id) {
  FrameIdToShadowFrame* found = nullptr;
  for (FrameIdToShadowFrame* record = head; record != nullptr; record = record->GetNext()) {
    if (record->GetFrameId() == frame_id) {
      if (kIsDebugBuild) {
        // Sanity check we have at most one record for this frame.
        CHECK(found == nullptr) << "Multiple records for the frame " << frame_id;
        found = record;
      } else {
        return record;
      }
    }
  }
  return found;
}

ShadowFrame* Thread::FindDebuggerShadowFrame(size_t frame_id) {
  FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
      tlsPtr_.frame_id_to_shadow_frame, frame_id);
  if (record != nullptr) {
    return record->GetShadowFrame();
  }
  return nullptr;
}

// Must only be called when FindDebuggerShadowFrame(frame_id) returns non-nullptr.
bool* Thread::GetUpdatedVRegFlags(size_t frame_id) {
  FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
      tlsPtr_.frame_id_to_shadow_frame, frame_id);
  CHECK(record != nullptr);
  return record->GetUpdatedVRegFlags();
}

ShadowFrame* Thread::FindOrCreateDebuggerShadowFrame(size_t frame_id,
                                                     uint32_t num_vregs,
                                                     ArtMethod* method,
                                                     uint32_t dex_pc) {
  ShadowFrame* shadow_frame = FindDebuggerShadowFrame(frame_id);
  if (shadow_frame != nullptr) {
    return shadow_frame;
  }
  VLOG(deopt) << "Create pre-deopted ShadowFrame for " << ArtMethod::PrettyMethod(method);
  shadow_frame = ShadowFrame::CreateDeoptimizedFrame(num_vregs, nullptr, method, dex_pc);
  FrameIdToShadowFrame* record = FrameIdToShadowFrame::Create(frame_id,
                                                              shadow_frame,
                                                              tlsPtr_.frame_id_to_shadow_frame,
                                                              num_vregs);
  for (uint32_t i = 0; i < num_vregs; i++) {
    // Do this to clear all references for root visitors.
    shadow_frame->SetVRegReference(i, nullptr);
    // This flag will be changed to true if the debugger modifies the value.
    record->GetUpdatedVRegFlags()[i] = false;
  }
  tlsPtr_.frame_id_to_shadow_frame = record;
  return shadow_frame;
}

void Thread::RemoveDebuggerShadowFrameMapping(size_t frame_id) {
  FrameIdToShadowFrame* head = tlsPtr_.frame_id_to_shadow_frame;
  if (head->GetFrameId() == frame_id) {
    tlsPtr_.frame_id_to_shadow_frame = head->GetNext();
    FrameIdToShadowFrame::Delete(head);
    return;
  }
  FrameIdToShadowFrame* prev = head;
  for (FrameIdToShadowFrame* record = head->GetNext();
       record != nullptr;
       prev = record, record = record->GetNext()) {
    if (record->GetFrameId() == frame_id) {
      prev->SetNext(record->GetNext());
      FrameIdToShadowFrame::Delete(record);
      return;
    }
  }
  LOG(FATAL) << "No shadow frame for frame " << frame_id;
  UNREACHABLE();
}

void Thread::InitTid() {
  tls32_.tid = ::art::GetTid();
}

void Thread::InitAfterFork() {
  // One thread (us) survived the fork, but we have a new tid so we need to
  // update the value stashed in this Thread*.
  InitTid();
}

void* Thread::CreateCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
    return nullptr;
  }
  {
    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
    // after self->Init().
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    // Check that if we got here we cannot be shutting down (as shutdown should never have started
    // while threads are being born).
    CHECK(!runtime->IsShuttingDownLocked());
    // Note: given that the JNIEnv is created in the parent thread, the only failure point here is
    // a mess in InitStackHwm. We do not have a reasonable way to recover from that, so abort
    // the runtime in such a case. In case this ever changes, we need to make sure here to
    // delete the tmp_jni_env, as we own it at this point.
    CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM(), self->tlsPtr_.tmp_jni_env));
    self->tlsPtr_.tmp_jni_env = nullptr;
    Runtime::Current()->EndThreadBirth();
  }
  {
    ScopedObjectAccess soa(self);
    self->InitStringEntryPoints();

    // Copy peer into self, deleting global reference when done.
    CHECK(self->tlsPtr_.jpeer != nullptr);
    self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr();
    self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
    self->tlsPtr_.jpeer = nullptr;
    self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str());

    ArtField* priorityField = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority);
    self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));

    runtime->GetRuntimeCallbacks()->ThreadStart(self);

    // Invoke the 'run' method of our java.lang.Thread.
    ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer;
    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
    ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
    InvokeVirtualOrInterfaceWithJValues(soa, ref.get(), mid, nullptr);
  }
  // Detach and delete self.
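  // (ThreadList::Unregister() removes this thread from the thread list and is responsible for
  // deleting the Thread object, so `self` must not be used after the call below.)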
  Runtime::Current()->GetThreadList()->Unregister(self);

  return nullptr;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
                                  ObjPtr<mirror::Object> thread_peer) {
  ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer);
  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
  // to stop it from going away.
  if (kIsDebugBuild) {
    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
    if (result != nullptr && !result->IsSuspended()) {
      Locks::thread_list_lock_->AssertHeld(soa.Self());
    }
  }
  return result;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
                                  jobject java_thread) {
  return FromManagedThread(soa, soa.Decode<mirror::Object>(java_thread).Ptr());
}

static size_t FixStackSize(size_t stack_size) {
  // A stack size of zero means "use the default".
  if (stack_size == 0) {
    stack_size = Runtime::Current()->GetDefaultStackSize();
  }

  // Dalvik used the bionic pthread default stack size for native threads,
  // so include that here to support apps that expect large native stacks.
  stack_size += 1 * MB;

  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
  if (stack_size < PTHREAD_STACK_MIN) {
    stack_size = PTHREAD_STACK_MIN;
  }

  if (Runtime::Current()->ExplicitStackOverflowChecks()) {
    // It's likely that callers are trying to ensure they have at least a certain amount of
    // stack space, so we should add our reserved space on top of what they requested, rather
    // than implicitly take it away from them.
    stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
  } else {
    // If we are going to use implicit stack checks, allocate space for the protected
    // region at the bottom of the stack.
    stack_size += Thread::kStackOverflowImplicitCheckSize +
        GetStackOverflowReservedBytes(kRuntimeISA);
  }

  // Some systems require the stack size to be a multiple of the system page size, so round up.
  stack_size = RoundUp(stack_size, kPageSize);

  return stack_size;
}

// Return the nearest page-aligned address below the current stack top.
NO_INLINE
static uint8_t* FindStackTop() {
  return reinterpret_cast<uint8_t*>(
      AlignDown(__builtin_frame_address(0), kPageSize));
}

// Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack
// overflow is detected. It is located right below the stack_begin_.
ATTRIBUTE_NO_SANITIZE_ADDRESS
void Thread::InstallImplicitProtection() {
  uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
  // Page containing current top of stack.
  uint8_t* stack_top = FindStackTop();

  // Try to directly protect the stack.
  VLOG(threads) << "installing stack protected region at " << std::hex <<
      static_cast<void*>(pregion) << " to " <<
      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
  if (ProtectStack(/* fatal_on_error */ false)) {
    // Tell the kernel that we won't be needing these pages any more.
    // NB. madvise will probably write zeroes into the memory (on linux it does).
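    // (On Linux, MADV_DONTNEED on an anonymous mapping discards the pages; the next touch sees
    // zero-filled memory, which is what the note above refers to.)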
    uint32_t unwanted_size = stack_top - pregion - kPageSize;
    madvise(pregion, unwanted_size, MADV_DONTNEED);
    return;
  }

  // There is a little complexity here that deserves a special mention. On some
  // architectures, the stack is created using a VM_GROWSDOWN flag
  // to prevent memory being allocated when it's not needed. This flag makes the
  // kernel only allocate memory for the stack by growing down in memory. Because we
  // want to put an mprotected region far away from that at the stack top, we need
  // to make sure the pages for the stack are mapped in before we call mprotect.
  //
  // The failed mprotect in UnprotectStack is an indication of a thread with VM_GROWSDOWN
  // with a non-mapped stack (usually only the main thread).
  //
  // We map in the stack by reading every page from the stack bottom (highest address)
  // to the stack top. (We then madvise this away.) This must be done by reading from the
  // current stack pointer downwards. Any access more than a page below the current SP
  // might cause a segv.
  // TODO: This comment may be out of date. It seems possible to speed this up. As
  // this is normally done once in the zygote on startup, ignore for now.
  //
  // AddressSanitizer does not like the part of this function that reads every stack page.
  // Looks a lot like an out-of-bounds access.

  // (Defensively) first remove the protection on the protected region as we will want to read
  // and write it. Ignore errors.
  UnprotectStack();

  VLOG(threads) << "Need to map in stack for thread at " << std::hex <<
      static_cast<void*>(pregion);

  // Read every page from the high address to the low.
  volatile uint8_t dont_optimize_this;
  UNUSED(dont_optimize_this);
  for (uint8_t* p = stack_top; p >= pregion; p -= kPageSize) {
    dont_optimize_this = *p;
  }

  VLOG(threads) << "(again) installing stack protected region at " << std::hex <<
      static_cast<void*>(pregion) << " to " <<
      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);

  // Protect the bottom of the stack to prevent read/write to it.
  ProtectStack(/* fatal_on_error */ true);

  // Tell the kernel that we won't be needing these pages any more.
  // NB. madvise will probably write zeroes into the memory (on linux it does).
  uint32_t unwanted_size = stack_top - pregion - kPageSize;
  madvise(pregion, unwanted_size, MADV_DONTNEED);
}

void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
  CHECK(java_peer != nullptr);
  Thread* self = static_cast<JNIEnvExt*>(env)->self;

  if (VLOG_IS_ON(threads)) {
    ScopedObjectAccess soa(env);

    ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
    ObjPtr<mirror::String> java_name =
        f->GetObject(soa.Decode<mirror::Object>(java_peer))->AsString();
    std::string thread_name;
    if (java_name != nullptr) {
      thread_name = java_name->ToModifiedUtf8();
    } else {
      thread_name = "(Unnamed)";
    }

    VLOG(threads) << "Creating native thread for " << thread_name;
    self->Dump(LOG_STREAM(INFO));
  }

  Runtime* runtime = Runtime::Current();

  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
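  // (StartThreadBirth()/EndThreadBirth() bracket thread creation so that runtime shutdown can
  // wait for in-flight births to finish; see Runtime::StartThreadBirth. The matching
  // EndThreadBirth() happens in CreateCallback on success, or in the failure path below.)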
  bool thread_start_during_shutdown = false;
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDownLocked()) {
      thread_start_during_shutdown = true;
    } else {
      runtime->StartThreadBirth();
    }
  }
  if (thread_start_during_shutdown) {
    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
    return;
  }

  Thread* child_thread = new Thread(is_daemon);
  // Use global JNI ref to hold peer live while child thread starts.
  child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
  stack_size = FixStackSize(stack_size);

  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing
  // to assign it.
  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
                    reinterpret_cast<jlong>(child_thread));

  // Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and
  // do not have a good way to report this on the child's side.
  std::string error_msg;
  std::unique_ptr<JNIEnvExt> child_jni_env_ext(
      JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM(), &error_msg));

  int pthread_create_result = 0;
  if (child_jni_env_ext.get() != nullptr) {
    pthread_t new_pthread;
    pthread_attr_t attr;
    child_thread->tlsPtr_.tmp_jni_env = child_jni_env_ext.get();
    CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
    CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
                       "PTHREAD_CREATE_DETACHED");
    CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
    pthread_create_result = pthread_create(&new_pthread,
                                           &attr,
                                           Thread::CreateCallback,
                                           child_thread);
    CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");

    if (pthread_create_result == 0) {
      // pthread_create started the new thread. The child is now responsible for managing the
      // JNIEnvExt we created.
      // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
      // between the threads.
      child_jni_env_ext.release();
      return;
    }
  }

  // Either JNIEnvExt::Create or pthread_create(3) failed, so clean up.
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    runtime->EndThreadBirth();
  }
  // Manually delete the global reference since Thread::Init will not have been run.
  env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer);
  child_thread->tlsPtr_.jpeer = nullptr;
  delete child_thread;
  child_thread = nullptr;
  // TODO: remove from thread group?
  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
  {
    std::string msg(child_jni_env_ext.get() == nullptr ?
        StringPrintf("Could not allocate JNI Env: %s", error_msg.c_str()) :
        StringPrintf("pthread_create (%s stack) failed: %s",
                     PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
    ScopedObjectAccess soa(env);
    soa.Self()->ThrowOutOfMemoryError(msg.c_str());
  }
}

bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_env_ext) {
  // This function does all the initialization that must be run by the native thread it applies to.
  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
  // we can handshake with the corresponding native thread when it's ready.) Check this native
  // thread hasn't been through here already...
  CHECK(Thread::Current() == nullptr);

  // Set pthread_self_ ahead of pthread_setspecific, which makes Thread::Current() work; this
  // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
  tlsPtr_.pthread_self = pthread_self();
  CHECK(is_started_);

  SetUpAlternateSignalStack();
  if (!InitStackHwm()) {
    return false;
  }
  InitCpu();
  InitTlsEntryPoints();
  RemoveSuspendTrigger();
  InitCardTable();
  InitTid();
  interpreter::InitInterpreterTls(this);

#ifdef ART_TARGET_ANDROID
  __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
#else
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
#endif
  DCHECK_EQ(Thread::Current(), this);

  tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);

  if (jni_env_ext != nullptr) {
    DCHECK_EQ(jni_env_ext->vm, java_vm);
    DCHECK_EQ(jni_env_ext->self, this);
    tlsPtr_.jni_env = jni_env_ext;
  } else {
    std::string error_msg;
    tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm, &error_msg);
    if (tlsPtr_.jni_env == nullptr) {
      LOG(ERROR) << "Failed to create JNIEnvExt: " << error_msg;
      return false;
    }
  }

  thread_list->Register(this);
  return true;
}

template <typename PeerAction>
Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_action) {
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
    return nullptr;
  }
  Thread* self;
  {
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDownLocked()) {
      LOG(WARNING) << "Thread attaching while runtime is shutting down: " << thread_name;
      return nullptr;
    } else {
      Runtime::Current()->StartThreadBirth();
      self = new Thread(as_daemon);
      bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
      Runtime::Current()->EndThreadBirth();
      if (!init_success) {
        delete self;
        return nullptr;
      }
    }
  }

  self->InitStringEntryPoints();

  CHECK_NE(self->GetState(), kRunnable);
  self->SetState(kNative);

  // Run the action that is acting on the peer.
  if (!peer_action(self)) {
    runtime->GetThreadList()->Unregister(self);
    // Unregister deletes self, no need to do this here.
    return nullptr;
  }

  if (VLOG_IS_ON(threads)) {
    if (thread_name != nullptr) {
      VLOG(threads) << "Attaching thread " << thread_name;
    } else {
      VLOG(threads) << "Attaching unnamed thread.";
    }
    ScopedObjectAccess soa(self);
    self->Dump(LOG_STREAM(INFO));
  }

  {
    ScopedObjectAccess soa(self);
    runtime->GetRuntimeCallbacks()->ThreadStart(self);
  }

  return self;
}

Thread* Thread::Attach(const char* thread_name,
                       bool as_daemon,
                       jobject thread_group,
                       bool create_peer) {
  auto create_peer_action = [&](Thread* self) {
    // If we're the main thread, ClassLinker won't be created until after we're attached,
    // so that thread needs a two-stage attach. Regular threads don't need this hack.
    // In the compiler, all threads need this hack, because no-one's going to be getting
    // a native peer!
    if (create_peer) {
      self->CreatePeer(thread_name, as_daemon, thread_group);
      if (self->IsExceptionPending()) {
        // We cannot keep the exception around, as we're deleting self. Try to be helpful and log
        // it.
        {
          ScopedObjectAccess soa(self);
          LOG(ERROR) << "Exception creating thread peer:";
          LOG(ERROR) << self->GetException()->Dump();
          self->ClearException();
        }
        return false;
      }
    } else {
      // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
      if (thread_name != nullptr) {
        self->tlsPtr_.name->assign(thread_name);
        ::art::SetThreadName(thread_name);
      } else if (self->GetJniEnv()->check_jni) {
        LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
      }
    }
    return true;
  };
  return Attach(thread_name, as_daemon, create_peer_action);
}

Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_peer) {
  auto set_peer_action = [&](Thread* self) {
    // Install the given peer.
    {
      DCHECK(self == Thread::Current());
      ScopedObjectAccess soa(self);
      self->tlsPtr_.opeer = soa.Decode<mirror::Object>(thread_peer).Ptr();
    }
    self->GetJniEnv()->SetLongField(thread_peer,
                                    WellKnownClasses::java_lang_Thread_nativePeer,
                                    reinterpret_cast<jlong>(self));
    return true;
  };
  return Attach(thread_name, as_daemon, set_peer_action);
}

void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->IsStarted());
  JNIEnv* env = tlsPtr_.jni_env;

  if (thread_group == nullptr) {
    thread_group = runtime->GetMainThreadGroup();
  }
  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
  // Add missing null check in case of OOM b/18297817
  if (name != nullptr && thread_name.get() == nullptr) {
    CHECK(IsExceptionPending());
    return;
  }
  jint thread_priority = GetNativePriority();
  jboolean thread_is_daemon = as_daemon;

  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
  if (peer.get() == nullptr) {
    CHECK(IsExceptionPending());
    return;
  }
  {
    ScopedObjectAccess soa(this);
    tlsPtr_.opeer = soa.Decode<mirror::Object>(peer.get()).Ptr();
  }
  env->CallNonvirtualVoidMethod(peer.get(),
                                WellKnownClasses::java_lang_Thread,
                                WellKnownClasses::java_lang_Thread_init,
                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
  if (IsExceptionPending()) {
    return;
  }

  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());
  env->SetLongField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
                    reinterpret_cast<jlong>(self));

  ScopedObjectAccess soa(self);
  StackHandleScope<1> hs(self);
  MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName()));
  if (peer_thread_name == nullptr) {
    // The Thread constructor should have set the Thread.name to a
    // non-null value. However, because we can run without code
    // available (in the compiler, in tests), we manually assign the
    // fields the constructor should have set.
    if (runtime->IsActiveTransaction()) {
      InitPeer<true>(soa,
                     tlsPtr_.opeer,
                     thread_is_daemon,
                     thread_group,
                     thread_name.get(),
                     thread_priority);
    } else {
      InitPeer<false>(soa,
                      tlsPtr_.opeer,
                      thread_is_daemon,
                      thread_group,
                      thread_name.get(),
                      thread_priority);
    }
    peer_thread_name.Assign(GetThreadName());
  }
  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
  if (peer_thread_name != nullptr) {
    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
  }
}

jobject Thread::CreateCompileTimePeer(JNIEnv* env,
                                      const char* name,
                                      bool as_daemon,
                                      jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(!runtime->IsStarted());

  if (thread_group == nullptr) {
    thread_group = runtime->GetMainThreadGroup();
  }
  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
  // Add missing null check in case of OOM b/18297817
  if (name != nullptr && thread_name.get() == nullptr) {
    CHECK(Thread::Current()->IsExceptionPending());
    return nullptr;
  }
  jint thread_priority = GetNativePriority();
  jboolean thread_is_daemon = as_daemon;

  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
  if (peer.get() == nullptr) {
    CHECK(Thread::Current()->IsExceptionPending());
    return nullptr;
  }

  // We cannot call Thread.init, as it will recursively ask for currentThread.

  // The Thread constructor should have set the Thread.name to a
  // non-null value. However, because we can run without code
  // available (in the compiler, in tests), we manually assign the
  // fields the constructor should have set.
  ScopedObjectAccessUnchecked soa(Thread::Current());
  if (runtime->IsActiveTransaction()) {
    InitPeer<true>(soa,
                   soa.Decode<mirror::Object>(peer.get()),
                   thread_is_daemon,
                   thread_group,
                   thread_name.get(),
                   thread_priority);
  } else {
    InitPeer<false>(soa,
                    soa.Decode<mirror::Object>(peer.get()),
                    thread_is_daemon,
                    thread_group,
                    thread_name.get(),
                    thread_priority);
  }

  return peer.release();
}

template<bool kTransactionActive>
void Thread::InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
                      ObjPtr<mirror::Object> peer,
                      jboolean thread_is_daemon,
                      jobject thread_group,
                      jobject thread_name,
                      jint thread_priority) {
  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon)->
      SetBoolean<kTransactionActive>(peer, thread_is_daemon);
  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)->
      SetObject<kTransactionActive>(peer, soa.Decode<mirror::Object>(thread_group));
  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name)->
      SetObject<kTransactionActive>(peer, soa.Decode<mirror::Object>(thread_name));
  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority)->
      SetInt<kTransactionActive>(peer, thread_priority);
}

void Thread::SetThreadName(const char* name) {
  tlsPtr_.name->assign(name);
  ::art::SetThreadName(name);
  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
}

static void GetThreadStack(pthread_t thread,
                           void** stack_base,
                           size_t* stack_size,
                           size_t* guard_size) {
#if defined(__APPLE__)
  *stack_size = pthread_get_stacksize_np(thread);
  void* stack_addr = pthread_get_stackaddr_np(thread);

  // Check whether stack_addr is the base or end of the stack.
  // (On Mac OS 10.7, it's the end.)
  int stack_variable;
  if (stack_addr > &stack_variable) {
    *stack_base = reinterpret_cast<uint8_t*>(stack_addr) - *stack_size;
  } else {
    *stack_base = stack_addr;
  }

  // This is wrong, but there doesn't seem to be a way to get the actual value on the Mac.
  pthread_attr_t attributes;
  CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
#else
  pthread_attr_t attributes;
  CHECK_PTHREAD_CALL(pthread_getattr_np, (thread, &attributes), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, stack_base, stack_size), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);

#if defined(__GLIBC__)
  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
  // will be broken because we'll die long before we get close to 2GB.
  bool is_main_thread = (::art::GetTid() == getpid());
  if (is_main_thread) {
    rlimit stack_limit;
    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
    }
    if (stack_limit.rlim_cur == RLIM_INFINITY) {
      size_t old_stack_size = *stack_size;

      // Use the kernel default limit as our size, and adjust the base to match.
      *stack_size = 8 * MB;
      *stack_base = reinterpret_cast<uint8_t*>(*stack_base) + (old_stack_size - *stack_size);

      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
                    << " to " << PrettySize(*stack_size)
                    << " with base " << *stack_base;
    }
  }
#endif

#endif
}

bool Thread::InitStackHwm() {
  void* read_stack_base;
  size_t read_stack_size;
  size_t read_guard_size;
  GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size);

  tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base);
  tlsPtr_.stack_size = read_stack_size;

  // The minimum stack size we can cope with is the overflow reserved bytes (typically
  // 8K) + the protected region size (4K) + another page (4K). Typically this will
  // be 8+4+4 = 16K. The thread won't be able to do much with this stack: even the GC takes
  // between 8K and 12K.
  uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
      + 4 * KB;
  if (read_stack_size <= min_stack) {
    // Note, as we know the stack is small, avoid operations that could use a lot of stack.
    LogHelper::LogLineLowStack(__PRETTY_FUNCTION__,
                               __LINE__,
                               ::android::base::ERROR,
                               "Attempt to attach a thread with a too-small stack");
    return false;
  }

  // This is included in the SIGQUIT output, but it's useful here for thread debugging.
  VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)",
                                read_stack_base,
                                PrettySize(read_stack_size).c_str(),
                                PrettySize(read_guard_size).c_str());

  // Set stack_end_ to the bottom of the stack saving space for stack overflows

  Runtime* runtime = Runtime::Current();
  bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();

  // Valgrind on arm doesn't give the right values here. Do not install the guard page, and
  // effectively disable stack overflow checks (we'll get segfaults, potentially) by setting
  // stack_begin to 0.
  const bool valgrind_on_arm =
      (kRuntimeISA == kArm || kRuntimeISA == kArm64) &&
      kMemoryToolIsValgrind &&
      RUNNING_ON_MEMORY_TOOL != 0;
  if (valgrind_on_arm) {
    tlsPtr_.stack_begin = nullptr;
  }

  ResetDefaultStackEnd();

  // Install the protected region if we are doing implicit overflow checks.
  if (implicit_stack_check && !valgrind_on_arm) {
    // The thread might have a protected region at the bottom. We need
    // to install our own region so we need to move the limits
    // of the stack to make room for it.

    tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
    tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
    tlsPtr_.stack_size -= read_guard_size;

    InstallImplicitProtection();
  }

  // Sanity check.
  CHECK_GT(FindStackTop(), reinterpret_cast<void*>(tlsPtr_.stack_end));

  return true;
}

void Thread::ShortDump(std::ostream& os) const {
  os << "Thread[";
  if (GetThreadId() != 0) {
    // If we're in kStarting, we won't have a thin lock id or tid yet.
    os << GetThreadId()
       << ",tid=" << GetTid() << ',';
  }
  os << GetState()
     << ",Thread*=" << this
     << ",peer=" << tlsPtr_.opeer
     << ",\"" << (tlsPtr_.name != nullptr ? *tlsPtr_.name : "null") << "\""
     << "]";
}

void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map,
                  bool force_dump_stack) const {
  DumpState(os);
  DumpStack(os, dump_native_stack, backtrace_map, force_dump_stack);
}

mirror::String* Thread::GetThreadName() const {
  ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
  if (tlsPtr_.opeer == nullptr) {
    return nullptr;
  }
  ObjPtr<mirror::Object> name = f->GetObject(tlsPtr_.opeer);
  return name == nullptr ? nullptr : name->AsString();
}

void Thread::GetThreadName(std::string& name) const {
  name.assign(*tlsPtr_.name);
}

uint64_t Thread::GetCpuMicroTime() const {
#if defined(__linux__)
  clockid_t cpu_clock_id;
  pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
  timespec now;
  clock_gettime(cpu_clock_id, &now);
  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
#else  // __APPLE__
  UNIMPLEMENTED(WARNING);
  return -1;
#endif
}

// Attempt to rectify locks so that we dump thread list with required locks before exiting.
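// This is only reached when a suspend count is about to go negative, i.e. the state is already
// inconsistent; the TryLock calls below are best-effort so the dump can proceed (with a warning)
// even when a lock cannot be acquired.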
static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
  LOG(ERROR) << *thread << " suspend count already zero.";
  Locks::thread_suspend_count_lock_->Unlock(self);
  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
    Locks::mutator_lock_->SharedTryLock(self);
    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
    }
  }
  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
    Locks::thread_list_lock_->TryLock(self);
    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
    }
  }
  std::ostringstream ss;
  Runtime::Current()->GetThreadList()->Dump(ss);
  LOG(FATAL) << ss.str();
}

bool Thread::ModifySuspendCountInternal(Thread* self,
                                        int delta,
                                        AtomicInteger* suspend_barrier,
                                        bool for_debugger) {
  if (kIsDebugBuild) {
    DCHECK(delta == -1 || delta == +1 || delta == -tls32_.debug_suspend_count)
        << delta << " " << tls32_.debug_suspend_count << " " << this;
    DCHECK_GE(tls32_.suspend_count, tls32_.debug_suspend_count) << this;
    Locks::thread_suspend_count_lock_->AssertHeld(self);
    if (this != self && !IsSuspended()) {
      Locks::thread_list_lock_->AssertHeld(self);
    }
  }
  if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
    UnsafeLogFatalForSuspendCount(self, this);
    return false;
  }

  if (kUseReadBarrier && delta > 0 && this != self && tlsPtr_.flip_function != nullptr) {
    // Force retry of a suspend request if it's in the middle of a thread flip to avoid a
    // deadlock. b/31683379.
    return false;
  }

  uint16_t flags = kSuspendRequest;
  if (delta > 0 && suspend_barrier != nullptr) {
    uint32_t available_barrier = kMaxSuspendBarriers;
    for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
      if (tlsPtr_.active_suspend_barriers[i] == nullptr) {
        available_barrier = i;
        break;
      }
    }
    if (available_barrier == kMaxSuspendBarriers) {
      // No barrier spaces available, we can't add another.
      return false;
    }
    tlsPtr_.active_suspend_barriers[available_barrier] = suspend_barrier;
    flags |= kActiveSuspendBarrier;
  }

  tls32_.suspend_count += delta;
  if (for_debugger) {
    tls32_.debug_suspend_count += delta;
  }

  if (tls32_.suspend_count == 0) {
    AtomicClearFlag(kSuspendRequest);
  } else {
    // Two bits might be set simultaneously.
    tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flags);
    TriggerSuspend();
  }
  return true;
}

bool Thread::PassActiveSuspendBarriers(Thread* self) {
  // Grab the suspend_count lock and copy the current set of
  // barriers. Then clear the list and the flag. The ModifySuspendCount
  // function requires the lock so we prevent a race between setting
  // the kActiveSuspendBarrier flag and clearing it.
  AtomicInteger* pass_barriers[kMaxSuspendBarriers];
  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    if (!ReadFlag(kActiveSuspendBarrier)) {
      // quick exit test: the barriers have already been claimed - this is
      // possible as there may be a race to claim and it doesn't matter
      // who wins.
      // All of the callers of this function (except the SuspendAllInternal)
      // will first test the kActiveSuspendBarrier flag without lock. Here
      // double-check whether the barrier has been passed with the
      // suspend_count lock.
      return false;
    }

    for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
      pass_barriers[i] = tlsPtr_.active_suspend_barriers[i];
      tlsPtr_.active_suspend_barriers[i] = nullptr;
    }
    AtomicClearFlag(kActiveSuspendBarrier);
  }

  uint32_t barrier_count = 0;
  for (uint32_t i = 0; i < kMaxSuspendBarriers; i++) {
    AtomicInteger* pending_threads = pass_barriers[i];
    if (pending_threads != nullptr) {
      bool done = false;
      do {
        int32_t cur_val = pending_threads->LoadRelaxed();
        CHECK_GT(cur_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << cur_val;
        // Reduce value by 1.
        done = pending_threads->CompareExchangeWeakRelaxed(cur_val, cur_val - 1);
#if ART_USE_FUTEXES
        if (done && (cur_val - 1) == 0) {  // Weak CAS may fail spuriously.
          futex(pending_threads->Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
        }
#endif
      } while (!done);
      ++barrier_count;
    }
  }
  CHECK_GT(barrier_count, 0U);
  return true;
}

void Thread::ClearSuspendBarrier(AtomicInteger* target) {
  CHECK(ReadFlag(kActiveSuspendBarrier));
  bool clear_flag = true;
  for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
    AtomicInteger* ptr = tlsPtr_.active_suspend_barriers[i];
    if (ptr == target) {
      tlsPtr_.active_suspend_barriers[i] = nullptr;
    } else if (ptr != nullptr) {
      clear_flag = false;
    }
  }
  if (LIKELY(clear_flag)) {
    AtomicClearFlag(kActiveSuspendBarrier);
  }
}

void Thread::RunCheckpointFunction() {
  bool done = false;
  do {
    // Grab the suspend_count lock and copy the checkpoints one by one. When the last checkpoint is
    // copied, clear the list and the flag. The RequestCheckpoint function will also grab this lock
    // to prevent a race between setting the kCheckpointRequest flag and clearing it.
    Closure* checkpoint = nullptr;
    {
      MutexLock mu(this, *Locks::thread_suspend_count_lock_);
      if (tlsPtr_.checkpoint_function != nullptr) {
        checkpoint = tlsPtr_.checkpoint_function;
        if (!checkpoint_overflow_.empty()) {
          // Overflow list not empty, copy the first one out and continue.
          tlsPtr_.checkpoint_function = checkpoint_overflow_.front();
          checkpoint_overflow_.pop_front();
        } else {
          // No overflow checkpoints, this means that we are on the last pending checkpoint.
          tlsPtr_.checkpoint_function = nullptr;
          AtomicClearFlag(kCheckpointRequest);
          done = true;
        }
      } else {
        LOG(FATAL) << "Checkpoint flag set without pending checkpoint";
      }
    }

    // Outside the lock, run the checkpoint functions that we collected.
    ScopedTrace trace("Run checkpoint function");
    DCHECK(checkpoint != nullptr);
    checkpoint->Run(this);
  } while (!done);
}

void Thread::RunEmptyCheckpoint() {
  DCHECK_EQ(Thread::Current(), this);
  AtomicClearFlag(kEmptyCheckpointRequest);
  Runtime::Current()->GetThreadList()->EmptyCheckpointBarrier()->Pass(this);
}

bool Thread::RequestCheckpoint(Closure* function) {
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  if (old_state_and_flags.as_struct.state != kRunnable) {
    return false;  // Fail, thread is suspended and so can't run a checkpoint.
  }

  // We must be runnable to request a checkpoint.
  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
  union StateAndFlags new_state_and_flags;
  new_state_and_flags.as_int = old_state_and_flags.as_int;
  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
      old_state_and_flags.as_int, new_state_and_flags.as_int);
  if (success) {
    // Succeeded setting checkpoint flag, now insert the actual checkpoint.
    if (tlsPtr_.checkpoint_function == nullptr) {
      tlsPtr_.checkpoint_function = function;
    } else {
      checkpoint_overflow_.push_back(function);
    }
    CHECK_EQ(ReadFlag(kCheckpointRequest), true);
    TriggerSuspend();
  }
  return success;
}

bool Thread::RequestEmptyCheckpoint() {
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  if (old_state_and_flags.as_struct.state != kRunnable) {
    // If it's not runnable, we don't need to do anything because it won't be in the middle of a
    // heap access (eg. the read barrier).
    return false;
  }

  // We must be runnable to request a checkpoint.
  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
  union StateAndFlags new_state_and_flags;
  new_state_and_flags.as_int = old_state_and_flags.as_int;
  new_state_and_flags.as_struct.flags |= kEmptyCheckpointRequest;
  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
      old_state_and_flags.as_int, new_state_and_flags.as_int);
  if (success) {
    TriggerSuspend();
  }
  return success;
}

class BarrierClosure : public Closure {
 public:
  explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}

  void Run(Thread* self) OVERRIDE {
    wrapped_->Run(self);
    barrier_.Pass(self);
  }

  void Wait(Thread* self) {
    barrier_.Increment(self, 1);
  }

 private:
  Closure* wrapped_;
  Barrier barrier_;
};

void Thread::RequestSynchronousCheckpoint(Closure* function) {
  if (this == Thread::Current()) {
    // Asked to run on this thread. Just run.
    function->Run(this);
    return;
  }
  Thread* self = Thread::Current();

  // The current thread is not this thread.

  for (;;) {
    // If this thread is runnable, try to schedule a checkpoint. Do some gymnastics to not hold the
    // suspend-count lock for too long.
    if (GetState() == ThreadState::kRunnable) {
      BarrierClosure barrier_closure(function);
      bool installed = false;
      {
        MutexLock mu(self, *Locks::thread_suspend_count_lock_);
        installed = RequestCheckpoint(&barrier_closure);
      }
      if (installed) {
        barrier_closure.Wait(self);
        return;
      }
      // Fall-through.
    }

    // This thread is not runnable, make sure we stay suspended, then run the checkpoint.
    // Note: ModifySuspendCountInternal also expects the thread_list_lock to be held in
    // certain situations.
    {
      MutexLock mu(self, *Locks::thread_list_lock_);
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);

      if (!ModifySuspendCount(self, +1, nullptr, false)) {
        // Just retry the loop.
        sched_yield();
        continue;
      }
    }

    while (GetState() == ThreadState::kRunnable) {
      // We became runnable again. Wait till the suspend triggered in ModifySuspendCount
      // moves us to suspended.
      sched_yield();
    }

    function->Run(this);

    {
      MutexLock mu(self, *Locks::thread_list_lock_);
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);

      DCHECK_NE(GetState(), ThreadState::kRunnable);
      bool updated = ModifySuspendCount(self, -1, nullptr, false);
      DCHECK(updated);
    }

    return;  // We're done, break out of the loop.
  }
}

Closure* Thread::GetFlipFunction() {
  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
  Closure* func;
  do {
    func = atomic_func->LoadRelaxed();
    if (func == nullptr) {
      return nullptr;
    }
  } while (!atomic_func->CompareExchangeWeakSequentiallyConsistent(func, nullptr));
  DCHECK(func != nullptr);
  return func;
}

void Thread::SetFlipFunction(Closure* function) {
  CHECK(function != nullptr);
  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
  atomic_func->StoreSequentiallyConsistent(function);
}

void Thread::FullSuspendCheck() {
  ScopedTrace trace(__FUNCTION__);
  VLOG(threads) << this << " self-suspending";
  // Make thread appear suspended to other threads, release mutator_lock_.
  // Transition to suspended and back to runnable, re-acquire share on mutator_lock_.
  ScopedThreadSuspension(this, kSuspended);
  VLOG(threads) << this << " self-reviving";
}

static std::string GetSchedulerGroupName(pid_t tid) {
  // /proc/<pid>/cgroup looks like this:
  // 2:devices:/
  // 1:cpuacct,cpu:/
  // We want the third field from the line whose second field contains the "cpu" token.
  std::string cgroup_file;
  if (!ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid), &cgroup_file)) {
    return "";
  }
  std::vector<std::string> cgroup_lines;
  Split(cgroup_file, '\n', &cgroup_lines);
  for (size_t i = 0; i < cgroup_lines.size(); ++i) {
    std::vector<std::string> cgroup_fields;
    Split(cgroup_lines[i], ':', &cgroup_fields);
    std::vector<std::string> cgroups;
    Split(cgroup_fields[1], ',', &cgroups);
    for (size_t j = 0; j < cgroups.size(); ++j) {
      if (cgroups[j] == "cpu") {
        return cgroup_fields[2].substr(1);  // Skip the leading slash.
      }
    }
  }
  return "";
}


void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
  std::string group_name;
  int priority;
  bool is_daemon = false;
  Thread* self = Thread::Current();

  // If flip_function is not null, it means we have run a checkpoint
  // before the thread wakes up to execute the flip function and the
  // thread roots haven't been forwarded. So the following access to
  // the roots (opeer or methods in the frames) would be bad. Run it
  // here. TODO: clean up.
  if (thread != nullptr) {
    ScopedObjectAccessUnchecked soa(self);
    Thread* this_thread = const_cast<Thread*>(thread);
    Closure* flip_func = this_thread->GetFlipFunction();
    if (flip_func != nullptr) {
      flip_func->Run(this_thread);
    }
  }

  // Don't do this if we are aborting since the GC may have all the threads suspended. This will
  // cause ScopedObjectAccessUnchecked to deadlock.
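  // (If we are aborting, or there is no current thread or peer, we skip the peer fields and fall
  // back to the native priority below.)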
1547 if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) { 1548 ScopedObjectAccessUnchecked soa(self); 1549 priority = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority) 1550 ->GetInt(thread->tlsPtr_.opeer); 1551 is_daemon = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon) 1552 ->GetBoolean(thread->tlsPtr_.opeer); 1553 1554 ObjPtr<mirror::Object> thread_group = 1555 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group) 1556 ->GetObject(thread->tlsPtr_.opeer); 1557 1558 if (thread_group != nullptr) { 1559 ArtField* group_name_field = 1560 jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_name); 1561 ObjPtr<mirror::String> group_name_string = 1562 group_name_field->GetObject(thread_group)->AsString(); 1563 group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>"; 1564 } 1565 } else { 1566 priority = GetNativePriority(); 1567 } 1568 1569 std::string scheduler_group_name(GetSchedulerGroupName(tid)); 1570 if (scheduler_group_name.empty()) { 1571 scheduler_group_name = "default"; 1572 } 1573 1574 if (thread != nullptr) { 1575 os << '"' << *thread->tlsPtr_.name << '"'; 1576 if (is_daemon) { 1577 os << " daemon"; 1578 } 1579 os << " prio=" << priority 1580 << " tid=" << thread->GetThreadId() 1581 << " " << thread->GetState(); 1582 if (thread->IsStillStarting()) { 1583 os << " (still starting up)"; 1584 } 1585 os << "\n"; 1586 } else { 1587 os << '"' << ::art::GetThreadName(tid) << '"' 1588 << " prio=" << priority 1589 << " (not attached)\n"; 1590 } 1591 1592 if (thread != nullptr) { 1593 MutexLock mu(self, *Locks::thread_suspend_count_lock_); 1594 os << " | group=\"" << group_name << "\"" 1595 << " sCount=" << thread->tls32_.suspend_count 1596 << " dsCount=" << thread->tls32_.debug_suspend_count 1597 << " flags=" << thread->tls32_.state_and_flags.as_struct.flags 1598 << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer) 1599 << " self=" << reinterpret_cast<const void*>(thread) << "\n"; 1600 } 1601 1602 os << " | sysTid=" << tid 1603 << " nice=" << getpriority(PRIO_PROCESS, tid) 1604 << " cgrp=" << scheduler_group_name; 1605 if (thread != nullptr) { 1606 int policy; 1607 sched_param sp; 1608#if !defined(__APPLE__) 1609 // b/36445592 Don't use pthread_getschedparam since pthread may have exited. 1610 policy = sched_getscheduler(tid); 1611 if (policy == -1) { 1612 PLOG(WARNING) << "sched_getscheduler(" << tid << ")"; 1613 } 1614 int sched_getparam_result = sched_getparam(tid, &sp); 1615 if (sched_getparam_result == -1) { 1616 PLOG(WARNING) << "sched_getparam(" << tid << ", &sp)"; 1617 sp.sched_priority = -1; 1618 } 1619#else 1620 CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp), 1621 __FUNCTION__); 1622#endif 1623 os << " sched=" << policy << "/" << sp.sched_priority 1624 << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self); 1625 } 1626 os << "\n"; 1627 1628 // Grab the scheduler stats for this thread. 1629 std::string scheduler_stats; 1630 if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) { 1631 scheduler_stats.resize(scheduler_stats.size() - 1); // Lose the trailing '\n'. 
1632 } else { 1633 scheduler_stats = "0 0 0"; 1634 } 1635 1636 char native_thread_state = '?'; 1637 int utime = 0; 1638 int stime = 0; 1639 int task_cpu = 0; 1640 GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu); 1641 1642 os << " | state=" << native_thread_state 1643 << " schedstat=( " << scheduler_stats << " )" 1644 << " utm=" << utime 1645 << " stm=" << stime 1646 << " core=" << task_cpu 1647 << " HZ=" << sysconf(_SC_CLK_TCK) << "\n"; 1648 if (thread != nullptr) { 1649 os << " | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-" 1650 << reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize=" 1651 << PrettySize(thread->tlsPtr_.stack_size) << "\n"; 1652 // Dump the held mutexes. 1653 os << " | held mutexes="; 1654 for (size_t i = 0; i < kLockLevelCount; ++i) { 1655 if (i != kMonitorLock) { 1656 BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i)); 1657 if (mutex != nullptr) { 1658 os << " \"" << mutex->GetName() << "\""; 1659 if (mutex->IsReaderWriterMutex()) { 1660 ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex); 1661 if (rw_mutex->GetExclusiveOwnerTid() == static_cast<uint64_t>(tid)) { 1662 os << "(exclusive held)"; 1663 } else { 1664 os << "(shared held)"; 1665 } 1666 } 1667 } 1668 } 1669 } 1670 os << "\n"; 1671 } 1672} 1673 1674void Thread::DumpState(std::ostream& os) const { 1675 Thread::DumpState(os, this, GetTid()); 1676} 1677 1678struct StackDumpVisitor : public StackVisitor { 1679 StackDumpVisitor(std::ostream& os_in, 1680 Thread* thread_in, 1681 Context* context, 1682 bool can_allocate_in, 1683 bool check_suspended = true, 1684 bool dump_locks_in = true) 1685 REQUIRES_SHARED(Locks::mutator_lock_) 1686 : StackVisitor(thread_in, 1687 context, 1688 StackVisitor::StackWalkKind::kIncludeInlinedFrames, 1689 check_suspended), 1690 os(os_in), 1691 can_allocate(can_allocate_in), 1692 last_method(nullptr), 1693 last_line_number(0), 1694 repetition_count(0), 1695 frame_count(0), 1696 dump_locks(dump_locks_in) {} 1697 1698 virtual ~StackDumpVisitor() { 1699 if (frame_count == 0) { 1700 os << " (no managed stack frames)\n"; 1701 } 1702 } 1703 1704 bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) { 1705 ArtMethod* m = GetMethod(); 1706 if (m->IsRuntimeMethod()) { 1707 return true; 1708 } 1709 m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize); 1710 const int kMaxRepetition = 3; 1711 ObjPtr<mirror::Class> c = m->GetDeclaringClass(); 1712 ObjPtr<mirror::DexCache> dex_cache = c->GetDexCache(); 1713 int line_number = -1; 1714 if (dex_cache != nullptr) { // be tolerant of bad input 1715 const DexFile* dex_file = dex_cache->GetDexFile(); 1716 line_number = annotations::GetLineNumFromPC(dex_file, m, GetDexPc(false)); 1717 } 1718 if (line_number == last_line_number && last_method == m) { 1719 ++repetition_count; 1720 } else { 1721 if (repetition_count >= kMaxRepetition) { 1722 os << " ... repeated " << (repetition_count - kMaxRepetition) << " times\n"; 1723 } 1724 repetition_count = 0; 1725 last_line_number = line_number; 1726 last_method = m; 1727 } 1728 if (repetition_count < kMaxRepetition) { 1729 os << " at " << m->PrettyMethod(false); 1730 if (m->IsNative()) { 1731 os << "(Native method)"; 1732 } else { 1733 const char* source_file(m->GetDeclaringClassSourceFile()); 1734 os << "(" << (source_file != nullptr ? 
source_file : "unavailable")
1735                  << ":" << line_number << ")";
1736      }
1737      os << "\n";
1738      if (frame_count == 0) {
1739        Monitor::DescribeWait(os, GetThread());
1740      }
1741      if (can_allocate && dump_locks) {
1742        // Visit locks, but do not abort on errors. This would trigger a nested abort.
1743        // Skip visiting locks if dump_locks is false as it would cause a bad_mutexes_held in
1744        // RegTypeCache::RegTypeCache due to thread_list_lock.
1745        Monitor::VisitLocks(this, DumpLockedObject, &os, false);
1746      }
1747    }
1748
1749    ++frame_count;
1750    return true;
1751  }
1752
1753  static void DumpLockedObject(mirror::Object* o, void* context)
1754      REQUIRES_SHARED(Locks::mutator_lock_) {
1755    std::ostream& os = *reinterpret_cast<std::ostream*>(context);
1756    os << "  - locked ";
1757    if (o == nullptr) {
1758      os << "an unknown object";
1759    } else {
1760      if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
1761        // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
1762        // may not have been flipped yet and "o" may be a from-space (stale) ref, in which case the
1763        // IdentityHashCode call below will crash. So explicitly mark/forward it here.
1764        o = ReadBarrier::Mark(o);
1765      }
1766      if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
1767          Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
1768        // Getting the identity hashcode here would result in lock inflation and suspension of the
1769        // current thread, which isn't safe if this is the only runnable thread.
1770        os << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)", reinterpret_cast<intptr_t>(o),
1771                           o->PrettyTypeOf().c_str());
1772      } else {
1773        // IdentityHashCode can cause thread suspension, which would invalidate o if it moved. So
1774        // we get the pretty type before we call IdentityHashCode.
1775        const std::string pretty_type(o->PrettyTypeOf());
1776        os << StringPrintf("<0x%08x> (a %s)", o->IdentityHashCode(), pretty_type.c_str());
1777      }
1778    }
1779    os << "\n";
1780  }
1781
1782  std::ostream& os;
1783  const bool can_allocate;
1784  ArtMethod* last_method;
1785  int last_line_number;
1786  int repetition_count;
1787  int frame_count;
1788  const bool dump_locks;
1789};
1790
1791static bool ShouldShowNativeStack(const Thread* thread)
1792    REQUIRES_SHARED(Locks::mutator_lock_) {
1793  ThreadState state = thread->GetState();
1794
1795  // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
1796  if (state > kWaiting && state < kStarting) {
1797    return true;
1798  }
1799
1800  // In an Object.wait variant or Thread.sleep? That's not interesting.
1801  if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
1802    return false;
1803  }
1804
1805  // Threads with no managed stack frames should be shown.
1806  const ManagedStack* managed_stack = thread->GetManagedStack();
1807  if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
1808      managed_stack->GetTopShadowFrame() == nullptr)) {
1809    return true;
1810  }
1811
1812  // In some other native method? That's interesting.
1813  // We don't just check kNative because native methods will be in state kSuspended if they're
1814  // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
1815  // thread-startup states if it's early enough in their life cycle (http://b/7432159).
1816 ArtMethod* current_method = thread->GetCurrentMethod(nullptr); 1817 return current_method != nullptr && current_method->IsNative(); 1818} 1819 1820void Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_locks) const { 1821 // If flip_function is not null, it means we have run a checkpoint 1822 // before the thread wakes up to execute the flip function and the 1823 // thread roots haven't been forwarded. So the following access to 1824 // the roots (locks or methods in the frames) would be bad. Run it 1825 // here. TODO: clean up. 1826 { 1827 Thread* this_thread = const_cast<Thread*>(this); 1828 Closure* flip_func = this_thread->GetFlipFunction(); 1829 if (flip_func != nullptr) { 1830 flip_func->Run(this_thread); 1831 } 1832 } 1833 1834 // Dumping the Java stack involves the verifier for locks. The verifier operates under the 1835 // assumption that there is no exception pending on entry. Thus, stash any pending exception. 1836 // Thread::Current() instead of this in case a thread is dumping the stack of another suspended 1837 // thread. 1838 StackHandleScope<1> scope(Thread::Current()); 1839 Handle<mirror::Throwable> exc; 1840 bool have_exception = false; 1841 if (IsExceptionPending()) { 1842 exc = scope.NewHandle(GetException()); 1843 const_cast<Thread*>(this)->ClearException(); 1844 have_exception = true; 1845 } 1846 1847 std::unique_ptr<Context> context(Context::Create()); 1848 StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(), 1849 !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks); 1850 dumper.WalkStack(); 1851 1852 if (have_exception) { 1853 const_cast<Thread*>(this)->SetException(exc.Get()); 1854 } 1855} 1856 1857void Thread::DumpStack(std::ostream& os, 1858 bool dump_native_stack, 1859 BacktraceMap* backtrace_map, 1860 bool force_dump_stack) const { 1861 // TODO: we call this code when dying but may not have suspended the thread ourself. The 1862 // IsSuspended check is therefore racy with the use for dumping (normally we inhibit 1863 // the race with the thread_suspend_count_lock_). 1864 bool dump_for_abort = (gAborting > 0); 1865 bool safe_to_dump = (this == Thread::Current() || IsSuspended()); 1866 if (!kIsDebugBuild) { 1867 // We always want to dump the stack for an abort, however, there is no point dumping another 1868 // thread's stack in debug builds where we'll hit the not suspended check in the stack walk. 1869 safe_to_dump = (safe_to_dump || dump_for_abort); 1870 } 1871 if (safe_to_dump || force_dump_stack) { 1872 // If we're currently in native code, dump that stack before dumping the managed stack. 
1873 if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) { 1874 DumpKernelStack(os, GetTid(), " kernel: ", false); 1875 ArtMethod* method = 1876 GetCurrentMethod(nullptr, 1877 /*check_suspended*/ !force_dump_stack, 1878 /*abort_on_error*/ !(dump_for_abort || force_dump_stack)); 1879 DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method); 1880 } 1881 DumpJavaStack(os, 1882 /*check_suspended*/ !force_dump_stack, 1883 /*dump_locks*/ !force_dump_stack); 1884 } else { 1885 os << "Not able to dump stack of thread that isn't suspended"; 1886 } 1887} 1888 1889void Thread::ThreadExitCallback(void* arg) { 1890 Thread* self = reinterpret_cast<Thread*>(arg); 1891 if (self->tls32_.thread_exit_check_count == 0) { 1892 LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's " 1893 "going to use a pthread_key_create destructor?): " << *self; 1894 CHECK(is_started_); 1895#ifdef ART_TARGET_ANDROID 1896 __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self; 1897#else 1898 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self"); 1899#endif 1900 self->tls32_.thread_exit_check_count = 1; 1901 } else { 1902 LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self; 1903 } 1904} 1905 1906void Thread::Startup() { 1907 CHECK(!is_started_); 1908 is_started_ = true; 1909 { 1910 // MutexLock to keep annotalysis happy. 1911 // 1912 // Note we use null for the thread because Thread::Current can 1913 // return garbage since (is_started_ == true) and 1914 // Thread::pthread_key_self_ is not yet initialized. 1915 // This was seen on glibc. 1916 MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_); 1917 resume_cond_ = new ConditionVariable("Thread resumption condition variable", 1918 *Locks::thread_suspend_count_lock_); 1919 } 1920 1921 // Allocate a TLS slot. 1922 CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), 1923 "self key"); 1924 1925 // Double-check the TLS slot allocation. 1926 if (pthread_getspecific(pthread_key_self_) != nullptr) { 1927 LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr"; 1928 } 1929} 1930 1931void Thread::FinishStartup() { 1932 Runtime* runtime = Runtime::Current(); 1933 CHECK(runtime->IsStarted()); 1934 1935 // Finish attaching the main thread. 1936 ScopedObjectAccess soa(Thread::Current()); 1937 Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup()); 1938 Thread::Current()->AssertNoPendingException(); 1939 1940 Runtime::Current()->GetClassLinker()->RunRootClinits(); 1941 1942 // The thread counts as started from now on. We need to add it to the ThreadGroup. For regular 1943 // threads, this is done in Thread.start() on the Java side. 1944 { 1945 // This is only ever done once. There's no benefit in caching the method. 
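    // Illustrative Java-level equivalent of the JNI calls below (assuming the usual libcore
    // ThreadGroup.add(Thread) method, looked up here by the signature "(Ljava/lang/Thread;)V"):
    //   runtime.getMainThreadGroup().add(Thread.currentThread());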
1946 jmethodID thread_group_add = soa.Env()->GetMethodID(WellKnownClasses::java_lang_ThreadGroup, 1947 "add", 1948 "(Ljava/lang/Thread;)V"); 1949 CHECK(thread_group_add != nullptr); 1950 ScopedLocalRef<jobject> thread_jobject( 1951 soa.Env(), soa.Env()->AddLocalReference<jobject>(Thread::Current()->GetPeer())); 1952 soa.Env()->CallNonvirtualVoidMethod(runtime->GetMainThreadGroup(), 1953 WellKnownClasses::java_lang_ThreadGroup, 1954 thread_group_add, 1955 thread_jobject.get()); 1956 Thread::Current()->AssertNoPendingException(); 1957 } 1958} 1959 1960void Thread::Shutdown() { 1961 CHECK(is_started_); 1962 is_started_ = false; 1963 CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key"); 1964 MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_); 1965 if (resume_cond_ != nullptr) { 1966 delete resume_cond_; 1967 resume_cond_ = nullptr; 1968 } 1969} 1970 1971Thread::Thread(bool daemon) 1972 : tls32_(daemon), 1973 wait_monitor_(nullptr), 1974 interrupted_(false), 1975 custom_tls_(nullptr), 1976 can_call_into_java_(true) { 1977 wait_mutex_ = new Mutex("a thread wait mutex"); 1978 wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_); 1979 tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>; 1980 tlsPtr_.name = new std::string(kThreadNameDuringStartup); 1981 1982 static_assert((sizeof(Thread) % 4) == 0U, 1983 "art::Thread has a size which is not a multiple of 4."); 1984 tls32_.state_and_flags.as_struct.flags = 0; 1985 tls32_.state_and_flags.as_struct.state = kNative; 1986 memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes)); 1987 std::fill(tlsPtr_.rosalloc_runs, 1988 tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread, 1989 gc::allocator::RosAlloc::GetDedicatedFullRun()); 1990 tlsPtr_.checkpoint_function = nullptr; 1991 for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { 1992 tlsPtr_.active_suspend_barriers[i] = nullptr; 1993 } 1994 tlsPtr_.flip_function = nullptr; 1995 tlsPtr_.thread_local_mark_stack = nullptr; 1996 tls32_.is_transitioning_to_runnable = false; 1997} 1998 1999bool Thread::IsStillStarting() const { 2000 // You might think you can check whether the state is kStarting, but for much of thread startup, 2001 // the thread is in kNative; it might also be in kVmWait. 2002 // You might think you can check whether the peer is null, but the peer is actually created and 2003 // assigned fairly early on, and needs to be. 2004 // It turns out that the last thing to change is the thread name; that's a good proxy for "has 2005 // this thread _ever_ entered kRunnable". 
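  // That is: report "still starting" while the thread has no Java peer of either kind yet, or
  // while its name is still the kThreadNameDuringStartup placeholder.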
2006 return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) || 2007 (*tlsPtr_.name == kThreadNameDuringStartup); 2008} 2009 2010void Thread::AssertPendingException() const { 2011 CHECK(IsExceptionPending()) << "Pending exception expected."; 2012} 2013 2014void Thread::AssertPendingOOMException() const { 2015 AssertPendingException(); 2016 auto* e = GetException(); 2017 CHECK_EQ(e->GetClass(), DecodeJObject(WellKnownClasses::java_lang_OutOfMemoryError)->AsClass()) 2018 << e->Dump(); 2019} 2020 2021void Thread::AssertNoPendingException() const { 2022 if (UNLIKELY(IsExceptionPending())) { 2023 ScopedObjectAccess soa(Thread::Current()); 2024 LOG(FATAL) << "No pending exception expected: " << GetException()->Dump(); 2025 } 2026} 2027 2028void Thread::AssertNoPendingExceptionForNewException(const char* msg) const { 2029 if (UNLIKELY(IsExceptionPending())) { 2030 ScopedObjectAccess soa(Thread::Current()); 2031 LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: " 2032 << GetException()->Dump(); 2033 } 2034} 2035 2036class MonitorExitVisitor : public SingleRootVisitor { 2037 public: 2038 explicit MonitorExitVisitor(Thread* self) : self_(self) { } 2039 2040 // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit. 2041 void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED) 2042 OVERRIDE NO_THREAD_SAFETY_ANALYSIS { 2043 if (self_->HoldsLock(entered_monitor)) { 2044 LOG(WARNING) << "Calling MonitorExit on object " 2045 << entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")" 2046 << " left locked by native thread " 2047 << *Thread::Current() << " which is detaching"; 2048 entered_monitor->MonitorExit(self_); 2049 } 2050 } 2051 2052 private: 2053 Thread* const self_; 2054}; 2055 2056void Thread::Destroy() { 2057 Thread* self = this; 2058 DCHECK_EQ(self, Thread::Current()); 2059 2060 if (tlsPtr_.jni_env != nullptr) { 2061 { 2062 ScopedObjectAccess soa(self); 2063 MonitorExitVisitor visitor(self); 2064 // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited. 2065 tlsPtr_.jni_env->monitors.VisitRoots(&visitor, RootInfo(kRootVMInternal)); 2066 } 2067 // Release locally held global references which releasing may require the mutator lock. 2068 if (tlsPtr_.jpeer != nullptr) { 2069 // If pthread_create fails we don't have a jni env here. 2070 tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer); 2071 tlsPtr_.jpeer = nullptr; 2072 } 2073 if (tlsPtr_.class_loader_override != nullptr) { 2074 tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override); 2075 tlsPtr_.class_loader_override = nullptr; 2076 } 2077 } 2078 2079 if (tlsPtr_.opeer != nullptr) { 2080 ScopedObjectAccess soa(self); 2081 // We may need to call user-supplied managed code, do this before final clean-up. 2082 HandleUncaughtExceptions(soa); 2083 RemoveFromThreadGroup(soa); 2084 2085 // this.nativePeer = 0; 2086 if (Runtime::Current()->IsActiveTransaction()) { 2087 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer) 2088 ->SetLong<true>(tlsPtr_.opeer, 0); 2089 } else { 2090 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer) 2091 ->SetLong<false>(tlsPtr_.opeer, 0); 2092 } 2093 Runtime* runtime = Runtime::Current(); 2094 if (runtime != nullptr) { 2095 runtime->GetRuntimeCallbacks()->ThreadDeath(self); 2096 } 2097 2098 2099 // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone 2100 // who is waiting. 
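    // Roughly the Java-level effect (illustrative only), where 'lock' is the Thread.lock object
    // fetched below:
    //   synchronized (lock) { lock.notifyAll(); }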
2101    ObjPtr<mirror::Object> lock =
2102        jni::DecodeArtField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
2103    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
2104    if (lock != nullptr) {
2105      StackHandleScope<1> hs(self);
2106      Handle<mirror::Object> h_obj(hs.NewHandle(lock));
2107      ObjectLock<mirror::Object> locker(self, h_obj);
2108      locker.NotifyAll();
2109    }
2110    tlsPtr_.opeer = nullptr;
2111  }
2112
2113  {
2114    ScopedObjectAccess soa(self);
2115    Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
2116    if (kUseReadBarrier) {
2117      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
2118    }
2119  }
2120}
2121
2122Thread::~Thread() {
2123  CHECK(tlsPtr_.class_loader_override == nullptr);
2124  CHECK(tlsPtr_.jpeer == nullptr);
2125  CHECK(tlsPtr_.opeer == nullptr);
2126  bool initialized = (tlsPtr_.jni_env != nullptr);  // Did Thread::Init run?
2127  if (initialized) {
2128    delete tlsPtr_.jni_env;
2129    tlsPtr_.jni_env = nullptr;
2130  }
2131  CHECK_NE(GetState(), kRunnable);
2132  CHECK(!ReadFlag(kCheckpointRequest));
2133  CHECK(!ReadFlag(kEmptyCheckpointRequest));
2134  CHECK(tlsPtr_.checkpoint_function == nullptr);
2135  CHECK_EQ(checkpoint_overflow_.size(), 0u);
2136  CHECK(tlsPtr_.flip_function == nullptr);
2137  CHECK_EQ(tls32_.is_transitioning_to_runnable, false);
2138
2139  // Make sure we processed all deoptimization requests.
2140  CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";
2141  CHECK(tlsPtr_.frame_id_to_shadow_frame == nullptr) <<
2142      "Not all deoptimized frames have been consumed by the debugger.";
2143
2144  // We may be deleting a stillborn thread.
2145  SetStateUnsafe(kTerminated);
2146
2147  delete wait_cond_;
2148  delete wait_mutex_;
2149
2150  if (tlsPtr_.long_jump_context != nullptr) {
2151    delete tlsPtr_.long_jump_context;
2152  }
2153
2154  if (initialized) {
2155    CleanupCpu();
2156  }
2157
2158  if (tlsPtr_.single_step_control != nullptr) {
2159    delete tlsPtr_.single_step_control;
2160  }
2161  delete tlsPtr_.instrumentation_stack;
2162  delete tlsPtr_.name;
2163  delete tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
2164
2165  Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
2166
2167  TearDownAlternateSignalStack();
2168}
2169
2170void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
2171  if (!IsExceptionPending()) {
2172    return;
2173  }
2174  ScopedLocalRef<jobject> peer(tlsPtr_.jni_env, soa.AddLocalReference<jobject>(tlsPtr_.opeer));
2175  ScopedThreadStateChange tsc(this, kNative);
2176
2177  // Get and clear the exception.
2178  ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred());
2179  tlsPtr_.jni_env->ExceptionClear();
2180
2181  // Call the Thread instance's dispatchUncaughtException(Throwable)
2182  tlsPtr_.jni_env->CallVoidMethod(peer.get(),
2183      WellKnownClasses::java_lang_Thread_dispatchUncaughtException,
2184      exception.get());
2185
2186  // If the dispatchUncaughtException threw, clear that exception too.
2187  tlsPtr_.jni_env->ExceptionClear();
2188}
2189
2190void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
2191  // this.group.removeThread(this);
2192  // group can be null if we're in the compiler or a test.
2193 ObjPtr<mirror::Object> ogroup = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group) 2194 ->GetObject(tlsPtr_.opeer); 2195 if (ogroup != nullptr) { 2196 ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup)); 2197 ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(tlsPtr_.opeer)); 2198 ScopedThreadStateChange tsc(soa.Self(), kNative); 2199 tlsPtr_.jni_env->CallVoidMethod(group.get(), 2200 WellKnownClasses::java_lang_ThreadGroup_removeThread, 2201 peer.get()); 2202 } 2203} 2204 2205bool Thread::HandleScopeContains(jobject obj) const { 2206 StackReference<mirror::Object>* hs_entry = 2207 reinterpret_cast<StackReference<mirror::Object>*>(obj); 2208 for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur!= nullptr; cur = cur->GetLink()) { 2209 if (cur->Contains(hs_entry)) { 2210 return true; 2211 } 2212 } 2213 // JNI code invoked from portable code uses shadow frames rather than the handle scope. 2214 return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry); 2215} 2216 2217void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) { 2218 BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor( 2219 visitor, RootInfo(kRootNativeStack, thread_id)); 2220 for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) { 2221 cur->VisitRoots(buffered_visitor); 2222 } 2223} 2224 2225ObjPtr<mirror::Object> Thread::DecodeJObject(jobject obj) const { 2226 if (obj == nullptr) { 2227 return nullptr; 2228 } 2229 IndirectRef ref = reinterpret_cast<IndirectRef>(obj); 2230 IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref); 2231 ObjPtr<mirror::Object> result; 2232 bool expect_null = false; 2233 // The "kinds" below are sorted by the frequency we expect to encounter them. 2234 if (kind == kLocal) { 2235 IndirectReferenceTable& locals = tlsPtr_.jni_env->locals; 2236 // Local references do not need a read barrier. 2237 result = locals.Get<kWithoutReadBarrier>(ref); 2238 } else if (kind == kHandleScopeOrInvalid) { 2239 // TODO: make stack indirect reference table lookup more efficient. 2240 // Check if this is a local reference in the handle scope. 2241 if (LIKELY(HandleScopeContains(obj))) { 2242 // Read from handle scope. 2243 result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr(); 2244 VerifyObject(result); 2245 } else { 2246 tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of invalid jobject %p", obj); 2247 expect_null = true; 2248 result = nullptr; 2249 } 2250 } else if (kind == kGlobal) { 2251 result = tlsPtr_.jni_env->vm->DecodeGlobal(ref); 2252 } else { 2253 DCHECK_EQ(kind, kWeakGlobal); 2254 result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref); 2255 if (Runtime::Current()->IsClearedJniWeakGlobal(result)) { 2256 // This is a special case where it's okay to return null. 2257 expect_null = true; 2258 result = nullptr; 2259 } 2260 } 2261 2262 if (UNLIKELY(!expect_null && result == nullptr)) { 2263 tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of deleted %s %p", 2264 ToStr<IndirectRefKind>(kind).c_str(), obj); 2265 } 2266 return result; 2267} 2268 2269bool Thread::IsJWeakCleared(jweak obj) const { 2270 CHECK(obj != nullptr); 2271 IndirectRef ref = reinterpret_cast<IndirectRef>(obj); 2272 IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref); 2273 CHECK_EQ(kind, kWeakGlobal); 2274 return tlsPtr_.jni_env->vm->IsWeakGlobalCleared(const_cast<Thread*>(this), ref); 2275} 2276 2277// Implements java.lang.Thread.interrupted. 
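// Unlike IsInterrupted() below, this reads and clears the flag in a single critical section
// under wait_mutex_, matching the clear-on-read semantics of java.lang.Thread.interrupted().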
2278bool Thread::Interrupted() { 2279 MutexLock mu(Thread::Current(), *wait_mutex_); 2280 bool interrupted = IsInterruptedLocked(); 2281 SetInterruptedLocked(false); 2282 return interrupted; 2283} 2284 2285// Implements java.lang.Thread.isInterrupted. 2286bool Thread::IsInterrupted() { 2287 MutexLock mu(Thread::Current(), *wait_mutex_); 2288 return IsInterruptedLocked(); 2289} 2290 2291void Thread::Interrupt(Thread* self) { 2292 MutexLock mu(self, *wait_mutex_); 2293 if (interrupted_) { 2294 return; 2295 } 2296 interrupted_ = true; 2297 NotifyLocked(self); 2298} 2299 2300void Thread::Notify() { 2301 Thread* self = Thread::Current(); 2302 MutexLock mu(self, *wait_mutex_); 2303 NotifyLocked(self); 2304} 2305 2306void Thread::NotifyLocked(Thread* self) { 2307 if (wait_monitor_ != nullptr) { 2308 wait_cond_->Signal(self); 2309 } 2310} 2311 2312void Thread::SetClassLoaderOverride(jobject class_loader_override) { 2313 if (tlsPtr_.class_loader_override != nullptr) { 2314 GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override); 2315 } 2316 tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override); 2317} 2318 2319using ArtMethodDexPcPair = std::pair<ArtMethod*, uint32_t>; 2320 2321// Counts the stack trace depth and also fetches the first max_saved_frames frames. 2322class FetchStackTraceVisitor : public StackVisitor { 2323 public: 2324 explicit FetchStackTraceVisitor(Thread* thread, 2325 ArtMethodDexPcPair* saved_frames = nullptr, 2326 size_t max_saved_frames = 0) 2327 REQUIRES_SHARED(Locks::mutator_lock_) 2328 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), 2329 saved_frames_(saved_frames), 2330 max_saved_frames_(max_saved_frames) {} 2331 2332 bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) { 2333 // We want to skip frames up to and including the exception's constructor. 2334 // Note we also skip the frame if it doesn't have a method (namely the callee 2335 // save frame) 2336 ArtMethod* m = GetMethod(); 2337 if (skipping_ && !m->IsRuntimeMethod() && 2338 !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) { 2339 skipping_ = false; 2340 } 2341 if (!skipping_) { 2342 if (!m->IsRuntimeMethod()) { // Ignore runtime frames (in particular callee save). 2343 if (depth_ < max_saved_frames_) { 2344 saved_frames_[depth_].first = m; 2345 saved_frames_[depth_].second = m->IsProxyMethod() ? 
DexFile::kDexNoIndex : GetDexPc(); 2346 } 2347 ++depth_; 2348 } 2349 } else { 2350 ++skip_depth_; 2351 } 2352 return true; 2353 } 2354 2355 uint32_t GetDepth() const { 2356 return depth_; 2357 } 2358 2359 uint32_t GetSkipDepth() const { 2360 return skip_depth_; 2361 } 2362 2363 private: 2364 uint32_t depth_ = 0; 2365 uint32_t skip_depth_ = 0; 2366 bool skipping_ = true; 2367 ArtMethodDexPcPair* saved_frames_; 2368 const size_t max_saved_frames_; 2369 2370 DISALLOW_COPY_AND_ASSIGN(FetchStackTraceVisitor); 2371}; 2372 2373template<bool kTransactionActive> 2374class BuildInternalStackTraceVisitor : public StackVisitor { 2375 public: 2376 BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth) 2377 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), 2378 self_(self), 2379 skip_depth_(skip_depth), 2380 pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {} 2381 2382 bool Init(int depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) { 2383 // Allocate method trace as an object array where the first element is a pointer array that 2384 // contains the ArtMethod pointers and dex PCs. The rest of the elements are the declaring 2385 // class of the ArtMethod pointers. 2386 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 2387 StackHandleScope<1> hs(self_); 2388 ObjPtr<mirror::Class> array_class = class_linker->GetClassRoot(ClassLinker::kObjectArrayClass); 2389 // The first element is the methods and dex pc array, the other elements are declaring classes 2390 // for the methods to ensure classes in the stack trace don't get unloaded. 2391 Handle<mirror::ObjectArray<mirror::Object>> trace( 2392 hs.NewHandle( 2393 mirror::ObjectArray<mirror::Object>::Alloc(hs.Self(), array_class, depth + 1))); 2394 if (trace == nullptr) { 2395 // Acquire uninterruptible_ in all paths. 2396 self_->StartAssertNoThreadSuspension("Building internal stack trace"); 2397 self_->AssertPendingOOMException(); 2398 return false; 2399 } 2400 ObjPtr<mirror::PointerArray> methods_and_pcs = 2401 class_linker->AllocPointerArray(self_, depth * 2); 2402 const char* last_no_suspend_cause = 2403 self_->StartAssertNoThreadSuspension("Building internal stack trace"); 2404 if (methods_and_pcs == nullptr) { 2405 self_->AssertPendingOOMException(); 2406 return false; 2407 } 2408 trace->Set(0, methods_and_pcs); 2409 trace_ = trace.Get(); 2410 // If We are called from native, use non-transactional mode. 2411 CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause; 2412 return true; 2413 } 2414 2415 virtual ~BuildInternalStackTraceVisitor() RELEASE(Roles::uninterruptible_) { 2416 self_->EndAssertNoThreadSuspension(nullptr); 2417 } 2418 2419 bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) { 2420 if (trace_ == nullptr) { 2421 return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError. 2422 } 2423 if (skip_depth_ > 0) { 2424 skip_depth_--; 2425 return true; 2426 } 2427 ArtMethod* m = GetMethod(); 2428 if (m->IsRuntimeMethod()) { 2429 return true; // Ignore runtime frames (in particular callee save). 2430 } 2431 AddFrame(m, m->IsProxyMethod() ? 
DexFile::kDexNoIndex : GetDexPc()); 2432 return true; 2433 } 2434 2435 void AddFrame(ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) { 2436 ObjPtr<mirror::PointerArray> trace_methods_and_pcs = GetTraceMethodsAndPCs(); 2437 trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(count_, method, pointer_size_); 2438 trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>( 2439 trace_methods_and_pcs->GetLength() / 2 + count_, 2440 dex_pc, 2441 pointer_size_); 2442 // Save the declaring class of the method to ensure that the declaring classes of the methods 2443 // do not get unloaded while the stack trace is live. 2444 trace_->Set(count_ + 1, method->GetDeclaringClass()); 2445 ++count_; 2446 } 2447 2448 ObjPtr<mirror::PointerArray> GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) { 2449 return ObjPtr<mirror::PointerArray>::DownCast(MakeObjPtr(trace_->Get(0))); 2450 } 2451 2452 mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const { 2453 return trace_; 2454 } 2455 2456 private: 2457 Thread* const self_; 2458 // How many more frames to skip. 2459 int32_t skip_depth_; 2460 // Current position down stack trace. 2461 uint32_t count_ = 0; 2462 // An object array where the first element is a pointer array that contains the ArtMethod 2463 // pointers on the stack and dex PCs. The rest of the elements are the declaring 2464 // class of the ArtMethod pointers. trace_[i+1] contains the declaring class of the ArtMethod of 2465 // the i'th frame. 2466 mirror::ObjectArray<mirror::Object>* trace_ = nullptr; 2467 // For cross compilation. 2468 const PointerSize pointer_size_; 2469 2470 DISALLOW_COPY_AND_ASSIGN(BuildInternalStackTraceVisitor); 2471}; 2472 2473template<bool kTransactionActive> 2474jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const { 2475 // Compute depth of stack, save frames if possible to avoid needing to recompute many. 2476 constexpr size_t kMaxSavedFrames = 256; 2477 std::unique_ptr<ArtMethodDexPcPair[]> saved_frames(new ArtMethodDexPcPair[kMaxSavedFrames]); 2478 FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this), 2479 &saved_frames[0], 2480 kMaxSavedFrames); 2481 count_visitor.WalkStack(); 2482 const uint32_t depth = count_visitor.GetDepth(); 2483 const uint32_t skip_depth = count_visitor.GetSkipDepth(); 2484 2485 // Build internal stack trace. 2486 BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(), 2487 const_cast<Thread*>(this), 2488 skip_depth); 2489 if (!build_trace_visitor.Init(depth)) { 2490 return nullptr; // Allocation failed. 2491 } 2492 // If we saved all of the frames we don't even need to do the actual stack walk. This is faster 2493 // than doing the stack walk twice. 2494 if (depth < kMaxSavedFrames) { 2495 for (size_t i = 0; i < depth; ++i) { 2496 build_trace_visitor.AddFrame(saved_frames[i].first, saved_frames[i].second); 2497 } 2498 } else { 2499 build_trace_visitor.WalkStack(); 2500 } 2501 2502 mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace(); 2503 if (kIsDebugBuild) { 2504 ObjPtr<mirror::PointerArray> trace_methods = build_trace_visitor.GetTraceMethodsAndPCs(); 2505 // Second half of trace_methods is dex PCs. 
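    // Layout, as established by AddFrame above: entry i holds the ArtMethod* for frame i and
    // entry (length / 2 + i) holds that frame's dex pc, so only the method half is checked here.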
2506 for (uint32_t i = 0; i < static_cast<uint32_t>(trace_methods->GetLength() / 2); ++i) { 2507 auto* method = trace_methods->GetElementPtrSize<ArtMethod*>( 2508 i, Runtime::Current()->GetClassLinker()->GetImagePointerSize()); 2509 CHECK(method != nullptr); 2510 } 2511 } 2512 return soa.AddLocalReference<jobject>(trace); 2513} 2514template jobject Thread::CreateInternalStackTrace<false>( 2515 const ScopedObjectAccessAlreadyRunnable& soa) const; 2516template jobject Thread::CreateInternalStackTrace<true>( 2517 const ScopedObjectAccessAlreadyRunnable& soa) const; 2518 2519bool Thread::IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const { 2520 // Only count the depth since we do not pass a stack frame array as an argument. 2521 FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this)); 2522 count_visitor.WalkStack(); 2523 return count_visitor.GetDepth() == static_cast<uint32_t>(exception->GetStackDepth()); 2524} 2525 2526jobjectArray Thread::InternalStackTraceToStackTraceElementArray( 2527 const ScopedObjectAccessAlreadyRunnable& soa, 2528 jobject internal, 2529 jobjectArray output_array, 2530 int* stack_depth) { 2531 // Decode the internal stack trace into the depth, method trace and PC trace. 2532 // Subtract one for the methods and PC trace. 2533 int32_t depth = soa.Decode<mirror::Array>(internal)->GetLength() - 1; 2534 DCHECK_GE(depth, 0); 2535 2536 ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); 2537 2538 jobjectArray result; 2539 2540 if (output_array != nullptr) { 2541 // Reuse the array we were given. 2542 result = output_array; 2543 // ...adjusting the number of frames we'll write to not exceed the array length. 2544 const int32_t traces_length = 2545 soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->GetLength(); 2546 depth = std::min(depth, traces_length); 2547 } else { 2548 // Create java_trace array and place in local reference table 2549 mirror::ObjectArray<mirror::StackTraceElement>* java_traces = 2550 class_linker->AllocStackTraceElementArray(soa.Self(), depth); 2551 if (java_traces == nullptr) { 2552 return nullptr; 2553 } 2554 result = soa.AddLocalReference<jobjectArray>(java_traces); 2555 } 2556 2557 if (stack_depth != nullptr) { 2558 *stack_depth = depth; 2559 } 2560 2561 for (int32_t i = 0; i < depth; ++i) { 2562 ObjPtr<mirror::ObjectArray<mirror::Object>> decoded_traces = 2563 soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>(); 2564 // Methods and dex PC trace is element 0. 
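    // The pointer array is backed by an int[] or a long[] depending on the target pointer size,
    // hence the two cases accepted by the check below.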
2565 DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray()); 2566 ObjPtr<mirror::PointerArray> const method_trace = 2567 ObjPtr<mirror::PointerArray>::DownCast(MakeObjPtr(decoded_traces->Get(0))); 2568 // Prepare parameters for StackTraceElement(String cls, String method, String file, int line) 2569 ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize); 2570 uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>( 2571 i + method_trace->GetLength() / 2, kRuntimePointerSize); 2572 int32_t line_number; 2573 StackHandleScope<3> hs(soa.Self()); 2574 auto class_name_object(hs.NewHandle<mirror::String>(nullptr)); 2575 auto source_name_object(hs.NewHandle<mirror::String>(nullptr)); 2576 if (method->IsProxyMethod()) { 2577 line_number = -1; 2578 class_name_object.Assign(method->GetDeclaringClass()->GetName()); 2579 // source_name_object intentionally left null for proxy methods 2580 } else { 2581 line_number = method->GetLineNumFromDexPC(dex_pc); 2582 // Allocate element, potentially triggering GC 2583 // TODO: reuse class_name_object via Class::name_? 2584 const char* descriptor = method->GetDeclaringClassDescriptor(); 2585 CHECK(descriptor != nullptr); 2586 std::string class_name(PrettyDescriptor(descriptor)); 2587 class_name_object.Assign( 2588 mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str())); 2589 if (class_name_object == nullptr) { 2590 soa.Self()->AssertPendingOOMException(); 2591 return nullptr; 2592 } 2593 const char* source_file = method->GetDeclaringClassSourceFile(); 2594 if (line_number == -1) { 2595 // Make the line_number field of StackTraceElement hold the dex pc. 2596 // source_name_object is intentionally left null if we failed to map the dex pc to 2597 // a line number (most probably because there is no debug info). See b/30183883. 2598 line_number = dex_pc; 2599 } else { 2600 if (source_file != nullptr) { 2601 source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file)); 2602 if (source_name_object == nullptr) { 2603 soa.Self()->AssertPendingOOMException(); 2604 return nullptr; 2605 } 2606 } 2607 } 2608 } 2609 const char* method_name = method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName(); 2610 CHECK(method_name != nullptr); 2611 Handle<mirror::String> method_name_object( 2612 hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name))); 2613 if (method_name_object == nullptr) { 2614 return nullptr; 2615 } 2616 ObjPtr<mirror::StackTraceElement> obj = mirror::StackTraceElement::Alloc(soa.Self(), 2617 class_name_object, 2618 method_name_object, 2619 source_name_object, 2620 line_number); 2621 if (obj == nullptr) { 2622 return nullptr; 2623 } 2624 // We are called from native: use non-transactional mode. 2625 soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->Set<false>(i, obj); 2626 } 2627 return result; 2628} 2629 2630void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) 
{ 2631 va_list args; 2632 va_start(args, fmt); 2633 ThrowNewExceptionV(exception_class_descriptor, fmt, args); 2634 va_end(args); 2635} 2636 2637void Thread::ThrowNewExceptionV(const char* exception_class_descriptor, 2638 const char* fmt, va_list ap) { 2639 std::string msg; 2640 StringAppendV(&msg, fmt, ap); 2641 ThrowNewException(exception_class_descriptor, msg.c_str()); 2642} 2643 2644void Thread::ThrowNewException(const char* exception_class_descriptor, 2645 const char* msg) { 2646 // Callers should either clear or call ThrowNewWrappedException. 2647 AssertNoPendingExceptionForNewException(msg); 2648 ThrowNewWrappedException(exception_class_descriptor, msg); 2649} 2650 2651static ObjPtr<mirror::ClassLoader> GetCurrentClassLoader(Thread* self) 2652 REQUIRES_SHARED(Locks::mutator_lock_) { 2653 ArtMethod* method = self->GetCurrentMethod(nullptr); 2654 return method != nullptr 2655 ? method->GetDeclaringClass()->GetClassLoader() 2656 : nullptr; 2657} 2658 2659void Thread::ThrowNewWrappedException(const char* exception_class_descriptor, 2660 const char* msg) { 2661 DCHECK_EQ(this, Thread::Current()); 2662 ScopedObjectAccessUnchecked soa(this); 2663 StackHandleScope<3> hs(soa.Self()); 2664 Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self()))); 2665 ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException())); 2666 ClearException(); 2667 Runtime* runtime = Runtime::Current(); 2668 auto* cl = runtime->GetClassLinker(); 2669 Handle<mirror::Class> exception_class( 2670 hs.NewHandle(cl->FindClass(this, exception_class_descriptor, class_loader))); 2671 if (UNLIKELY(exception_class == nullptr)) { 2672 CHECK(IsExceptionPending()); 2673 LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor); 2674 return; 2675 } 2676 2677 if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true, 2678 true))) { 2679 DCHECK(IsExceptionPending()); 2680 return; 2681 } 2682 DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass()); 2683 Handle<mirror::Throwable> exception( 2684 hs.NewHandle(ObjPtr<mirror::Throwable>::DownCast(exception_class->AllocObject(this)))); 2685 2686 // If we couldn't allocate the exception, throw the pre-allocated out of memory exception. 2687 if (exception == nullptr) { 2688 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError()); 2689 return; 2690 } 2691 2692 // Choose an appropriate constructor and set up the arguments. 2693 const char* signature; 2694 ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr); 2695 if (msg != nullptr) { 2696 // Ensure we remember this and the method over the String allocation. 2697 msg_string.reset( 2698 soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg))); 2699 if (UNLIKELY(msg_string.get() == nullptr)) { 2700 CHECK(IsExceptionPending()); // OOME. 
2701 return; 2702 } 2703 if (cause.get() == nullptr) { 2704 signature = "(Ljava/lang/String;)V"; 2705 } else { 2706 signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V"; 2707 } 2708 } else { 2709 if (cause.get() == nullptr) { 2710 signature = "()V"; 2711 } else { 2712 signature = "(Ljava/lang/Throwable;)V"; 2713 } 2714 } 2715 ArtMethod* exception_init_method = 2716 exception_class->FindDeclaredDirectMethod("<init>", signature, cl->GetImagePointerSize()); 2717 2718 CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in " 2719 << PrettyDescriptor(exception_class_descriptor); 2720 2721 if (UNLIKELY(!runtime->IsStarted())) { 2722 // Something is trying to throw an exception without a started runtime, which is the common 2723 // case in the compiler. We won't be able to invoke the constructor of the exception, so set 2724 // the exception fields directly. 2725 if (msg != nullptr) { 2726 exception->SetDetailMessage(DecodeJObject(msg_string.get())->AsString()); 2727 } 2728 if (cause.get() != nullptr) { 2729 exception->SetCause(DecodeJObject(cause.get())->AsThrowable()); 2730 } 2731 ScopedLocalRef<jobject> trace(GetJniEnv(), 2732 Runtime::Current()->IsActiveTransaction() 2733 ? CreateInternalStackTrace<true>(soa) 2734 : CreateInternalStackTrace<false>(soa)); 2735 if (trace.get() != nullptr) { 2736 exception->SetStackState(DecodeJObject(trace.get()).Ptr()); 2737 } 2738 SetException(exception.Get()); 2739 } else { 2740 jvalue jv_args[2]; 2741 size_t i = 0; 2742 2743 if (msg != nullptr) { 2744 jv_args[i].l = msg_string.get(); 2745 ++i; 2746 } 2747 if (cause.get() != nullptr) { 2748 jv_args[i].l = cause.get(); 2749 ++i; 2750 } 2751 ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get())); 2752 InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(exception_init_method), jv_args); 2753 if (LIKELY(!IsExceptionPending())) { 2754 SetException(exception.Get()); 2755 } 2756 } 2757} 2758 2759void Thread::ThrowOutOfMemoryError(const char* msg) { 2760 LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s", 2761 msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : "")); 2762 if (!tls32_.throwing_OutOfMemoryError) { 2763 tls32_.throwing_OutOfMemoryError = true; 2764 ThrowNewException("Ljava/lang/OutOfMemoryError;", msg); 2765 tls32_.throwing_OutOfMemoryError = false; 2766 } else { 2767 Dump(LOG_STREAM(WARNING)); // The pre-allocated OOME has no stack, so help out and log one. 2768 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError()); 2769 } 2770} 2771 2772Thread* Thread::CurrentFromGdb() { 2773 return Thread::Current(); 2774} 2775 2776void Thread::DumpFromGdb() const { 2777 std::ostringstream ss; 2778 Dump(ss); 2779 std::string str(ss.str()); 2780 // log to stderr for debugging command line processes 2781 std::cerr << str; 2782#ifdef ART_TARGET_ANDROID 2783 // log to logcat for debugging frameworks processes 2784 LOG(INFO) << str; 2785#endif 2786} 2787 2788// Explicitly instantiate 32 and 64bit thread offset dumping support. 
2789template 2790void Thread::DumpThreadOffset<PointerSize::k32>(std::ostream& os, uint32_t offset); 2791template 2792void Thread::DumpThreadOffset<PointerSize::k64>(std::ostream& os, uint32_t offset); 2793 2794template<PointerSize ptr_size> 2795void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) { 2796#define DO_THREAD_OFFSET(x, y) \ 2797 if (offset == (x).Uint32Value()) { \ 2798 os << (y); \ 2799 return; \ 2800 } 2801 DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags") 2802 DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table") 2803 DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception") 2804 DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer"); 2805 DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env") 2806 DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self") 2807 DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end") 2808 DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id") 2809 DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method") 2810 DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame") 2811 DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope") 2812 DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger") 2813#undef DO_THREAD_OFFSET 2814 2815#define JNI_ENTRY_POINT_INFO(x) \ 2816 if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \ 2817 os << #x; \ 2818 return; \ 2819 } 2820 JNI_ENTRY_POINT_INFO(pDlsymLookup) 2821#undef JNI_ENTRY_POINT_INFO 2822 2823#define QUICK_ENTRY_POINT_INFO(x) \ 2824 if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \ 2825 os << #x; \ 2826 return; \ 2827 } 2828 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved) 2829 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved8) 2830 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved16) 2831 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved32) 2832 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved64) 2833 QUICK_ENTRY_POINT_INFO(pAllocObjectResolved) 2834 QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized) 2835 QUICK_ENTRY_POINT_INFO(pAllocObjectWithChecks) 2836 QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes) 2837 QUICK_ENTRY_POINT_INFO(pAllocStringFromChars) 2838 QUICK_ENTRY_POINT_INFO(pAllocStringFromString) 2839 QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial) 2840 QUICK_ENTRY_POINT_INFO(pCheckInstanceOf) 2841 QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage) 2842 QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess) 2843 QUICK_ENTRY_POINT_INFO(pInitializeType) 2844 QUICK_ENTRY_POINT_INFO(pResolveString) 2845 QUICK_ENTRY_POINT_INFO(pSet8Instance) 2846 QUICK_ENTRY_POINT_INFO(pSet8Static) 2847 QUICK_ENTRY_POINT_INFO(pSet16Instance) 2848 QUICK_ENTRY_POINT_INFO(pSet16Static) 2849 QUICK_ENTRY_POINT_INFO(pSet32Instance) 2850 QUICK_ENTRY_POINT_INFO(pSet32Static) 2851 QUICK_ENTRY_POINT_INFO(pSet64Instance) 2852 QUICK_ENTRY_POINT_INFO(pSet64Static) 2853 QUICK_ENTRY_POINT_INFO(pSetObjInstance) 2854 QUICK_ENTRY_POINT_INFO(pSetObjStatic) 2855 QUICK_ENTRY_POINT_INFO(pGetByteInstance) 2856 QUICK_ENTRY_POINT_INFO(pGetBooleanInstance) 2857 QUICK_ENTRY_POINT_INFO(pGetByteStatic) 2858 QUICK_ENTRY_POINT_INFO(pGetBooleanStatic) 2859 QUICK_ENTRY_POINT_INFO(pGetShortInstance) 2860 QUICK_ENTRY_POINT_INFO(pGetCharInstance) 2861 QUICK_ENTRY_POINT_INFO(pGetShortStatic) 2862 QUICK_ENTRY_POINT_INFO(pGetCharStatic) 2863 QUICK_ENTRY_POINT_INFO(pGet32Instance) 2864 QUICK_ENTRY_POINT_INFO(pGet32Static) 2865 QUICK_ENTRY_POINT_INFO(pGet64Instance) 2866 QUICK_ENTRY_POINT_INFO(pGet64Static) 2867 QUICK_ENTRY_POINT_INFO(pGetObjInstance) 2868 
QUICK_ENTRY_POINT_INFO(pGetObjStatic) 2869 QUICK_ENTRY_POINT_INFO(pAputObject) 2870 QUICK_ENTRY_POINT_INFO(pJniMethodStart) 2871 QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized) 2872 QUICK_ENTRY_POINT_INFO(pJniMethodEnd) 2873 QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized) 2874 QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference) 2875 QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized) 2876 QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline) 2877 QUICK_ENTRY_POINT_INFO(pLockObject) 2878 QUICK_ENTRY_POINT_INFO(pUnlockObject) 2879 QUICK_ENTRY_POINT_INFO(pCmpgDouble) 2880 QUICK_ENTRY_POINT_INFO(pCmpgFloat) 2881 QUICK_ENTRY_POINT_INFO(pCmplDouble) 2882 QUICK_ENTRY_POINT_INFO(pCmplFloat) 2883 QUICK_ENTRY_POINT_INFO(pCos) 2884 QUICK_ENTRY_POINT_INFO(pSin) 2885 QUICK_ENTRY_POINT_INFO(pAcos) 2886 QUICK_ENTRY_POINT_INFO(pAsin) 2887 QUICK_ENTRY_POINT_INFO(pAtan) 2888 QUICK_ENTRY_POINT_INFO(pAtan2) 2889 QUICK_ENTRY_POINT_INFO(pCbrt) 2890 QUICK_ENTRY_POINT_INFO(pCosh) 2891 QUICK_ENTRY_POINT_INFO(pExp) 2892 QUICK_ENTRY_POINT_INFO(pExpm1) 2893 QUICK_ENTRY_POINT_INFO(pHypot) 2894 QUICK_ENTRY_POINT_INFO(pLog) 2895 QUICK_ENTRY_POINT_INFO(pLog10) 2896 QUICK_ENTRY_POINT_INFO(pNextAfter) 2897 QUICK_ENTRY_POINT_INFO(pSinh) 2898 QUICK_ENTRY_POINT_INFO(pTan) 2899 QUICK_ENTRY_POINT_INFO(pTanh) 2900 QUICK_ENTRY_POINT_INFO(pFmod) 2901 QUICK_ENTRY_POINT_INFO(pL2d) 2902 QUICK_ENTRY_POINT_INFO(pFmodf) 2903 QUICK_ENTRY_POINT_INFO(pL2f) 2904 QUICK_ENTRY_POINT_INFO(pD2iz) 2905 QUICK_ENTRY_POINT_INFO(pF2iz) 2906 QUICK_ENTRY_POINT_INFO(pIdivmod) 2907 QUICK_ENTRY_POINT_INFO(pD2l) 2908 QUICK_ENTRY_POINT_INFO(pF2l) 2909 QUICK_ENTRY_POINT_INFO(pLdiv) 2910 QUICK_ENTRY_POINT_INFO(pLmod) 2911 QUICK_ENTRY_POINT_INFO(pLmul) 2912 QUICK_ENTRY_POINT_INFO(pShlLong) 2913 QUICK_ENTRY_POINT_INFO(pShrLong) 2914 QUICK_ENTRY_POINT_INFO(pUshrLong) 2915 QUICK_ENTRY_POINT_INFO(pIndexOf) 2916 QUICK_ENTRY_POINT_INFO(pStringCompareTo) 2917 QUICK_ENTRY_POINT_INFO(pMemcpy) 2918 QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline) 2919 QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline) 2920 QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge) 2921 QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck) 2922 QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck) 2923 QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck) 2924 QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck) 2925 QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck) 2926 QUICK_ENTRY_POINT_INFO(pInvokePolymorphic) 2927 QUICK_ENTRY_POINT_INFO(pTestSuspend) 2928 QUICK_ENTRY_POINT_INFO(pDeliverException) 2929 QUICK_ENTRY_POINT_INFO(pThrowArrayBounds) 2930 QUICK_ENTRY_POINT_INFO(pThrowDivZero) 2931 QUICK_ENTRY_POINT_INFO(pThrowNullPointer) 2932 QUICK_ENTRY_POINT_INFO(pThrowStackOverflow) 2933 QUICK_ENTRY_POINT_INFO(pDeoptimize) 2934 QUICK_ENTRY_POINT_INFO(pA64Load) 2935 QUICK_ENTRY_POINT_INFO(pA64Store) 2936 QUICK_ENTRY_POINT_INFO(pNewEmptyString) 2937 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B) 2938 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI) 2939 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII) 2940 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII) 2941 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIIString) 2942 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BString) 2943 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIICharset) 2944 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BCharset) 2945 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_C) 2946 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_CII) 2947 
QUICK_ENTRY_POINT_INFO(pNewStringFromChars_IIC) 2948 QUICK_ENTRY_POINT_INFO(pNewStringFromCodePoints) 2949 QUICK_ENTRY_POINT_INFO(pNewStringFromString) 2950 QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer) 2951 QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder) 2952 QUICK_ENTRY_POINT_INFO(pReadBarrierJni) 2953 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg00) 2954 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg01) 2955 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg02) 2956 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg03) 2957 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg04) 2958 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg05) 2959 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg06) 2960 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg07) 2961 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg08) 2962 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg09) 2963 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg10) 2964 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg11) 2965 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg12) 2966 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg13) 2967 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg14) 2968 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg15) 2969 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg16) 2970 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg17) 2971 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg18) 2972 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg19) 2973 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg20) 2974 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg21) 2975 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg22) 2976 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg23) 2977 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg24) 2978 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg25) 2979 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg26) 2980 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg27) 2981 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg28) 2982 QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg29) 2983 QUICK_ENTRY_POINT_INFO(pReadBarrierSlow) 2984 QUICK_ENTRY_POINT_INFO(pReadBarrierForRootSlow) 2985 2986 QUICK_ENTRY_POINT_INFO(pJniMethodFastStart) 2987 QUICK_ENTRY_POINT_INFO(pJniMethodFastEnd) 2988#undef QUICK_ENTRY_POINT_INFO 2989 2990 os << offset; 2991} 2992 2993void Thread::QuickDeliverException() { 2994 // Get exception from thread. 2995 ObjPtr<mirror::Throwable> exception = GetException(); 2996 CHECK(exception != nullptr); 2997 if (exception == GetDeoptimizationException()) { 2998 artDeoptimize(this); 2999 UNREACHABLE(); 3000 } 3001 3002 // This is a real exception: let the instrumentation know about it. 3003 instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation(); 3004 if (instrumentation->HasExceptionCaughtListeners() && 3005 IsExceptionThrownByCurrentMethod(exception)) { 3006 // Instrumentation may cause GC so keep the exception object safe. 3007 StackHandleScope<1> hs(this); 3008 HandleWrapperObjPtr<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception)); 3009 instrumentation->ExceptionCaughtEvent(this, exception.Ptr()); 3010 } 3011 // Does instrumentation need to deoptimize the stack? 3012 // Note: we do this *after* reporting the exception to instrumentation in case it 3013 // now requires deoptimization. It may happen if a debugger is attached and requests 3014 // new events (single-step, breakpoint, ...) when the exception is reported. 
  if (Dbg::IsForcedInterpreterNeededForException(this)) {
    NthCallerVisitor visitor(this, 0, false);
    visitor.WalkStack();
    if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) {
      // Save the exception into the deoptimization context so it can be restored
      // before entering the interpreter.
      PushDeoptimizationContext(
          JValue(), /* is_reference */ false, /* from_code */ false, exception);
      artDeoptimize(this);
      UNREACHABLE();
    } else {
      LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
                   << visitor.caller->PrettyMethod();
    }
  }

  // Don't leave exception visible while we try to find the handler, which may cause class
  // resolution.
  ClearException();
  QuickExceptionHandler exception_handler(this, false);
  exception_handler.FindCatch(exception);
  exception_handler.UpdateInstrumentationStack();
  exception_handler.DoLongJump();
}

Context* Thread::GetLongJumpContext() {
  Context* result = tlsPtr_.long_jump_context;
  if (result == nullptr) {
    result = Context::Create();
  } else {
    tlsPtr_.long_jump_context = nullptr;  // Avoid context being shared.
    result->Reset();
  }
  return result;
}

// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
struct CurrentMethodVisitor FINAL : public StackVisitor {
  CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread,
                     context,
                     StackVisitor::StackWalkKind::kIncludeInlinedFrames,
                     check_suspended),
        this_object_(nullptr),
        method_(nullptr),
        dex_pc_(0),
        abort_on_error_(abort_on_error) {}
  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      // Continue if this is a runtime method.
      return true;
    }
    if (context_ != nullptr) {
      this_object_ = GetThisObject();
    }
    method_ = m;
    dex_pc_ = GetDexPc(abort_on_error_);
    return false;
  }
  ObjPtr<mirror::Object> this_object_;
  ArtMethod* method_;
  uint32_t dex_pc_;
  const bool abort_on_error_;
};

ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc,
                                    bool check_suspended,
                                    bool abort_on_error) const {
  CurrentMethodVisitor visitor(const_cast<Thread*>(this),
                               nullptr,
                               check_suspended,
                               abort_on_error);
  visitor.WalkStack(false);
  if (dex_pc != nullptr) {
    *dex_pc = visitor.dex_pc_;
  }
  return visitor.method_;
}

bool Thread::HoldsLock(ObjPtr<mirror::Object> object) const {
  return object != nullptr && object->GetLockOwnerThreadId() == GetThreadId();
}

// RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
template <typename RootVisitor, bool kPrecise = false>
class ReferenceMapVisitor : public StackVisitor {
 public:
  ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
        // We are visiting the references in compiled frames, so we do not need
        // to know the inlined frames.
      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        visitor_(visitor) {}

  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (false) {
      LOG(INFO) << "Visiting stack roots in " << ArtMethod::PrettyMethod(GetMethod())
                << StringPrintf("@ PC:%04x", GetDexPc());
    }
    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
    if (shadow_frame != nullptr) {
      VisitShadowFrame(shadow_frame);
    } else {
      VisitQuickFrame();
    }
    return true;
  }

  void VisitShadowFrame(ShadowFrame* shadow_frame) REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod* m = shadow_frame->GetMethod();
    VisitDeclaringClass(m);
    DCHECK(m != nullptr);
    size_t num_regs = shadow_frame->NumberOfVRegs();
    DCHECK(m->IsNative() || shadow_frame->HasReferenceArray());
    // handle scope for JNI or References for interpreter.
    for (size_t reg = 0; reg < num_regs; ++reg) {
      mirror::Object* ref = shadow_frame->GetVRegReference(reg);
      if (ref != nullptr) {
        mirror::Object* new_ref = ref;
        visitor_(&new_ref, reg, this);
        if (new_ref != ref) {
          shadow_frame->SetVRegReference(reg, new_ref);
        }
      }
    }
    // Mark lock count map required for structured locking checks.
    shadow_frame->GetLockCountData().VisitMonitors(visitor_, -1, this);
  }

 private:
  // Visiting the declaring class is necessary so that we don't unload the class of a method that
  // is executing. We need to ensure that the code stays mapped. NO_THREAD_SAFETY_ANALYSIS since
  // the threads do not all hold the heap bitmap lock for parallel GC.
  void VisitDeclaringClass(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_)
      NO_THREAD_SAFETY_ANALYSIS {
    ObjPtr<mirror::Class> klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
    // klass can be null for runtime methods.
    if (klass != nullptr) {
      if (kVerifyImageObjectsMarked) {
        gc::Heap* const heap = Runtime::Current()->GetHeap();
        gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass,
                                                                                /*fail_ok*/true);
        if (space != nullptr && space->IsImageSpace()) {
          bool failed = false;
          if (!space->GetLiveBitmap()->Test(klass.Ptr())) {
            failed = true;
            LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image " << *space;
          } else if (!heap->GetLiveBitmap()->Test(klass.Ptr())) {
            failed = true;
            LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image through live bitmap " << *space;
          }
          if (failed) {
            GetThread()->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
            space->AsImageSpace()->DumpSections(LOG_STREAM(FATAL_WITHOUT_ABORT));
            LOG(FATAL_WITHOUT_ABORT) << "Method@" << method->GetDexMethodIndex() << ":" << method
                                     << " klass@" << klass.Ptr();
            // Pretty info last in case it crashes.
3176 LOG(FATAL) << "Method " << method->PrettyMethod() << " klass " 3177 << klass->PrettyClass(); 3178 } 3179 } 3180 } 3181 mirror::Object* new_ref = klass.Ptr(); 3182 visitor_(&new_ref, -1, this); 3183 if (new_ref != klass) { 3184 method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass()); 3185 } 3186 } 3187 } 3188 3189 template <typename T> 3190 ALWAYS_INLINE 3191 inline void VisitQuickFrameWithVregCallback() REQUIRES_SHARED(Locks::mutator_lock_) { 3192 ArtMethod** cur_quick_frame = GetCurrentQuickFrame(); 3193 DCHECK(cur_quick_frame != nullptr); 3194 ArtMethod* m = *cur_quick_frame; 3195 VisitDeclaringClass(m); 3196 3197 // Process register map (which native and runtime methods don't have) 3198 if (!m->IsNative() && !m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) { 3199 const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader(); 3200 DCHECK(method_header->IsOptimized()); 3201 auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>( 3202 reinterpret_cast<uintptr_t>(cur_quick_frame)); 3203 uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc()); 3204 CodeInfo code_info = method_header->GetOptimizedCodeInfo(); 3205 CodeInfoEncoding encoding = code_info.ExtractEncoding(); 3206 StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding); 3207 DCHECK(map.IsValid()); 3208 3209 T vreg_info(m, code_info, encoding, map, visitor_); 3210 3211 // Visit stack entries that hold pointers. 3212 const size_t number_of_bits = code_info.GetNumberOfStackMaskBits(encoding); 3213 BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, map); 3214 for (size_t i = 0; i < number_of_bits; ++i) { 3215 if (stack_mask.LoadBit(i)) { 3216 auto* ref_addr = vreg_base + i; 3217 mirror::Object* ref = ref_addr->AsMirrorPtr(); 3218 if (ref != nullptr) { 3219 mirror::Object* new_ref = ref; 3220 vreg_info.VisitStack(&new_ref, i, this); 3221 if (ref != new_ref) { 3222 ref_addr->Assign(new_ref); 3223 } 3224 } 3225 } 3226 } 3227 // Visit callee-save registers that hold pointers. 
      uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, map);
      for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
        if (register_mask & (1 << i)) {
          mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
          if (kIsDebugBuild && ref_addr == nullptr) {
            std::string thread_name;
            GetThread()->GetThreadName(thread_name);
            LOG(FATAL_WITHOUT_ABORT) << "On thread " << thread_name;
            DescribeStack(GetThread());
            LOG(FATAL) << "Found an unsaved callee-save register " << i << " (null GPRAddress) "
                       << "set in register_mask=" << register_mask << " at " << DescribeLocation();
          }
          if (*ref_addr != nullptr) {
            vreg_info.VisitRegister(ref_addr, i, this);
          }
        }
      }
    }
  }

  void VisitQuickFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kPrecise) {
      VisitQuickFramePrecise();
    } else {
      VisitQuickFrameNonPrecise();
    }
  }

  void VisitQuickFrameNonPrecise() REQUIRES_SHARED(Locks::mutator_lock_) {
    struct UndefinedVRegInfo {
      UndefinedVRegInfo(ArtMethod* method ATTRIBUTE_UNUSED,
                        const CodeInfo& code_info ATTRIBUTE_UNUSED,
                        const CodeInfoEncoding& encoding ATTRIBUTE_UNUSED,
                        const StackMap& map ATTRIBUTE_UNUSED,
                        RootVisitor& _visitor)
          : visitor(_visitor) {
      }

      ALWAYS_INLINE
      void VisitStack(mirror::Object** ref,
                      size_t stack_index ATTRIBUTE_UNUSED,
                      const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        visitor(ref, -1, stack_visitor);
      }

      ALWAYS_INLINE
      void VisitRegister(mirror::Object** ref,
                         size_t register_index ATTRIBUTE_UNUSED,
                         const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        visitor(ref, -1, stack_visitor);
      }

      RootVisitor& visitor;
    };
    VisitQuickFrameWithVregCallback<UndefinedVRegInfo>();
  }

  void VisitQuickFramePrecise() REQUIRES_SHARED(Locks::mutator_lock_) {
    struct StackMapVRegInfo {
      StackMapVRegInfo(ArtMethod* method,
                       const CodeInfo& _code_info,
                       const CodeInfoEncoding& _encoding,
                       const StackMap& map,
                       RootVisitor& _visitor)
          : number_of_dex_registers(method->GetCodeItem()->registers_size_),
            code_info(_code_info),
            encoding(_encoding),
            dex_register_map(code_info.GetDexRegisterMapOf(map,
                                                           encoding,
                                                           number_of_dex_registers)),
            visitor(_visitor) {
      }

      // TODO: If necessary, we should consider caching a reverse map instead of the linear
      // lookups for each location.
      void FindWithType(const size_t index,
                        const DexRegisterLocation::Kind kind,
                        mirror::Object** ref,
                        const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        bool found = false;
        for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) {
          DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(
              dex_reg, number_of_dex_registers, code_info, encoding);
          if (location.GetKind() == kind && static_cast<size_t>(location.GetValue()) == index) {
            visitor(ref, dex_reg, stack_visitor);
            found = true;
          }
        }

        if (!found) {
          // If nothing found, report with -1.
          visitor(ref, -1, stack_visitor);
        }
      }

      void VisitStack(mirror::Object** ref, size_t stack_index, const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        const size_t stack_offset = stack_index * kFrameSlotSize;
        FindWithType(stack_offset,
                     DexRegisterLocation::Kind::kInStack,
                     ref,
                     stack_visitor);
      }

      void VisitRegister(mirror::Object** ref,
                         size_t register_index,
                         const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        FindWithType(register_index,
                     DexRegisterLocation::Kind::kInRegister,
                     ref,
                     stack_visitor);
      }

      size_t number_of_dex_registers;
      const CodeInfo& code_info;
      const CodeInfoEncoding& encoding;
      DexRegisterMap dex_register_map;
      RootVisitor& visitor;
    };
    VisitQuickFrameWithVregCallback<StackMapVRegInfo>();
  }

  // Visitor for when we visit a root.
  RootVisitor& visitor_;
};

class RootCallbackVisitor {
 public:
  RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}

  void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
  }

 private:
  RootVisitor* const visitor_;
  const uint32_t tid_;
};

template <bool kPrecise>
void Thread::VisitRoots(RootVisitor* visitor) {
  const uint32_t thread_id = GetThreadId();
  visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
  if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
                       RootInfo(kRootNativeStack, thread_id));
  }
  visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
  tlsPtr_.jni_env->locals.VisitRoots(visitor, RootInfo(kRootJNILocal, thread_id));
  tlsPtr_.jni_env->monitors.VisitRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
  HandleScopeVisitRoots(visitor, thread_id);
  if (tlsPtr_.debug_invoke_req != nullptr) {
    tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
  }
  // Visit roots for deoptimization.
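  // The stacked shadow frames and the deoptimization context records below can both hold
  // references (return values and pending exceptions) that must be reported as roots.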
  if (tlsPtr_.stacked_shadow_frame_record != nullptr) {
    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
    ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback);
    for (StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
         record != nullptr;
         record = record->GetLink()) {
      for (ShadowFrame* shadow_frame = record->GetShadowFrame();
           shadow_frame != nullptr;
           shadow_frame = shadow_frame->GetLink()) {
        mapper.VisitShadowFrame(shadow_frame);
      }
    }
  }
  for (DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
       record != nullptr;
       record = record->GetLink()) {
    if (record->IsReference()) {
      visitor->VisitRootIfNonNull(record->GetReturnValueAsGCRoot(),
                                  RootInfo(kRootThreadObject, thread_id));
    }
    visitor->VisitRootIfNonNull(record->GetPendingExceptionAsGCRoot(),
                                RootInfo(kRootThreadObject, thread_id));
  }
  if (tlsPtr_.frame_id_to_shadow_frame != nullptr) {
    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
    ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback);
    for (FrameIdToShadowFrame* record = tlsPtr_.frame_id_to_shadow_frame;
         record != nullptr;
         record = record->GetNext()) {
      mapper.VisitShadowFrame(record->GetShadowFrame());
    }
  }
  for (auto* verifier = tlsPtr_.method_verifier; verifier != nullptr; verifier = verifier->link_) {
    verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id));
  }
  // Visit roots on this thread's stack.
  RuntimeContextType context;
  RootCallbackVisitor visitor_to_callback(visitor, thread_id);
  ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, &context, visitor_to_callback);
  mapper.template WalkStack<StackVisitor::CountTransitions::kNo>(false);
  for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
    visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
  }
}

void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  if ((flags & VisitRootFlags::kVisitRootFlagPrecise) != 0) {
    VisitRoots<true>(visitor);
  } else {
    VisitRoots<false>(visitor);
  }
}

class VerifyRootVisitor : public SingleRootVisitor {
 public:
  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    VerifyObject(root);
  }
};

void Thread::VerifyStackImpl() {
  VerifyRootVisitor visitor;
  std::unique_ptr<Context> context(Context::Create());
  RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId());
  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback);
  mapper.WalkStack();
}

// Set the stack end to the value to be used during a stack overflow.
void Thread::SetStackEndForStackOverflow() {
  // During stack overflow we allow use of the full stack.
  if (tlsPtr_.stack_end == tlsPtr_.stack_begin) {
    // However, we seem to have already extended to use the full stack.
3462 LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently " 3463 << GetStackOverflowReservedBytes(kRuntimeISA) << ")?"; 3464 DumpStack(LOG_STREAM(ERROR)); 3465 LOG(FATAL) << "Recursive stack overflow."; 3466 } 3467 3468 tlsPtr_.stack_end = tlsPtr_.stack_begin; 3469 3470 // Remove the stack overflow protection if is it set up. 3471 bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks(); 3472 if (implicit_stack_check) { 3473 if (!UnprotectStack()) { 3474 LOG(ERROR) << "Unable to remove stack protection for stack overflow"; 3475 } 3476 } 3477} 3478 3479void Thread::SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit) { 3480 DCHECK_LE(start, end); 3481 DCHECK_LE(end, limit); 3482 tlsPtr_.thread_local_start = start; 3483 tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start; 3484 tlsPtr_.thread_local_end = end; 3485 tlsPtr_.thread_local_limit = limit; 3486 tlsPtr_.thread_local_objects = 0; 3487} 3488 3489bool Thread::HasTlab() const { 3490 bool has_tlab = tlsPtr_.thread_local_pos != nullptr; 3491 if (has_tlab) { 3492 DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr); 3493 } else { 3494 DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr); 3495 } 3496 return has_tlab; 3497} 3498 3499std::ostream& operator<<(std::ostream& os, const Thread& thread) { 3500 thread.ShortDump(os); 3501 return os; 3502} 3503 3504bool Thread::ProtectStack(bool fatal_on_error) { 3505 void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize; 3506 VLOG(threads) << "Protecting stack at " << pregion; 3507 if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) { 3508 if (fatal_on_error) { 3509 LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. 
" 3510 "Reason: " 3511 << strerror(errno) << " size: " << kStackOverflowProtectedSize; 3512 } 3513 return false; 3514 } 3515 return true; 3516} 3517 3518bool Thread::UnprotectStack() { 3519 void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize; 3520 VLOG(threads) << "Unprotecting stack at " << pregion; 3521 return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0; 3522} 3523 3524void Thread::ActivateSingleStepControl(SingleStepControl* ssc) { 3525 CHECK(Dbg::IsDebuggerActive()); 3526 CHECK(GetSingleStepControl() == nullptr) << "Single step already active in thread " << *this; 3527 CHECK(ssc != nullptr); 3528 tlsPtr_.single_step_control = ssc; 3529} 3530 3531void Thread::DeactivateSingleStepControl() { 3532 CHECK(Dbg::IsDebuggerActive()); 3533 CHECK(GetSingleStepControl() != nullptr) << "Single step not active in thread " << *this; 3534 SingleStepControl* ssc = GetSingleStepControl(); 3535 tlsPtr_.single_step_control = nullptr; 3536 delete ssc; 3537} 3538 3539void Thread::SetDebugInvokeReq(DebugInvokeReq* req) { 3540 CHECK(Dbg::IsDebuggerActive()); 3541 CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this; 3542 CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself"; 3543 CHECK(req != nullptr); 3544 tlsPtr_.debug_invoke_req = req; 3545} 3546 3547void Thread::ClearDebugInvokeReq() { 3548 CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this; 3549 CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself"; 3550 DebugInvokeReq* req = tlsPtr_.debug_invoke_req; 3551 tlsPtr_.debug_invoke_req = nullptr; 3552 delete req; 3553} 3554 3555void Thread::PushVerifier(verifier::MethodVerifier* verifier) { 3556 verifier->link_ = tlsPtr_.method_verifier; 3557 tlsPtr_.method_verifier = verifier; 3558} 3559 3560void Thread::PopVerifier(verifier::MethodVerifier* verifier) { 3561 CHECK_EQ(tlsPtr_.method_verifier, verifier); 3562 tlsPtr_.method_verifier = verifier->link_; 3563} 3564 3565size_t Thread::NumberOfHeldMutexes() const { 3566 size_t count = 0; 3567 for (BaseMutex* mu : tlsPtr_.held_mutexes) { 3568 count += mu != nullptr ? 1 : 0; 3569 } 3570 return count; 3571} 3572 3573void Thread::DeoptimizeWithDeoptimizationException(JValue* result) { 3574 DCHECK_EQ(GetException(), Thread::GetDeoptimizationException()); 3575 ClearException(); 3576 ShadowFrame* shadow_frame = 3577 PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame); 3578 ObjPtr<mirror::Throwable> pending_exception; 3579 bool from_code = false; 3580 PopDeoptimizationContext(result, &pending_exception, &from_code); 3581 SetTopOfStack(nullptr); 3582 SetTopOfShadowStack(shadow_frame); 3583 3584 // Restore the exception that was pending before deoptimization then interpret the 3585 // deoptimized frames. 
  if (pending_exception != nullptr) {
    SetException(pending_exception);
  }
  interpreter::EnterInterpreterFromDeoptimize(this, shadow_frame, from_code, result);
}

void Thread::SetException(ObjPtr<mirror::Throwable> new_exception) {
  CHECK(new_exception != nullptr);
  // TODO: DCHECK(!IsExceptionPending());
  tlsPtr_.exception = new_exception.Ptr();
}

bool Thread::IsAotCompiler() {
  return Runtime::Current()->IsAotCompiler();
}

mirror::Object* Thread::GetPeerFromOtherThread() const {
  DCHECK(tlsPtr_.jpeer == nullptr);
  mirror::Object* peer = tlsPtr_.opeer;
  if (kUseReadBarrier && Current()->GetIsGcMarking()) {
    // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
    // may not have been flipped yet and peer may be a from-space (stale) ref. So explicitly
    // mark/forward it here.
    peer = art::ReadBarrier::Mark(peer);
  }
  return peer;
}

}  // namespace art