thread.cc revision df13240f4b9325b34d09e20cdac4e9a0b12ead61
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread.h"

#if !defined(__APPLE__)
#include <sched.h>
#endif

#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
#include <sys/time.h>

#include <algorithm>
#include <bitset>
#include <cerrno>
#include <iostream>
#include <list>
#include <sstream>

#include "android-base/stringprintf.h"

#include "arch/context-inl.h"
#include "arch/context.h"
#include "art_field-inl.h"
#include "art_method-inl.h"
#include "base/bit_utils.h"
#include "base/memory_tool.h"
#include "base/mutex.h"
#include "base/systrace.h"
#include "base/timing_logger.h"
#include "base/to_str.h"
#include "class_linker-inl.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "dex_file_annotations.h"
#include "dex_file_types.h"
#include "entrypoints/entrypoint_utils.h"
#include "entrypoints/quick/quick_alloc_entrypoints.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/accounting/heap_bitmap-inl.h"
#include "gc/allocator/rosalloc.h"
#include "gc/heap.h"
#include "gc/space/space-inl.h"
#include "gc_root.h"
#include "handle_scope-inl.h"
#include "indirect_reference_table-inl.h"
#include "interpreter/interpreter.h"
#include "interpreter/shadow_frame.h"
#include "java_frame_root_info.h"
#include "java_vm_ext.h"
#include "jni_internal.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "native_stack_dump.h"
#include "nativehelper/ScopedLocalRef.h"
#include "nativehelper/ScopedUtfChars.h"
#include "nth_caller_visitor.h"
#include "oat_quick_method_header.h"
#include "obj_ptr-inl.h"
#include "object_lock.h"
#include "quick/quick_method_frame_info.h"
#include "quick_exception_handler.h"
#include "read_barrier-inl.h"
#include "reflection.h"
#include "runtime.h"
#include "runtime_callbacks.h"
#include "scoped_thread_state_change-inl.h"
#include "stack.h"
#include "stack_map.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "utils.h"
#include "verifier/method_verifier.h"
#include "verify_object.h"
#include "well_known_classes.h"

#if ART_USE_FUTEXES
#include "linux/futex.h"
#include "sys/syscall.h"
#ifndef SYS_futex
#define SYS_futex __NR_futex
#endif
#endif  // ART_USE_FUTEXES

namespace art {

using android::base::StringAppendV;
using android::base::StringPrintf;

extern "C" NO_RETURN void artDeoptimize(Thread* self);

bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = nullptr;
const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
bool (*Thread::is_sensitive_thread_hook_)() = nullptr;
Thread* Thread::jit_sensitive_thread_ =
    nullptr;

static constexpr bool kVerifyImageObjectsMarked = kIsDebugBuild;

// For implicit overflow checks we reserve an extra piece of memory at the bottom
// of the stack (lowest memory). The higher portion of the memory
// is protected against reads and the lower is available for use while
// throwing the StackOverflow exception.
constexpr size_t kStackOverflowProtectedSize = 4 * kMemoryToolStackGuardSizeScale * KB;

static const char* kThreadNameDuringStartup = "<native thread without managed peer>";

void Thread::InitCardTable() {
  tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
}

static void UnimplementedEntryPoint() {
  UNIMPLEMENTED(FATAL);
}

void InitEntryPoints(JniEntryPoints* jpoints, QuickEntryPoints* qpoints);
void UpdateReadBarrierEntrypoints(QuickEntryPoints* qpoints, bool is_active);

void Thread::SetIsGcMarkingAndUpdateEntrypoints(bool is_marking) {
  CHECK(kUseReadBarrier);
  tls32_.is_gc_marking = is_marking;
  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active */ is_marking);
  ResetQuickAllocEntryPointsForThread(is_marking);
}

void Thread::InitTlsEntryPoints() {
  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.jni_entrypoints);
  uintptr_t* end = reinterpret_cast<uintptr_t*>(
      reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) + sizeof(tlsPtr_.quick_entrypoints));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
  }
  InitEntryPoints(&tlsPtr_.jni_entrypoints, &tlsPtr_.quick_entrypoints);
}

void Thread::ResetQuickAllocEntryPointsForThread(bool is_marking) {
  if (kUseReadBarrier && kRuntimeISA != kX86_64) {
    // Allocation entrypoint switching is currently only implemented for X86_64.
    is_marking = true;
  }
  ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints, is_marking);
}

class DeoptimizationContextRecord {
 public:
  DeoptimizationContextRecord(const JValue& ret_val,
                              bool is_reference,
                              bool from_code,
                              ObjPtr<mirror::Throwable> pending_exception,
                              DeoptimizationMethodType method_type,
                              DeoptimizationContextRecord* link)
      : ret_val_(ret_val),
        is_reference_(is_reference),
        from_code_(from_code),
        pending_exception_(pending_exception.Ptr()),
        deopt_method_type_(method_type),
        link_(link) {}

  JValue GetReturnValue() const { return ret_val_; }
  bool IsReference() const { return is_reference_; }
  bool GetFromCode() const { return from_code_; }
  ObjPtr<mirror::Throwable> GetPendingException() const { return pending_exception_; }
  DeoptimizationContextRecord* GetLink() const { return link_; }
  mirror::Object** GetReturnValueAsGCRoot() {
    DCHECK(is_reference_);
    return ret_val_.GetGCRoot();
  }
  mirror::Object** GetPendingExceptionAsGCRoot() {
    return reinterpret_cast<mirror::Object**>(&pending_exception_);
  }
  DeoptimizationMethodType GetDeoptimizationMethodType() const {
    return deopt_method_type_;
  }

 private:
  // The value returned by the method at the top of the stack before deoptimization.
  JValue ret_val_;

  // Indicates whether the returned value is a reference. If so, the GC will visit it.
  const bool is_reference_;

  // Whether the context was created from an explicit deoptimization in the code.
  const bool from_code_;

  // The exception that was pending before deoptimization (or null if there was no pending
  // exception).
  mirror::Throwable* pending_exception_;

  // Whether the context was created for an (idempotent) runtime method.
  const DeoptimizationMethodType deopt_method_type_;

  // A link to the previous DeoptimizationContextRecord.
  DeoptimizationContextRecord* const link_;

  DISALLOW_COPY_AND_ASSIGN(DeoptimizationContextRecord);
};

class StackedShadowFrameRecord {
 public:
  StackedShadowFrameRecord(ShadowFrame* shadow_frame,
                           StackedShadowFrameType type,
                           StackedShadowFrameRecord* link)
      : shadow_frame_(shadow_frame),
        type_(type),
        link_(link) {}

  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
  StackedShadowFrameType GetType() const { return type_; }
  StackedShadowFrameRecord* GetLink() const { return link_; }

 private:
  ShadowFrame* const shadow_frame_;
  const StackedShadowFrameType type_;
  StackedShadowFrameRecord* const link_;

  DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
};

void Thread::PushDeoptimizationContext(const JValue& return_value,
                                       bool is_reference,
                                       ObjPtr<mirror::Throwable> exception,
                                       bool from_code,
                                       DeoptimizationMethodType method_type) {
  DeoptimizationContextRecord* record = new DeoptimizationContextRecord(
      return_value,
      is_reference,
      from_code,
      exception,
      method_type,
      tlsPtr_.deoptimization_context_stack);
  tlsPtr_.deoptimization_context_stack = record;
}

void Thread::PopDeoptimizationContext(JValue* result,
                                      ObjPtr<mirror::Throwable>* exception,
                                      bool* from_code,
                                      DeoptimizationMethodType* method_type) {
  AssertHasDeoptimizationContext();
  DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
  tlsPtr_.deoptimization_context_stack = record->GetLink();
  result->SetJ(record->GetReturnValue().GetJ());
  *exception = record->GetPendingException();
  *from_code = record->GetFromCode();
  *method_type = record->GetDeoptimizationMethodType();
  delete record;
}

void Thread::AssertHasDeoptimizationContext() {
  CHECK(tlsPtr_.deoptimization_context_stack != nullptr)
      << "No deoptimization context for thread " << *this;
}

void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
  StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
      sf, type, tlsPtr_.stacked_shadow_frame_record);
  tlsPtr_.stacked_shadow_frame_record = record;
}

ShadowFrame* Thread::PopStackedShadowFrame(StackedShadowFrameType type, bool must_be_present) {
  StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
  if (must_be_present) {
    DCHECK(record != nullptr);
  } else {
    if (record == nullptr || record->GetType() != type) {
      return nullptr;
    }
  }
  tlsPtr_.stacked_shadow_frame_record = record->GetLink();
  ShadowFrame* shadow_frame = record->GetShadowFrame();
  delete record;
  return shadow_frame;
}

class FrameIdToShadowFrame {
 public:
  static FrameIdToShadowFrame* Create(size_t frame_id,
                                      ShadowFrame* shadow_frame,
                                      FrameIdToShadowFrame* next,
                                      size_t num_vregs) {
    // Append a bool array at the end to keep track of what vregs are updated by the debugger.
    uint8_t* memory = new uint8_t[sizeof(FrameIdToShadowFrame) + sizeof(bool) * num_vregs];
    return new (memory) FrameIdToShadowFrame(frame_id, shadow_frame, next);
  }

  static void Delete(FrameIdToShadowFrame* f) {
    uint8_t* memory = reinterpret_cast<uint8_t*>(f);
    delete[] memory;
  }

  size_t GetFrameId() const { return frame_id_; }
  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
  FrameIdToShadowFrame* GetNext() const { return next_; }
  void SetNext(FrameIdToShadowFrame* next) { next_ = next; }
  bool* GetUpdatedVRegFlags() {
    return updated_vreg_flags_;
  }

 private:
  FrameIdToShadowFrame(size_t frame_id,
                       ShadowFrame* shadow_frame,
                       FrameIdToShadowFrame* next)
      : frame_id_(frame_id),
        shadow_frame_(shadow_frame),
        next_(next) {}

  const size_t frame_id_;
  ShadowFrame* const shadow_frame_;
  FrameIdToShadowFrame* next_;
  bool updated_vreg_flags_[0];

  DISALLOW_COPY_AND_ASSIGN(FrameIdToShadowFrame);
};

static FrameIdToShadowFrame* FindFrameIdToShadowFrame(FrameIdToShadowFrame* head,
                                                      size_t frame_id) {
  FrameIdToShadowFrame* found = nullptr;
  for (FrameIdToShadowFrame* record = head; record != nullptr; record = record->GetNext()) {
    if (record->GetFrameId() == frame_id) {
      if (kIsDebugBuild) {
        // Sanity check we have at most one record for this frame.
        CHECK(found == nullptr) << "Multiple records for the frame " << frame_id;
        found = record;
      } else {
        return record;
      }
    }
  }
  return found;
}

ShadowFrame* Thread::FindDebuggerShadowFrame(size_t frame_id) {
  FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
      tlsPtr_.frame_id_to_shadow_frame, frame_id);
  if (record != nullptr) {
    return record->GetShadowFrame();
  }
  return nullptr;
}

// Must only be called when FindDebuggerShadowFrame(frame_id) returns non-nullptr.
bool* Thread::GetUpdatedVRegFlags(size_t frame_id) {
  FrameIdToShadowFrame* record = FindFrameIdToShadowFrame(
      tlsPtr_.frame_id_to_shadow_frame, frame_id);
  CHECK(record != nullptr);
  return record->GetUpdatedVRegFlags();
}

ShadowFrame* Thread::FindOrCreateDebuggerShadowFrame(size_t frame_id,
                                                     uint32_t num_vregs,
                                                     ArtMethod* method,
                                                     uint32_t dex_pc) {
  ShadowFrame* shadow_frame = FindDebuggerShadowFrame(frame_id);
  if (shadow_frame != nullptr) {
    return shadow_frame;
  }
  VLOG(deopt) << "Create pre-deopted ShadowFrame for " << ArtMethod::PrettyMethod(method);
  shadow_frame = ShadowFrame::CreateDeoptimizedFrame(num_vregs, nullptr, method, dex_pc);
  FrameIdToShadowFrame* record = FrameIdToShadowFrame::Create(frame_id,
                                                              shadow_frame,
                                                              tlsPtr_.frame_id_to_shadow_frame,
                                                              num_vregs);
  for (uint32_t i = 0; i < num_vregs; i++) {
    // Do this to clear all references for root visitors.
    shadow_frame->SetVRegReference(i, nullptr);
    // This flag will be changed to true if the debugger modifies the value.
    record->GetUpdatedVRegFlags()[i] = false;
  }
  tlsPtr_.frame_id_to_shadow_frame = record;
  return shadow_frame;
}

void Thread::RemoveDebuggerShadowFrameMapping(size_t frame_id) {
  FrameIdToShadowFrame* head = tlsPtr_.frame_id_to_shadow_frame;
  if (head->GetFrameId() == frame_id) {
    tlsPtr_.frame_id_to_shadow_frame = head->GetNext();
    FrameIdToShadowFrame::Delete(head);
    return;
  }
  FrameIdToShadowFrame* prev = head;
  for (FrameIdToShadowFrame* record = head->GetNext();
       record != nullptr;
       prev = record, record = record->GetNext()) {
    if (record->GetFrameId() == frame_id) {
      prev->SetNext(record->GetNext());
      FrameIdToShadowFrame::Delete(record);
      return;
    }
  }
  LOG(FATAL) << "No shadow frame for frame " << frame_id;
  UNREACHABLE();
}

void Thread::InitTid() {
  tls32_.tid = ::art::GetTid();
}

void Thread::InitAfterFork() {
  // One thread (us) survived the fork, but we have a new tid so we need to
  // update the value stashed in this Thread*.
  InitTid();
}

void* Thread::CreateCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
    return nullptr;
  }
  {
    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
    //       after self->Init().
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    // Check that if we got here we cannot be shutting down (as shutdown should never have started
    // while threads are being born).
    CHECK(!runtime->IsShuttingDownLocked());
    // Note: given that the JNIEnv is created in the parent thread, the only failure point here is
    //       a mess in InitStackHwm. We do not have a reasonable way to recover from that, so abort
    //       the runtime in such a case. In case this ever changes, we need to make sure here to
    //       delete the tmp_jni_env, as we own it at this point.
    CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM(), self->tlsPtr_.tmp_jni_env));
    self->tlsPtr_.tmp_jni_env = nullptr;
    Runtime::Current()->EndThreadBirth();
  }
  {
    ScopedObjectAccess soa(self);
    self->InitStringEntryPoints();

    // Copy peer into self, deleting global reference when done.
    CHECK(self->tlsPtr_.jpeer != nullptr);
    self->tlsPtr_.opeer = soa.Decode<mirror::Object>(self->tlsPtr_.jpeer).Ptr();
    self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
    self->tlsPtr_.jpeer = nullptr;
    self->SetThreadName(self->GetThreadName()->ToModifiedUtf8().c_str());

    ArtField* priorityField = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority);
    self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));

    runtime->GetRuntimeCallbacks()->ThreadStart(self);

    // Invoke the 'run' method of our java.lang.Thread.
    ObjPtr<mirror::Object> receiver = self->tlsPtr_.opeer;
    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
    ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
    InvokeVirtualOrInterfaceWithJValues(soa, ref.get(), mid, nullptr);
  }
  // Detach and delete self.
  Runtime::Current()->GetThreadList()->Unregister(self);

  return nullptr;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
                                  ObjPtr<mirror::Object> thread_peer) {
  ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer);
  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
  // to stop it from going away.
  if (kIsDebugBuild) {
    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
    if (result != nullptr && !result->IsSuspended()) {
      Locks::thread_list_lock_->AssertHeld(soa.Self());
    }
  }
  return result;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
                                  jobject java_thread) {
  return FromManagedThread(soa, soa.Decode<mirror::Object>(java_thread).Ptr());
}

static size_t FixStackSize(size_t stack_size) {
  // A stack size of zero means "use the default".
  if (stack_size == 0) {
    stack_size = Runtime::Current()->GetDefaultStackSize();
  }

  // Dalvik used the bionic pthread default stack size for native threads,
  // so include that here to support apps that expect large native stacks.
  stack_size += 1 * MB;

  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
  if (stack_size < PTHREAD_STACK_MIN) {
    stack_size = PTHREAD_STACK_MIN;
  }

  if (Runtime::Current()->ExplicitStackOverflowChecks()) {
    // It's likely that callers are trying to ensure they have at least a certain amount of
    // stack space, so we should add our reserved space on top of what they requested, rather
    // than implicitly take it away from them.
    stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
  } else {
    // If we are going to use implicit stack checks, allocate space for the protected
    // region at the bottom of the stack.
    stack_size += Thread::kStackOverflowImplicitCheckSize +
        GetStackOverflowReservedBytes(kRuntimeISA);
  }

  // Some systems require the stack size to be a multiple of the system page size, so round up.
  stack_size = RoundUp(stack_size, kPageSize);

  return stack_size;
}

// Return the nearest page-aligned address below the current stack top.
NO_INLINE
static uint8_t* FindStackTop() {
  return reinterpret_cast<uint8_t*>(
      AlignDown(__builtin_frame_address(0), kPageSize));
}

// Install a protected region in the stack. This is used to trigger a SIGSEGV if a stack
// overflow is detected. It is located right below the stack_begin_.
ATTRIBUTE_NO_SANITIZE_ADDRESS
void Thread::InstallImplicitProtection() {
  uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
  // Page containing current top of stack.
  uint8_t* stack_top = FindStackTop();

  // Try to directly protect the stack.
  VLOG(threads) << "installing stack protected region at " << std::hex <<
      static_cast<void*>(pregion) << " to " <<
      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
  if (ProtectStack(/* fatal_on_error */ false)) {
    // Tell the kernel that we won't be needing these pages any more.
    // NB. madvise will probably write zeroes into the memory (on linux it does).
    uint32_t unwanted_size = stack_top - pregion - kPageSize;
    madvise(pregion, unwanted_size, MADV_DONTNEED);
    return;
  }

  // There is a little complexity here that deserves a special mention. On some
  // architectures, the stack is created using a VM_GROWSDOWN flag
  // to prevent memory being allocated when it's not needed. This flag makes the
  // kernel only allocate memory for the stack by growing down in memory. Because we
  // want to put an mprotected region far away from that at the stack top, we need
  // to make sure the pages for the stack are mapped in before we call mprotect.
  //
  // The failed mprotect in UnprotectStack is an indication of a thread with VM_GROWSDOWN
  // with a non-mapped stack (usually only the main thread).
  //
  // We map in the stack by reading every page from the stack bottom (highest address)
  // to the stack top. (We then madvise this away.) This must be done by reading from the
  // current stack pointer downwards.
  //
  // Accesses too far below the current machine register corresponding to the stack pointer (e.g.,
  // ESP on x86[-32], SP on ARM) might cause a SIGSEGV (at least on x86 with newer kernels). We
  // thus have to move the stack pointer. We do this portably by using a recursive function with a
  // large stack frame size.

  // (Defensively) first remove the protection on the protected region as we'll want to read
  // and write it. Ignore errors.
  UnprotectStack();

  VLOG(threads) << "Need to map in stack for thread at " << std::hex <<
      static_cast<void*>(pregion);

  struct RecurseDownStack {
    // This function has an intentionally large stack size.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wframe-larger-than="
    NO_INLINE
    static void Touch(uintptr_t target) {
      volatile size_t zero = 0;
      // Use a large local volatile array to ensure a large frame size. Do not use anything close
      // to a full page for ASAN. It would be nice to ensure the frame size is at most a page, but
      // there is no pragma support for this.
      // Note: for ASAN we need to shrink the array a bit, as there's other overhead.
      constexpr size_t kAsanMultiplier =
#ifdef ADDRESS_SANITIZER
          2u;
#else
          1u;
#endif
      volatile char space[kPageSize - (kAsanMultiplier * 256)];
      char sink ATTRIBUTE_UNUSED = space[zero];
      if (reinterpret_cast<uintptr_t>(space) >= target + kPageSize) {
        Touch(target);
      }
      zero *= 2;  // Try to avoid tail recursion.
    }
#pragma GCC diagnostic pop
  };
  RecurseDownStack::Touch(reinterpret_cast<uintptr_t>(pregion));

  VLOG(threads) << "(again) installing stack protected region at " << std::hex <<
      static_cast<void*>(pregion) << " to " <<
      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);

  // Protect the bottom of the stack to prevent read/write to it.
  ProtectStack(/* fatal_on_error */ true);

  // Tell the kernel that we won't be needing these pages any more.
  // NB. madvise will probably write zeroes into the memory (on linux it does).
  uint32_t unwanted_size = stack_top - pregion - kPageSize;
  madvise(pregion, unwanted_size, MADV_DONTNEED);
}

void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
  CHECK(java_peer != nullptr);
  Thread* self = static_cast<JNIEnvExt*>(env)->self;

  if (VLOG_IS_ON(threads)) {
    ScopedObjectAccess soa(env);

    ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
    ObjPtr<mirror::String> java_name =
        f->GetObject(soa.Decode<mirror::Object>(java_peer))->AsString();
    std::string thread_name;
    if (java_name != nullptr) {
      thread_name = java_name->ToModifiedUtf8();
    } else {
      thread_name = "(Unnamed)";
    }

    VLOG(threads) << "Creating native thread for " << thread_name;
    self->Dump(LOG_STREAM(INFO));
  }

  Runtime* runtime = Runtime::Current();

  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
  bool thread_start_during_shutdown = false;
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDownLocked()) {
      thread_start_during_shutdown = true;
    } else {
      runtime->StartThreadBirth();
    }
  }
  if (thread_start_during_shutdown) {
    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
    return;
  }

  Thread* child_thread = new Thread(is_daemon);
  // Use global JNI ref to hold peer live while child thread starts.
  child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
  stack_size = FixStackSize(stack_size);

  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing
  // to assign it.
  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
                    reinterpret_cast<jlong>(child_thread));

  // Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and
  // do not have a good way to report this on the child's side.
  std::string error_msg;
  std::unique_ptr<JNIEnvExt> child_jni_env_ext(
      JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM(), &error_msg));

  int pthread_create_result = 0;
  if (child_jni_env_ext.get() != nullptr) {
    pthread_t new_pthread;
    pthread_attr_t attr;
    child_thread->tlsPtr_.tmp_jni_env = child_jni_env_ext.get();
    CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
    CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
                       "PTHREAD_CREATE_DETACHED");
    CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
    pthread_create_result = pthread_create(&new_pthread,
                                           &attr,
                                           Thread::CreateCallback,
                                           child_thread);
    CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");

    if (pthread_create_result == 0) {
      // pthread_create started the new thread. The child is now responsible for managing the
      // JNIEnvExt we created.
      // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
      //       between the threads.
      child_jni_env_ext.release();
      return;
    }
  }

  // Either JNIEnvExt::Create or pthread_create(3) failed, so clean up.
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    runtime->EndThreadBirth();
  }
  // Manually delete the global reference since Thread::Init will not have been run.
  env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer);
  child_thread->tlsPtr_.jpeer = nullptr;
  delete child_thread;
  child_thread = nullptr;
  // TODO: remove from thread group?
  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
  {
    std::string msg(child_jni_env_ext.get() == nullptr ?
        StringPrintf("Could not allocate JNI Env: %s", error_msg.c_str()) :
        StringPrintf("pthread_create (%s stack) failed: %s",
                     PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
    ScopedObjectAccess soa(env);
    soa.Self()->ThrowOutOfMemoryError(msg.c_str());
  }
}

bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_env_ext) {
  // This function does all the initialization that must be run by the native thread it applies to.
  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
  // we can handshake with the corresponding native thread when it's ready.) Check this native
  // thread hasn't been through here already...
  CHECK(Thread::Current() == nullptr);

  // Set pthread_self_ ahead of pthread_setspecific, which makes Thread::Current() work; this
  // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
  tlsPtr_.pthread_self = pthread_self();
  CHECK(is_started_);

  SetUpAlternateSignalStack();
  if (!InitStackHwm()) {
    return false;
  }
  InitCpu();
  InitTlsEntryPoints();
  RemoveSuspendTrigger();
  InitCardTable();
  InitTid();
  interpreter::InitInterpreterTls(this);

#ifdef ART_TARGET_ANDROID
  __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
#else
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
#endif
  DCHECK_EQ(Thread::Current(), this);

  tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);

  if (jni_env_ext != nullptr) {
    DCHECK_EQ(jni_env_ext->vm, java_vm);
    DCHECK_EQ(jni_env_ext->self, this);
    tlsPtr_.jni_env = jni_env_ext;
  } else {
    std::string error_msg;
    tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm, &error_msg);
    if (tlsPtr_.jni_env == nullptr) {
      LOG(ERROR) << "Failed to create JNIEnvExt: " << error_msg;
      return false;
    }
  }

  thread_list->Register(this);
  return true;
}

template <typename PeerAction>
Thread* Thread::Attach(const char* thread_name, bool as_daemon, PeerAction peer_action) {
  Runtime* runtime = Runtime::Current();
  if (runtime == nullptr) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
    return nullptr;
  }
  Thread* self;
  {
    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDownLocked()) {
      LOG(WARNING) << "Thread attaching while runtime is shutting down: " << thread_name;
      return nullptr;
    } else {
      Runtime::Current()->StartThreadBirth();
      self = new Thread(as_daemon);
      bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
      Runtime::Current()->EndThreadBirth();
      if (!init_success) {
        delete self;
        return nullptr;
      }
    }
  }

  self->InitStringEntryPoints();

  CHECK_NE(self->GetState(), kRunnable);
  self->SetState(kNative);

  // Run the action that is acting on the peer.
  if (!peer_action(self)) {
    runtime->GetThreadList()->Unregister(self);
    // Unregister deletes self, no need to do this here.
    return nullptr;
  }

  if (VLOG_IS_ON(threads)) {
    if (thread_name != nullptr) {
      VLOG(threads) << "Attaching thread " << thread_name;
    } else {
      VLOG(threads) << "Attaching unnamed thread.";
    }
    ScopedObjectAccess soa(self);
    self->Dump(LOG_STREAM(INFO));
  }

  {
    ScopedObjectAccess soa(self);
    runtime->GetRuntimeCallbacks()->ThreadStart(self);
  }

  return self;
}

Thread* Thread::Attach(const char* thread_name,
                       bool as_daemon,
                       jobject thread_group,
                       bool create_peer) {
  auto create_peer_action = [&](Thread* self) {
    // If we're the main thread, ClassLinker won't be created until after we're attached,
    // so that thread needs a two-stage attach. Regular threads don't need this hack.
    // In the compiler, all threads need this hack, because no-one's going to be getting
    // a native peer!
    if (create_peer) {
      self->CreatePeer(thread_name, as_daemon, thread_group);
      if (self->IsExceptionPending()) {
        // We cannot keep the exception around, as we're deleting self. Try to be helpful and log
        // it.
        {
          ScopedObjectAccess soa(self);
          LOG(ERROR) << "Exception creating thread peer:";
          LOG(ERROR) << self->GetException()->Dump();
          self->ClearException();
        }
        return false;
      }
    } else {
      // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
      if (thread_name != nullptr) {
        self->tlsPtr_.name->assign(thread_name);
        ::art::SetThreadName(thread_name);
      } else if (self->GetJniEnv()->check_jni) {
        LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
      }
    }
    return true;
  };
  return Attach(thread_name, as_daemon, create_peer_action);
}

Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_peer) {
  auto set_peer_action = [&](Thread* self) {
    // Install the given peer.
    {
      DCHECK(self == Thread::Current());
      ScopedObjectAccess soa(self);
      self->tlsPtr_.opeer = soa.Decode<mirror::Object>(thread_peer).Ptr();
    }
    self->GetJniEnv()->SetLongField(thread_peer,
                                    WellKnownClasses::java_lang_Thread_nativePeer,
                                    reinterpret_cast<jlong>(self));
    return true;
  };
  return Attach(thread_name, as_daemon, set_peer_action);
}

void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->IsStarted());
  JNIEnv* env = tlsPtr_.jni_env;

  if (thread_group == nullptr) {
    thread_group = runtime->GetMainThreadGroup();
  }
  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
  // Add missing null check in case of OOM b/18297817
  if (name != nullptr && thread_name.get() == nullptr) {
    CHECK(IsExceptionPending());
    return;
  }
  jint thread_priority = GetNativePriority();
  jboolean thread_is_daemon = as_daemon;

  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
  if (peer.get() == nullptr) {
    CHECK(IsExceptionPending());
    return;
  }
  {
    ScopedObjectAccess soa(this);
    tlsPtr_.opeer = soa.Decode<mirror::Object>(peer.get()).Ptr();
  }
  env->CallNonvirtualVoidMethod(peer.get(),
                                WellKnownClasses::java_lang_Thread,
                                WellKnownClasses::java_lang_Thread_init,
                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
  if (IsExceptionPending()) {
    return;
  }

  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());
  env->SetLongField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
                    reinterpret_cast<jlong>(self));

  ScopedObjectAccess soa(self);
  StackHandleScope<1> hs(self);
  MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName()));
  if (peer_thread_name == nullptr) {
    // The Thread constructor should have set the Thread.name to a
    // non-null value. However, because we can run without code
    // available (in the compiler, in tests), we manually assign the
    // fields the constructor should have set.
    if (runtime->IsActiveTransaction()) {
      InitPeer<true>(soa,
                     tlsPtr_.opeer,
                     thread_is_daemon,
                     thread_group,
                     thread_name.get(),
                     thread_priority);
    } else {
      InitPeer<false>(soa,
                      tlsPtr_.opeer,
                      thread_is_daemon,
                      thread_group,
                      thread_name.get(),
                      thread_priority);
    }
    peer_thread_name.Assign(GetThreadName());
  }
  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
  if (peer_thread_name != nullptr) {
    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
  }
}

jobject Thread::CreateCompileTimePeer(JNIEnv* env,
                                      const char* name,
                                      bool as_daemon,
                                      jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(!runtime->IsStarted());

  if (thread_group == nullptr) {
    thread_group = runtime->GetMainThreadGroup();
  }
  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
  // Add missing null check in case of OOM b/18297817
  if (name != nullptr && thread_name.get() == nullptr) {
    CHECK(Thread::Current()->IsExceptionPending());
    return nullptr;
  }
  jint thread_priority = GetNativePriority();
  jboolean thread_is_daemon = as_daemon;

  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
  if (peer.get() == nullptr) {
    CHECK(Thread::Current()->IsExceptionPending());
    return nullptr;
  }

  // We cannot call Thread.init, as it will recursively ask for currentThread.

  // The Thread constructor should have set the Thread.name to a
  // non-null value. However, because we can run without code
  // available (in the compiler, in tests), we manually assign the
  // fields the constructor should have set.
  ScopedObjectAccessUnchecked soa(Thread::Current());
  if (runtime->IsActiveTransaction()) {
    InitPeer<true>(soa,
                   soa.Decode<mirror::Object>(peer.get()),
                   thread_is_daemon,
                   thread_group,
                   thread_name.get(),
                   thread_priority);
  } else {
    InitPeer<false>(soa,
                    soa.Decode<mirror::Object>(peer.get()),
                    thread_is_daemon,
                    thread_group,
                    thread_name.get(),
                    thread_priority);
  }

  return peer.release();
}

template<bool kTransactionActive>
void Thread::InitPeer(ScopedObjectAccessAlreadyRunnable& soa,
                      ObjPtr<mirror::Object> peer,
                      jboolean thread_is_daemon,
                      jobject thread_group,
                      jobject thread_name,
                      jint thread_priority) {
  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon)->
      SetBoolean<kTransactionActive>(peer, thread_is_daemon);
  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group)->
      SetObject<kTransactionActive>(peer, soa.Decode<mirror::Object>(thread_group));
  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name)->
      SetObject<kTransactionActive>(peer, soa.Decode<mirror::Object>(thread_name));
  jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority)->
      SetInt<kTransactionActive>(peer, thread_priority);
}

void Thread::SetThreadName(const char* name) {
  tlsPtr_.name->assign(name);
  ::art::SetThreadName(name);
  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
}

static void GetThreadStack(pthread_t thread,
                           void** stack_base,
                           size_t* stack_size,
                           size_t* guard_size) {
#if defined(__APPLE__)
  *stack_size = pthread_get_stacksize_np(thread);
  void* stack_addr = pthread_get_stackaddr_np(thread);

  // Check whether stack_addr is the base or end of the stack.
  // (On Mac OS 10.7, it's the end.)
  int stack_variable;
  if (stack_addr > &stack_variable) {
    *stack_base = reinterpret_cast<uint8_t*>(stack_addr) - *stack_size;
  } else {
    *stack_base = stack_addr;
  }

  // This is wrong, but there doesn't seem to be a way to get the actual value on the Mac.
  pthread_attr_t attributes;
  CHECK_PTHREAD_CALL(pthread_attr_init, (&attributes), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
#else
  pthread_attr_t attributes;
  CHECK_PTHREAD_CALL(pthread_getattr_np, (thread, &attributes), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, stack_base, stack_size), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);

#if defined(__GLIBC__)
  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
  // will be broken because we'll die long before we get close to 2GB.
  bool is_main_thread = (::art::GetTid() == getpid());
  if (is_main_thread) {
    rlimit stack_limit;
    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
    }
    if (stack_limit.rlim_cur == RLIM_INFINITY) {
      size_t old_stack_size = *stack_size;

      // Use the kernel default limit as our size, and adjust the base to match.
      *stack_size = 8 * MB;
      *stack_base = reinterpret_cast<uint8_t*>(*stack_base) + (old_stack_size - *stack_size);

      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
                    << " to " << PrettySize(*stack_size)
                    << " with base " << *stack_base;
    }
  }
#endif

#endif
}

bool Thread::InitStackHwm() {
  void* read_stack_base;
  size_t read_stack_size;
  size_t read_guard_size;
  GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size);

  tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base);
  tlsPtr_.stack_size = read_stack_size;

  // The minimum stack size we can cope with is the overflow reserved bytes (typically
  // 8K) + the protected region size (4K) + another page (4K). Typically this will
  // be 8+4+4 = 16K. The thread won't be able to do much with this stack: even the GC takes
  // between 8K and 12K.
  uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
      + 4 * KB;
  if (read_stack_size <= min_stack) {
    // Note, as we know the stack is small, avoid operations that could use a lot of stack.
    LogHelper::LogLineLowStack(__PRETTY_FUNCTION__,
                               __LINE__,
                               ::android::base::ERROR,
                               "Attempt to attach a thread with a too-small stack");
    return false;
  }

  // This is included in the SIGQUIT output, but it's useful here for thread debugging.
  VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)",
                                read_stack_base,
                                PrettySize(read_stack_size).c_str(),
                                PrettySize(read_guard_size).c_str());

  // Set stack_end_ to the bottom of the stack, saving space for stack overflows.

  Runtime* runtime = Runtime::Current();
  bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();

  // Valgrind on arm doesn't give the right values here. Do not install the guard page, and
  // effectively disable stack overflow checks (we'll get segfaults, potentially) by setting
  // stack_begin to 0.
  const bool valgrind_on_arm =
      (kRuntimeISA == kArm || kRuntimeISA == kArm64) &&
      kMemoryToolIsValgrind &&
      RUNNING_ON_MEMORY_TOOL != 0;
  if (valgrind_on_arm) {
    tlsPtr_.stack_begin = nullptr;
  }

  ResetDefaultStackEnd();

  // Install the protected region if we are doing implicit overflow checks.
  if (implicit_stack_check && !valgrind_on_arm) {
    // The thread might have a protected region at the bottom. We need
    // to install our own region, so we need to move the limits
    // of the stack to make room for it.

    tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
    tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
    tlsPtr_.stack_size -= read_guard_size;

    InstallImplicitProtection();
  }

  // Sanity check.
  CHECK_GT(FindStackTop(), reinterpret_cast<void*>(tlsPtr_.stack_end));

  return true;
}

void Thread::ShortDump(std::ostream& os) const {
  os << "Thread[";
  if (GetThreadId() != 0) {
    // If we're in kStarting, we won't have a thin lock id or tid yet.
    os << GetThreadId()
       << ",tid=" << GetTid() << ',';
  }
  os << GetState()
     << ",Thread*=" << this
     << ",peer=" << tlsPtr_.opeer
     << ",\"" << (tlsPtr_.name != nullptr ? *tlsPtr_.name : "null") << "\""
     << "]";
}

void Thread::Dump(std::ostream& os, bool dump_native_stack, BacktraceMap* backtrace_map,
                  bool force_dump_stack) const {
  DumpState(os);
  DumpStack(os, dump_native_stack, backtrace_map, force_dump_stack);
}

mirror::String* Thread::GetThreadName() const {
  ArtField* f = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_name);
  if (tlsPtr_.opeer == nullptr) {
    return nullptr;
  }
  ObjPtr<mirror::Object> name = f->GetObject(tlsPtr_.opeer);
  return name == nullptr ? nullptr : name->AsString();
}

void Thread::GetThreadName(std::string& name) const {
  name.assign(*tlsPtr_.name);
}

uint64_t Thread::GetCpuMicroTime() const {
#if defined(__linux__)
  clockid_t cpu_clock_id;
  pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
  timespec now;
  clock_gettime(cpu_clock_id, &now);
  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
#else  // __APPLE__
  UNIMPLEMENTED(WARNING);
  return -1;
#endif
}

// Attempt to rectify locks so that we dump thread list with required locks before exiting.
static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
  LOG(ERROR) << *thread << " suspend count already zero.";
  Locks::thread_suspend_count_lock_->Unlock(self);
  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
    Locks::mutator_lock_->SharedTryLock(self);
    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
    }
  }
  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
    Locks::thread_list_lock_->TryLock(self);
    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
    }
  }
  std::ostringstream ss;
  Runtime::Current()->GetThreadList()->Dump(ss);
  LOG(FATAL) << ss.str();
}

bool Thread::ModifySuspendCountInternal(Thread* self,
                                        int delta,
                                        AtomicInteger* suspend_barrier,
                                        SuspendReason reason) {
  if (kIsDebugBuild) {
    DCHECK(delta == -1 || delta == +1 || delta == -tls32_.debug_suspend_count)
        << reason << " " << delta << " " << tls32_.debug_suspend_count << " " << this;
    DCHECK_GE(tls32_.suspend_count, tls32_.debug_suspend_count) << this;
    Locks::thread_suspend_count_lock_->AssertHeld(self);
    if (this != self && !IsSuspended()) {
      Locks::thread_list_lock_->AssertHeld(self);
    }
  }
  // User code suspensions need to be checked more closely since they originate from code outside
  // of the runtime's control.
  if (UNLIKELY(reason == SuspendReason::kForUserCode)) {
    Locks::user_code_suspension_lock_->AssertHeld(self);
    if (UNLIKELY(delta + tls32_.user_code_suspend_count < 0)) {
      LOG(ERROR) << "attempting to modify suspend count in an illegal way.";
      return false;
    }
  }
  if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
    UnsafeLogFatalForSuspendCount(self, this);
    return false;
  }

  if (kUseReadBarrier && delta > 0 && this != self && tlsPtr_.flip_function != nullptr) {
    // Force retry of a suspend request if it's in the middle of a thread flip to avoid a
    // deadlock. b/31683379.
    return false;
  }

  uint16_t flags = kSuspendRequest;
  if (delta > 0 && suspend_barrier != nullptr) {
    uint32_t available_barrier = kMaxSuspendBarriers;
    for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
      if (tlsPtr_.active_suspend_barriers[i] == nullptr) {
        available_barrier = i;
        break;
      }
    }
    if (available_barrier == kMaxSuspendBarriers) {
      // No barrier spaces available, we can't add another.
      return false;
    }
    tlsPtr_.active_suspend_barriers[available_barrier] = suspend_barrier;
    flags |= kActiveSuspendBarrier;
  }

  tls32_.suspend_count += delta;
  switch (reason) {
    case SuspendReason::kForDebugger:
      tls32_.debug_suspend_count += delta;
      break;
    case SuspendReason::kForUserCode:
      tls32_.user_code_suspend_count += delta;
      break;
    case SuspendReason::kInternal:
      break;
  }

  if (tls32_.suspend_count == 0) {
    AtomicClearFlag(kSuspendRequest);
  } else {
    // Two bits might be set simultaneously.
    tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flags);
    TriggerSuspend();
  }
  return true;
}

bool Thread::PassActiveSuspendBarriers(Thread* self) {
  // Grab the suspend_count lock and copy the current set of
  // barriers.
  // Then clear the list and the flag. The ModifySuspendCount
  // function requires the lock so we prevent a race between setting
  // the kActiveSuspendBarrier flag and clearing it.
  AtomicInteger* pass_barriers[kMaxSuspendBarriers];
  {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    if (!ReadFlag(kActiveSuspendBarrier)) {
      // quick exit test: the barriers have already been claimed - this is
      // possible as there may be a race to claim and it doesn't matter
      // who wins.
      // All of the callers of this function (except the SuspendAllInternal)
      // will first test the kActiveSuspendBarrier flag without lock. Here
      // double-check whether the barrier has been passed with the
      // suspend_count lock.
      return false;
    }

    for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
      pass_barriers[i] = tlsPtr_.active_suspend_barriers[i];
      tlsPtr_.active_suspend_barriers[i] = nullptr;
    }
    AtomicClearFlag(kActiveSuspendBarrier);
  }

  uint32_t barrier_count = 0;
  for (uint32_t i = 0; i < kMaxSuspendBarriers; i++) {
    AtomicInteger* pending_threads = pass_barriers[i];
    if (pending_threads != nullptr) {
      bool done = false;
      do {
        int32_t cur_val = pending_threads->LoadRelaxed();
        CHECK_GT(cur_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << cur_val;
        // Reduce value by 1.
        done = pending_threads->CompareExchangeWeakRelaxed(cur_val, cur_val - 1);
#if ART_USE_FUTEXES
        if (done && (cur_val - 1) == 0) {  // Weak CAS may fail spuriously.
          futex(pending_threads->Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
        }
#endif
      } while (!done);
      ++barrier_count;
    }
  }
  CHECK_GT(barrier_count, 0U);
  return true;
}

void Thread::ClearSuspendBarrier(AtomicInteger* target) {
  CHECK(ReadFlag(kActiveSuspendBarrier));
  bool clear_flag = true;
  for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
    AtomicInteger* ptr = tlsPtr_.active_suspend_barriers[i];
    if (ptr == target) {
      tlsPtr_.active_suspend_barriers[i] = nullptr;
    } else if (ptr != nullptr) {
      clear_flag = false;
    }
  }
  if (LIKELY(clear_flag)) {
    AtomicClearFlag(kActiveSuspendBarrier);
  }
}

void Thread::RunCheckpointFunction() {
  bool done = false;
  do {
    // Grab the suspend_count lock and copy the checkpoints one by one. When the last checkpoint is
    // copied, clear the list and the flag. The RequestCheckpoint function will also grab this lock
    // to prevent a race between setting the kCheckpointRequest flag and clearing it.
    Closure* checkpoint = nullptr;
    {
      MutexLock mu(this, *Locks::thread_suspend_count_lock_);
      if (tlsPtr_.checkpoint_function != nullptr) {
        checkpoint = tlsPtr_.checkpoint_function;
        if (!checkpoint_overflow_.empty()) {
          // Overflow list not empty, copy the first one out and continue.
          tlsPtr_.checkpoint_function = checkpoint_overflow_.front();
          checkpoint_overflow_.pop_front();
        } else {
          // No overflow checkpoints, this means that we are on the last pending checkpoint.
          tlsPtr_.checkpoint_function = nullptr;
          AtomicClearFlag(kCheckpointRequest);
          done = true;
        }
      } else {
        LOG(FATAL) << "Checkpoint flag set without pending checkpoint";
      }
    }

    // Outside the lock, run the checkpoint functions that we collected.
    ScopedTrace trace("Run checkpoint function");
    DCHECK(checkpoint != nullptr);
    checkpoint->Run(this);
  } while (!done);
}

void Thread::RunEmptyCheckpoint() {
  DCHECK_EQ(Thread::Current(), this);
  AtomicClearFlag(kEmptyCheckpointRequest);
  Runtime::Current()->GetThreadList()->EmptyCheckpointBarrier()->Pass(this);
}

bool Thread::RequestCheckpoint(Closure* function) {
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  if (old_state_and_flags.as_struct.state != kRunnable) {
    return false;  // Fail, thread is suspended and so can't run a checkpoint.
  }

  // We must be runnable to request a checkpoint.
  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
  union StateAndFlags new_state_and_flags;
  new_state_and_flags.as_int = old_state_and_flags.as_int;
  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
      old_state_and_flags.as_int, new_state_and_flags.as_int);
  if (success) {
    // Succeeded setting checkpoint flag, now insert the actual checkpoint.
    if (tlsPtr_.checkpoint_function == nullptr) {
      tlsPtr_.checkpoint_function = function;
    } else {
      checkpoint_overflow_.push_back(function);
    }
    CHECK_EQ(ReadFlag(kCheckpointRequest), true);
    TriggerSuspend();
  }
  return success;
}

bool Thread::RequestEmptyCheckpoint() {
  union StateAndFlags old_state_and_flags;
  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  if (old_state_and_flags.as_struct.state != kRunnable) {
    // If it's not runnable, we don't need to do anything because it won't be in the middle of a
    // heap access (eg. the read barrier).
    return false;
  }

  // We must be runnable to request a checkpoint.
  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
  union StateAndFlags new_state_and_flags;
  new_state_and_flags.as_int = old_state_and_flags.as_int;
  new_state_and_flags.as_struct.flags |= kEmptyCheckpointRequest;
  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
      old_state_and_flags.as_int, new_state_and_flags.as_int);
  if (success) {
    TriggerSuspend();
  }
  return success;
}

class BarrierClosure : public Closure {
 public:
  explicit BarrierClosure(Closure* wrapped) : wrapped_(wrapped), barrier_(0) {}

  void Run(Thread* self) OVERRIDE {
    wrapped_->Run(self);
    barrier_.Pass(self);
  }

  void Wait(Thread* self) {
    barrier_.Increment(self, 1);
  }

 private:
  Closure* wrapped_;
  Barrier barrier_;
};

bool Thread::RequestSynchronousCheckpoint(Closure* function) {
  if (this == Thread::Current()) {
    // Asked to run on this thread. Just run.
    function->Run(this);
    return true;
  }
  Thread* self = Thread::Current();

  // The current thread is not this thread.

  if (GetState() == ThreadState::kTerminated) {
    return false;
  }

  // Note: we're holding the thread-list lock. The thread cannot die at this point.
  struct ScopedThreadListLockUnlock {
    explicit ScopedThreadListLockUnlock(Thread* self_in) RELEASE(*Locks::thread_list_lock_)
        : self_thread(self_in) {
      Locks::thread_list_lock_->AssertHeld(self_thread);
      Locks::thread_list_lock_->Unlock(self_thread);
    }

    ~ScopedThreadListLockUnlock() ACQUIRE(*Locks::thread_list_lock_) {
      Locks::thread_list_lock_->AssertNotHeld(self_thread);
      Locks::thread_list_lock_->Lock(self_thread);
    }

    Thread* self_thread;
  };

  for (;;) {
    // If this thread is runnable, try to schedule a checkpoint. Do some gymnastics to not hold the
    // suspend-count lock for too long.
    if (GetState() == ThreadState::kRunnable) {
      BarrierClosure barrier_closure(function);
      bool installed = false;
      {
        MutexLock mu(self, *Locks::thread_suspend_count_lock_);
        installed = RequestCheckpoint(&barrier_closure);
      }
      if (installed) {
        // Relinquish the thread-list lock, temporarily. We should not wait holding any locks.
        ScopedThreadListLockUnlock stllu(self);
        ScopedThreadSuspension sts(self, ThreadState::kWaiting);
        barrier_closure.Wait(self);
        return true;
      }
      // Fall-through.
    }

    // This thread is not runnable, make sure we stay suspended, then run the checkpoint.
    // Note: ModifySuspendCountInternal also expects the thread_list_lock to be held in
    //       certain situations.
    {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);

      if (!ModifySuspendCount(self, +1, nullptr, SuspendReason::kInternal)) {
        // Just retry the loop.
        sched_yield();
        continue;
      }
    }

    {
      ScopedThreadListLockUnlock stllu(self);
      {
        ScopedThreadSuspension sts(self, ThreadState::kWaiting);
        while (GetState() == ThreadState::kRunnable) {
          // We became runnable again. Wait till the suspend triggered in ModifySuspendCount
          // moves us to suspended.
          sched_yield();
        }
      }

      function->Run(this);
    }

    {
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);

      DCHECK_NE(GetState(), ThreadState::kRunnable);
      bool updated = ModifySuspendCount(self, -1, nullptr, SuspendReason::kInternal);
      DCHECK(updated);
    }

    {
      // Imitate ResumeAll, the thread may be waiting on Thread::resume_cond_ since we raised its
      // suspend count. Now the suspend_count_ is lowered so we must do the broadcast.
      MutexLock mu2(self, *Locks::thread_suspend_count_lock_);
      Thread::resume_cond_->Broadcast(self);
    }

    return true;  // We're done, break out of the loop.
  }
}

Closure* Thread::GetFlipFunction() {
  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
  Closure* func;
  do {
    func = atomic_func->LoadRelaxed();
    if (func == nullptr) {
      return nullptr;
    }
  } while (!atomic_func->CompareExchangeWeakSequentiallyConsistent(func, nullptr));
  DCHECK(func != nullptr);
  return func;
}

void Thread::SetFlipFunction(Closure* function) {
  CHECK(function != nullptr);
  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
  atomic_func->StoreSequentiallyConsistent(function);
}

void Thread::FullSuspendCheck() {
  ScopedTrace trace(__FUNCTION__);
  VLOG(threads) << this << " self-suspending";
  // Make thread appear suspended to other threads, release mutator_lock_.
1573 // Transition to suspended and back to runnable, re-acquire share on mutator_lock_. 1574 ScopedThreadSuspension(this, kSuspended); 1575 VLOG(threads) << this << " self-reviving"; 1576} 1577 1578static std::string GetSchedulerGroupName(pid_t tid) { 1579 // /proc/<pid>/cgroup looks like this: 1580 // 2:devices:/ 1581 // 1:cpuacct,cpu:/ 1582 // We want the third field from the line whose second field contains the "cpu" token. 1583 std::string cgroup_file; 1584 if (!ReadFileToString(StringPrintf("/proc/self/task/%d/cgroup", tid), &cgroup_file)) { 1585 return ""; 1586 } 1587 std::vector<std::string> cgroup_lines; 1588 Split(cgroup_file, '\n', &cgroup_lines); 1589 for (size_t i = 0; i < cgroup_lines.size(); ++i) { 1590 std::vector<std::string> cgroup_fields; 1591 Split(cgroup_lines[i], ':', &cgroup_fields); 1592 std::vector<std::string> cgroups; 1593 Split(cgroup_fields[1], ',', &cgroups); 1594 for (size_t j = 0; j < cgroups.size(); ++j) { 1595 if (cgroups[j] == "cpu") { 1596 return cgroup_fields[2].substr(1); // Skip the leading slash. 1597 } 1598 } 1599 } 1600 return ""; 1601} 1602 1603 1604void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) { 1605 std::string group_name; 1606 int priority; 1607 bool is_daemon = false; 1608 Thread* self = Thread::Current(); 1609 1610 // If flip_function is not null, it means we have run a checkpoint 1611 // before the thread wakes up to execute the flip function and the 1612 // thread roots haven't been forwarded. So the following access to 1613 // the roots (opeer or methods in the frames) would be bad. Run it 1614 // here. TODO: clean up. 1615 if (thread != nullptr) { 1616 ScopedObjectAccessUnchecked soa(self); 1617 Thread* this_thread = const_cast<Thread*>(thread); 1618 Closure* flip_func = this_thread->GetFlipFunction(); 1619 if (flip_func != nullptr) { 1620 flip_func->Run(this_thread); 1621 } 1622 } 1623 1624 // Don't do this if we are aborting since the GC may have all the threads suspended. This will 1625 // cause ScopedObjectAccessUnchecked to deadlock. 1626 if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) { 1627 ScopedObjectAccessUnchecked soa(self); 1628 priority = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_priority) 1629 ->GetInt(thread->tlsPtr_.opeer); 1630 is_daemon = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_daemon) 1631 ->GetBoolean(thread->tlsPtr_.opeer); 1632 1633 ObjPtr<mirror::Object> thread_group = 1634 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group) 1635 ->GetObject(thread->tlsPtr_.opeer); 1636 1637 if (thread_group != nullptr) { 1638 ArtField* group_name_field = 1639 jni::DecodeArtField(WellKnownClasses::java_lang_ThreadGroup_name); 1640 ObjPtr<mirror::String> group_name_string = 1641 group_name_field->GetObject(thread_group)->AsString(); 1642 group_name = (group_name_string != nullptr) ? 
group_name_string->ToModifiedUtf8() : "<null>"; 1643 } 1644 } else { 1645 priority = GetNativePriority(); 1646 } 1647 1648 std::string scheduler_group_name(GetSchedulerGroupName(tid)); 1649 if (scheduler_group_name.empty()) { 1650 scheduler_group_name = "default"; 1651 } 1652 1653 if (thread != nullptr) { 1654 os << '"' << *thread->tlsPtr_.name << '"'; 1655 if (is_daemon) { 1656 os << " daemon"; 1657 } 1658 os << " prio=" << priority 1659 << " tid=" << thread->GetThreadId() 1660 << " " << thread->GetState(); 1661 if (thread->IsStillStarting()) { 1662 os << " (still starting up)"; 1663 } 1664 os << "\n"; 1665 } else { 1666 os << '"' << ::art::GetThreadName(tid) << '"' 1667 << " prio=" << priority 1668 << " (not attached)\n"; 1669 } 1670 1671 if (thread != nullptr) { 1672 MutexLock mu(self, *Locks::thread_suspend_count_lock_); 1673 os << " | group=\"" << group_name << "\"" 1674 << " sCount=" << thread->tls32_.suspend_count 1675 << " dsCount=" << thread->tls32_.debug_suspend_count 1676 << " flags=" << thread->tls32_.state_and_flags.as_struct.flags 1677 << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer) 1678 << " self=" << reinterpret_cast<const void*>(thread) << "\n"; 1679 } 1680 1681 os << " | sysTid=" << tid 1682 << " nice=" << getpriority(PRIO_PROCESS, tid) 1683 << " cgrp=" << scheduler_group_name; 1684 if (thread != nullptr) { 1685 int policy; 1686 sched_param sp; 1687#if !defined(__APPLE__) 1688 // b/36445592 Don't use pthread_getschedparam since pthread may have exited. 1689 policy = sched_getscheduler(tid); 1690 if (policy == -1) { 1691 PLOG(WARNING) << "sched_getscheduler(" << tid << ")"; 1692 } 1693 int sched_getparam_result = sched_getparam(tid, &sp); 1694 if (sched_getparam_result == -1) { 1695 PLOG(WARNING) << "sched_getparam(" << tid << ", &sp)"; 1696 sp.sched_priority = -1; 1697 } 1698#else 1699 CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp), 1700 __FUNCTION__); 1701#endif 1702 os << " sched=" << policy << "/" << sp.sched_priority 1703 << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self); 1704 } 1705 os << "\n"; 1706 1707 // Grab the scheduler stats for this thread. 1708 std::string scheduler_stats; 1709 if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) { 1710 scheduler_stats.resize(scheduler_stats.size() - 1); // Lose the trailing '\n'. 1711 } else { 1712 scheduler_stats = "0 0 0"; 1713 } 1714 1715 char native_thread_state = '?'; 1716 int utime = 0; 1717 int stime = 0; 1718 int task_cpu = 0; 1719 GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu); 1720 1721 os << " | state=" << native_thread_state 1722 << " schedstat=( " << scheduler_stats << " )" 1723 << " utm=" << utime 1724 << " stm=" << stime 1725 << " core=" << task_cpu 1726 << " HZ=" << sysconf(_SC_CLK_TCK) << "\n"; 1727 if (thread != nullptr) { 1728 os << " | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-" 1729 << reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize=" 1730 << PrettySize(thread->tlsPtr_.stack_size) << "\n"; 1731 // Dump the held mutexes. 
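// (kMonitorLock is skipped in the loop below; object monitors are reported
// separately via Monitor::DescribeWait and Monitor::VisitLocks when the Java
// stack is dumped.)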
1732 os << " | held mutexes="; 1733 for (size_t i = 0; i < kLockLevelCount; ++i) { 1734 if (i != kMonitorLock) { 1735 BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i)); 1736 if (mutex != nullptr) { 1737 os << " \"" << mutex->GetName() << "\""; 1738 if (mutex->IsReaderWriterMutex()) { 1739 ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex); 1740 if (rw_mutex->GetExclusiveOwnerTid() == tid) { 1741 os << "(exclusive held)"; 1742 } else { 1743 os << "(shared held)"; 1744 } 1745 } 1746 } 1747 } 1748 } 1749 os << "\n"; 1750 } 1751} 1752 1753void Thread::DumpState(std::ostream& os) const { 1754 Thread::DumpState(os, this, GetTid()); 1755} 1756 1757struct StackDumpVisitor : public StackVisitor { 1758 StackDumpVisitor(std::ostream& os_in, 1759 Thread* thread_in, 1760 Context* context, 1761 bool can_allocate_in, 1762 bool check_suspended = true, 1763 bool dump_locks_in = true) 1764 REQUIRES_SHARED(Locks::mutator_lock_) 1765 : StackVisitor(thread_in, 1766 context, 1767 StackVisitor::StackWalkKind::kIncludeInlinedFrames, 1768 check_suspended), 1769 os(os_in), 1770 can_allocate(can_allocate_in), 1771 last_method(nullptr), 1772 last_line_number(0), 1773 repetition_count(0), 1774 frame_count(0), 1775 dump_locks(dump_locks_in) {} 1776 1777 virtual ~StackDumpVisitor() { 1778 if (frame_count == 0) { 1779 os << " (no managed stack frames)\n"; 1780 } 1781 } 1782 1783 bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) { 1784 ArtMethod* m = GetMethod(); 1785 if (m->IsRuntimeMethod()) { 1786 return true; 1787 } 1788 m = m->GetInterfaceMethodIfProxy(kRuntimePointerSize); 1789 const int kMaxRepetition = 3; 1790 ObjPtr<mirror::Class> c = m->GetDeclaringClass(); 1791 ObjPtr<mirror::DexCache> dex_cache = c->GetDexCache(); 1792 int line_number = -1; 1793 if (dex_cache != nullptr) { // be tolerant of bad input 1794 const DexFile* dex_file = dex_cache->GetDexFile(); 1795 line_number = annotations::GetLineNumFromPC(dex_file, m, GetDexPc(false)); 1796 } 1797 if (line_number == last_line_number && last_method == m) { 1798 ++repetition_count; 1799 } else { 1800 if (repetition_count >= kMaxRepetition) { 1801 os << " ... repeated " << (repetition_count - kMaxRepetition) << " times\n"; 1802 } 1803 repetition_count = 0; 1804 last_line_number = line_number; 1805 last_method = m; 1806 } 1807 if (repetition_count < kMaxRepetition) { 1808 os << " at " << m->PrettyMethod(false); 1809 if (m->IsNative()) { 1810 os << "(Native method)"; 1811 } else { 1812 const char* source_file(m->GetDeclaringClassSourceFile()); 1813 os << "(" << (source_file != nullptr ? source_file : "unavailable") 1814 << ":" << line_number << ")"; 1815 } 1816 os << "\n"; 1817 if (frame_count == 0) { 1818 Monitor::DescribeWait(os, GetThread()); 1819 } 1820 if (can_allocate && dump_locks) { 1821 // Visit locks, but do not abort on errors. This would trigger a nested abort. 1822 // Skip visiting locks if dump_locks is false as it would cause a bad_mutexes_held in 1823 // RegTypeCache::RegTypeCache due to thread_list_lock. 
1824 Monitor::VisitLocks(this, DumpLockedObject, &os, false);
1825 }
1826 }
1827
1828 ++frame_count;
1829 return true;
1830 }
1831
1832 static void DumpLockedObject(mirror::Object* o, void* context)
1833 REQUIRES_SHARED(Locks::mutator_lock_) {
1834 std::ostream& os = *reinterpret_cast<std::ostream*>(context);
1835 os << " - locked ";
1836 if (o == nullptr) {
1837 os << "an unknown object";
1838 } else {
1839 if (kUseReadBarrier && Thread::Current()->GetIsGcMarking()) {
1840 // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
1841 // may not have been flipped yet and "o" may be a from-space (stale) ref, in which case the
1842 // IdentityHashCode call below will crash. So explicitly mark/forward it here.
1843 o = ReadBarrier::Mark(o);
1844 }
1845 if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
1846 Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
1847 // Getting the identity hashcode here would result in lock inflation and suspension of the
1848 // current thread, which isn't safe if this is the only runnable thread.
1849 os << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)", reinterpret_cast<intptr_t>(o),
1850 o->PrettyTypeOf().c_str());
1851 } else {
1852 // IdentityHashCode can cause thread suspension, which would invalidate o if it moved. So
1853 // we get the pretty type before we call IdentityHashCode.
1854 const std::string pretty_type(o->PrettyTypeOf());
1855 os << StringPrintf("<0x%08x> (a %s)", o->IdentityHashCode(), pretty_type.c_str());
1856 }
1857 }
1858 os << "\n";
1859 }
1860
1861 std::ostream& os;
1862 const bool can_allocate;
1863 ArtMethod* last_method;
1864 int last_line_number;
1865 int repetition_count;
1866 int frame_count;
1867 const bool dump_locks;
1868};
1869
1870static bool ShouldShowNativeStack(const Thread* thread)
1871 REQUIRES_SHARED(Locks::mutator_lock_) {
1872 ThreadState state = thread->GetState();
1873
1874 // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
1875 if (state > kWaiting && state < kStarting) {
1876 return true;
1877 }
1878
1879 // In an Object.wait variant or Thread.sleep? That's not interesting.
1880 if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
1881 return false;
1882 }
1883
1884 // Threads with no managed stack frames should be shown.
1885 const ManagedStack* managed_stack = thread->GetManagedStack();
1886 if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
1887 managed_stack->GetTopShadowFrame() == nullptr)) {
1888 return true;
1889 }
1890
1891 // In some other native method? That's interesting.
1892 // We don't just check kNative because native methods will be in state kSuspended if they're
1893 // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
1894 // thread-startup states if it's early enough in their life cycle (http://b/7432159).
1895 ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
1896 return current_method != nullptr && current_method->IsNative();
1897}
1898
1899void Thread::DumpJavaStack(std::ostream& os, bool check_suspended, bool dump_locks) const {
1900 // If flip_function is not null, it means we have run a checkpoint
1901 // before the thread wakes up to execute the flip function and the
1902 // thread roots haven't been forwarded. So the following access to
1903 // the roots (locks or methods in the frames) would be bad. Run it
1904 // here. TODO: clean up.
1905 { 1906 Thread* this_thread = const_cast<Thread*>(this); 1907 Closure* flip_func = this_thread->GetFlipFunction(); 1908 if (flip_func != nullptr) { 1909 flip_func->Run(this_thread); 1910 } 1911 } 1912 1913 // Dumping the Java stack involves the verifier for locks. The verifier operates under the 1914 // assumption that there is no exception pending on entry. Thus, stash any pending exception. 1915 // Thread::Current() instead of this in case a thread is dumping the stack of another suspended 1916 // thread. 1917 StackHandleScope<1> scope(Thread::Current()); 1918 Handle<mirror::Throwable> exc; 1919 bool have_exception = false; 1920 if (IsExceptionPending()) { 1921 exc = scope.NewHandle(GetException()); 1922 const_cast<Thread*>(this)->ClearException(); 1923 have_exception = true; 1924 } 1925 1926 std::unique_ptr<Context> context(Context::Create()); 1927 StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(), 1928 !tls32_.throwing_OutOfMemoryError, check_suspended, dump_locks); 1929 dumper.WalkStack(); 1930 1931 if (have_exception) { 1932 const_cast<Thread*>(this)->SetException(exc.Get()); 1933 } 1934} 1935 1936void Thread::DumpStack(std::ostream& os, 1937 bool dump_native_stack, 1938 BacktraceMap* backtrace_map, 1939 bool force_dump_stack) const { 1940 // TODO: we call this code when dying but may not have suspended the thread ourself. The 1941 // IsSuspended check is therefore racy with the use for dumping (normally we inhibit 1942 // the race with the thread_suspend_count_lock_). 1943 bool dump_for_abort = (gAborting > 0); 1944 bool safe_to_dump = (this == Thread::Current() || IsSuspended()); 1945 if (!kIsDebugBuild) { 1946 // We always want to dump the stack for an abort, however, there is no point dumping another 1947 // thread's stack in debug builds where we'll hit the not suspended check in the stack walk. 1948 safe_to_dump = (safe_to_dump || dump_for_abort); 1949 } 1950 if (safe_to_dump || force_dump_stack) { 1951 // If we're currently in native code, dump that stack before dumping the managed stack. 1952 if (dump_native_stack && (dump_for_abort || force_dump_stack || ShouldShowNativeStack(this))) { 1953 DumpKernelStack(os, GetTid(), " kernel: ", false); 1954 ArtMethod* method = 1955 GetCurrentMethod(nullptr, 1956 /*check_suspended*/ !force_dump_stack, 1957 /*abort_on_error*/ !(dump_for_abort || force_dump_stack)); 1958 DumpNativeStack(os, GetTid(), backtrace_map, " native: ", method); 1959 } 1960 DumpJavaStack(os, 1961 /*check_suspended*/ !force_dump_stack, 1962 /*dump_locks*/ !force_dump_stack); 1963 } else { 1964 os << "Not able to dump stack of thread that isn't suspended"; 1965 } 1966} 1967 1968void Thread::ThreadExitCallback(void* arg) { 1969 Thread* self = reinterpret_cast<Thread*>(arg); 1970 if (self->tls32_.thread_exit_check_count == 0) { 1971 LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's " 1972 "going to use a pthread_key_create destructor?): " << *self; 1973 CHECK(is_started_); 1974#ifdef ART_TARGET_ANDROID 1975 __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self; 1976#else 1977 CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self"); 1978#endif 1979 self->tls32_.thread_exit_check_count = 1; 1980 } else { 1981 LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self; 1982 } 1983} 1984 1985void Thread::Startup() { 1986 CHECK(!is_started_); 1987 is_started_ = true; 1988 { 1989 // MutexLock to keep annotalysis happy. 
1990 // 1991 // Note we use null for the thread because Thread::Current can 1992 // return garbage since (is_started_ == true) and 1993 // Thread::pthread_key_self_ is not yet initialized. 1994 // This was seen on glibc. 1995 MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_); 1996 resume_cond_ = new ConditionVariable("Thread resumption condition variable", 1997 *Locks::thread_suspend_count_lock_); 1998 } 1999 2000 // Allocate a TLS slot. 2001 CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), 2002 "self key"); 2003 2004 // Double-check the TLS slot allocation. 2005 if (pthread_getspecific(pthread_key_self_) != nullptr) { 2006 LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr"; 2007 } 2008} 2009 2010void Thread::FinishStartup() { 2011 Runtime* runtime = Runtime::Current(); 2012 CHECK(runtime->IsStarted()); 2013 2014 // Finish attaching the main thread. 2015 ScopedObjectAccess soa(Thread::Current()); 2016 Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup()); 2017 Thread::Current()->AssertNoPendingException(); 2018 2019 Runtime::Current()->GetClassLinker()->RunRootClinits(); 2020 2021 // The thread counts as started from now on. We need to add it to the ThreadGroup. For regular 2022 // threads, this is done in Thread.start() on the Java side. 2023 { 2024 // This is only ever done once. There's no benefit in caching the method. 2025 jmethodID thread_group_add = soa.Env()->GetMethodID(WellKnownClasses::java_lang_ThreadGroup, 2026 "add", 2027 "(Ljava/lang/Thread;)V"); 2028 CHECK(thread_group_add != nullptr); 2029 ScopedLocalRef<jobject> thread_jobject( 2030 soa.Env(), soa.Env()->AddLocalReference<jobject>(Thread::Current()->GetPeer())); 2031 soa.Env()->CallNonvirtualVoidMethod(runtime->GetMainThreadGroup(), 2032 WellKnownClasses::java_lang_ThreadGroup, 2033 thread_group_add, 2034 thread_jobject.get()); 2035 Thread::Current()->AssertNoPendingException(); 2036 } 2037} 2038 2039void Thread::Shutdown() { 2040 CHECK(is_started_); 2041 is_started_ = false; 2042 CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key"); 2043 MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_); 2044 if (resume_cond_ != nullptr) { 2045 delete resume_cond_; 2046 resume_cond_ = nullptr; 2047 } 2048} 2049 2050Thread::Thread(bool daemon) 2051 : tls32_(daemon), 2052 wait_monitor_(nullptr), 2053 custom_tls_(nullptr), 2054 can_call_into_java_(true) { 2055 wait_mutex_ = new Mutex("a thread wait mutex"); 2056 wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_); 2057 tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>; 2058 tlsPtr_.name = new std::string(kThreadNameDuringStartup); 2059 2060 static_assert((sizeof(Thread) % 4) == 0U, 2061 "art::Thread has a size which is not a multiple of 4."); 2062 tls32_.state_and_flags.as_struct.flags = 0; 2063 tls32_.state_and_flags.as_struct.state = kNative; 2064 tls32_.interrupted.StoreRelaxed(false); 2065 memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes)); 2066 std::fill(tlsPtr_.rosalloc_runs, 2067 tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBracketsInThread, 2068 gc::allocator::RosAlloc::GetDedicatedFullRun()); 2069 tlsPtr_.checkpoint_function = nullptr; 2070 for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) { 2071 tlsPtr_.active_suspend_barriers[i] = nullptr; 2072 } 2073 tlsPtr_.flip_function = nullptr; 2074 tlsPtr_.thread_local_mark_stack = nullptr; 2075 
tls32_.is_transitioning_to_runnable = false; 2076} 2077 2078bool Thread::IsStillStarting() const { 2079 // You might think you can check whether the state is kStarting, but for much of thread startup, 2080 // the thread is in kNative; it might also be in kVmWait. 2081 // You might think you can check whether the peer is null, but the peer is actually created and 2082 // assigned fairly early on, and needs to be. 2083 // It turns out that the last thing to change is the thread name; that's a good proxy for "has 2084 // this thread _ever_ entered kRunnable". 2085 return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) || 2086 (*tlsPtr_.name == kThreadNameDuringStartup); 2087} 2088 2089void Thread::AssertPendingException() const { 2090 CHECK(IsExceptionPending()) << "Pending exception expected."; 2091} 2092 2093void Thread::AssertPendingOOMException() const { 2094 AssertPendingException(); 2095 auto* e = GetException(); 2096 CHECK_EQ(e->GetClass(), DecodeJObject(WellKnownClasses::java_lang_OutOfMemoryError)->AsClass()) 2097 << e->Dump(); 2098} 2099 2100void Thread::AssertNoPendingException() const { 2101 if (UNLIKELY(IsExceptionPending())) { 2102 ScopedObjectAccess soa(Thread::Current()); 2103 LOG(FATAL) << "No pending exception expected: " << GetException()->Dump(); 2104 } 2105} 2106 2107void Thread::AssertNoPendingExceptionForNewException(const char* msg) const { 2108 if (UNLIKELY(IsExceptionPending())) { 2109 ScopedObjectAccess soa(Thread::Current()); 2110 LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: " 2111 << GetException()->Dump(); 2112 } 2113} 2114 2115class MonitorExitVisitor : public SingleRootVisitor { 2116 public: 2117 explicit MonitorExitVisitor(Thread* self) : self_(self) { } 2118 2119 // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit. 2120 void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED) 2121 OVERRIDE NO_THREAD_SAFETY_ANALYSIS { 2122 if (self_->HoldsLock(entered_monitor)) { 2123 LOG(WARNING) << "Calling MonitorExit on object " 2124 << entered_monitor << " (" << entered_monitor->PrettyTypeOf() << ")" 2125 << " left locked by native thread " 2126 << *Thread::Current() << " which is detaching"; 2127 entered_monitor->MonitorExit(self_); 2128 } 2129 } 2130 2131 private: 2132 Thread* const self_; 2133}; 2134 2135void Thread::Destroy() { 2136 Thread* self = this; 2137 DCHECK_EQ(self, Thread::Current()); 2138 2139 if (tlsPtr_.jni_env != nullptr) { 2140 { 2141 ScopedObjectAccess soa(self); 2142 MonitorExitVisitor visitor(self); 2143 // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited. 2144 tlsPtr_.jni_env->monitors.VisitRoots(&visitor, RootInfo(kRootVMInternal)); 2145 } 2146 // Release locally held global references which releasing may require the mutator lock. 2147 if (tlsPtr_.jpeer != nullptr) { 2148 // If pthread_create fails we don't have a jni env here. 2149 tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer); 2150 tlsPtr_.jpeer = nullptr; 2151 } 2152 if (tlsPtr_.class_loader_override != nullptr) { 2153 tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override); 2154 tlsPtr_.class_loader_override = nullptr; 2155 } 2156 } 2157 2158 if (tlsPtr_.opeer != nullptr) { 2159 ScopedObjectAccess soa(self); 2160 // We may need to call user-supplied managed code, do this before final clean-up. 
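// (Both helpers below call back into managed code: dispatchUncaughtException on
// the Thread peer and ThreadGroup.removeThread; see their definitions further down.)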
2161 HandleUncaughtExceptions(soa);
2162 RemoveFromThreadGroup(soa);
2163 Runtime* runtime = Runtime::Current();
2164 if (runtime != nullptr) {
2165 runtime->GetRuntimeCallbacks()->ThreadDeath(self);
2166 }
2167
2168 // this.nativePeer = 0;
2169 if (Runtime::Current()->IsActiveTransaction()) {
2170 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer)
2171 ->SetLong<true>(tlsPtr_.opeer, 0);
2172 } else {
2173 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_nativePeer)
2174 ->SetLong<false>(tlsPtr_.opeer, 0);
2175 }
2176
2177 // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
2178 // who is waiting.
2179 ObjPtr<mirror::Object> lock =
2180 jni::DecodeArtField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
2181 // (This conditional is only needed for tests, where Thread.lock won't have been set.)
2182 if (lock != nullptr) {
2183 StackHandleScope<1> hs(self);
2184 Handle<mirror::Object> h_obj(hs.NewHandle(lock));
2185 ObjectLock<mirror::Object> locker(self, h_obj);
2186 locker.NotifyAll();
2187 }
2188 tlsPtr_.opeer = nullptr;
2189 }
2190
2191 {
2192 ScopedObjectAccess soa(self);
2193 Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
2194 if (kUseReadBarrier) {
2195 Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
2196 }
2197 }
2198}
2199
2200Thread::~Thread() {
2201 CHECK(tlsPtr_.class_loader_override == nullptr);
2202 CHECK(tlsPtr_.jpeer == nullptr);
2203 CHECK(tlsPtr_.opeer == nullptr);
2204 bool initialized = (tlsPtr_.jni_env != nullptr); // Did Thread::Init run?
2205 if (initialized) {
2206 delete tlsPtr_.jni_env;
2207 tlsPtr_.jni_env = nullptr;
2208 }
2209 CHECK_NE(GetState(), kRunnable);
2210 CHECK(!ReadFlag(kCheckpointRequest));
2211 CHECK(!ReadFlag(kEmptyCheckpointRequest));
2212 CHECK(tlsPtr_.checkpoint_function == nullptr);
2213 CHECK_EQ(checkpoint_overflow_.size(), 0u);
2214 CHECK(tlsPtr_.flip_function == nullptr);
2215 CHECK_EQ(tls32_.is_transitioning_to_runnable, false);
2216
2217 // Make sure we processed all deoptimization requests.
2218 CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";
2219 CHECK(tlsPtr_.frame_id_to_shadow_frame == nullptr) <<
2220 "Not all deoptimized frames have been consumed by the debugger.";
2221
2222 // We may be deleting a stillborn thread.
2223 SetStateUnsafe(kTerminated);
2224
2225 delete wait_cond_;
2226 delete wait_mutex_;
2227
2228 if (tlsPtr_.long_jump_context != nullptr) {
2229 delete tlsPtr_.long_jump_context;
2230 }
2231
2232 if (initialized) {
2233 CleanupCpu();
2234 }
2235
2236 if (tlsPtr_.single_step_control != nullptr) {
2237 delete tlsPtr_.single_step_control;
2238 }
2239 delete tlsPtr_.instrumentation_stack;
2240 delete tlsPtr_.name;
2241 delete tlsPtr_.deps_or_stack_trace_sample.stack_trace_sample;
2242
2243 Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
2244
2245 TearDownAlternateSignalStack();
2246}
2247
2248void Thread::HandleUncaughtExceptions(ScopedObjectAccessAlreadyRunnable& soa) {
2249 if (!IsExceptionPending()) {
2250 return;
2251 }
2252 ScopedLocalRef<jobject> peer(tlsPtr_.jni_env, soa.AddLocalReference<jobject>(tlsPtr_.opeer));
2253 ScopedThreadStateChange tsc(this, kNative);
2254
2255 // Get and clear the exception.
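// (We switched to kNative above, so the pending exception is handled through JNI
// references below rather than through managed pointers.)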
2256 ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred()); 2257 tlsPtr_.jni_env->ExceptionClear(); 2258 2259 // Call the Thread instance's dispatchUncaughtException(Throwable) 2260 tlsPtr_.jni_env->CallVoidMethod(peer.get(), 2261 WellKnownClasses::java_lang_Thread_dispatchUncaughtException, 2262 exception.get()); 2263 2264 // If the dispatchUncaughtException threw, clear that exception too. 2265 tlsPtr_.jni_env->ExceptionClear(); 2266} 2267 2268void Thread::RemoveFromThreadGroup(ScopedObjectAccessAlreadyRunnable& soa) { 2269 // this.group.removeThread(this); 2270 // group can be null if we're in the compiler or a test. 2271 ObjPtr<mirror::Object> ogroup = jni::DecodeArtField(WellKnownClasses::java_lang_Thread_group) 2272 ->GetObject(tlsPtr_.opeer); 2273 if (ogroup != nullptr) { 2274 ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup)); 2275 ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(tlsPtr_.opeer)); 2276 ScopedThreadStateChange tsc(soa.Self(), kNative); 2277 tlsPtr_.jni_env->CallVoidMethod(group.get(), 2278 WellKnownClasses::java_lang_ThreadGroup_removeThread, 2279 peer.get()); 2280 } 2281} 2282 2283bool Thread::HandleScopeContains(jobject obj) const { 2284 StackReference<mirror::Object>* hs_entry = 2285 reinterpret_cast<StackReference<mirror::Object>*>(obj); 2286 for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur!= nullptr; cur = cur->GetLink()) { 2287 if (cur->Contains(hs_entry)) { 2288 return true; 2289 } 2290 } 2291 // JNI code invoked from portable code uses shadow frames rather than the handle scope. 2292 return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry); 2293} 2294 2295void Thread::HandleScopeVisitRoots(RootVisitor* visitor, pid_t thread_id) { 2296 BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor( 2297 visitor, RootInfo(kRootNativeStack, thread_id)); 2298 for (BaseHandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) { 2299 cur->VisitRoots(buffered_visitor); 2300 } 2301} 2302 2303ObjPtr<mirror::Object> Thread::DecodeJObject(jobject obj) const { 2304 if (obj == nullptr) { 2305 return nullptr; 2306 } 2307 IndirectRef ref = reinterpret_cast<IndirectRef>(obj); 2308 IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref); 2309 ObjPtr<mirror::Object> result; 2310 bool expect_null = false; 2311 // The "kinds" below are sorted by the frequency we expect to encounter them. 2312 if (kind == kLocal) { 2313 IndirectReferenceTable& locals = tlsPtr_.jni_env->locals; 2314 // Local references do not need a read barrier. 2315 result = locals.Get<kWithoutReadBarrier>(ref); 2316 } else if (kind == kHandleScopeOrInvalid) { 2317 // TODO: make stack indirect reference table lookup more efficient. 2318 // Check if this is a local reference in the handle scope. 2319 if (LIKELY(HandleScopeContains(obj))) { 2320 // Read from handle scope. 2321 result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr(); 2322 VerifyObject(result); 2323 } else { 2324 tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of invalid jobject %p", obj); 2325 expect_null = true; 2326 result = nullptr; 2327 } 2328 } else if (kind == kGlobal) { 2329 result = tlsPtr_.jni_env->vm->DecodeGlobal(ref); 2330 } else { 2331 DCHECK_EQ(kind, kWeakGlobal); 2332 result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref); 2333 if (Runtime::Current()->IsClearedJniWeakGlobal(result)) { 2334 // This is a special case where it's okay to return null. 
2335 expect_null = true; 2336 result = nullptr; 2337 } 2338 } 2339 2340 if (UNLIKELY(!expect_null && result == nullptr)) { 2341 tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of deleted %s %p", 2342 ToStr<IndirectRefKind>(kind).c_str(), obj); 2343 } 2344 return result; 2345} 2346 2347bool Thread::IsJWeakCleared(jweak obj) const { 2348 CHECK(obj != nullptr); 2349 IndirectRef ref = reinterpret_cast<IndirectRef>(obj); 2350 IndirectRefKind kind = IndirectReferenceTable::GetIndirectRefKind(ref); 2351 CHECK_EQ(kind, kWeakGlobal); 2352 return tlsPtr_.jni_env->vm->IsWeakGlobalCleared(const_cast<Thread*>(this), ref); 2353} 2354 2355// Implements java.lang.Thread.interrupted. 2356bool Thread::Interrupted() { 2357 DCHECK_EQ(Thread::Current(), this); 2358 // No other thread can concurrently reset the interrupted flag. 2359 bool interrupted = tls32_.interrupted.LoadSequentiallyConsistent(); 2360 if (interrupted) { 2361 tls32_.interrupted.StoreSequentiallyConsistent(false); 2362 } 2363 return interrupted; 2364} 2365 2366// Implements java.lang.Thread.isInterrupted. 2367bool Thread::IsInterrupted() { 2368 return tls32_.interrupted.LoadSequentiallyConsistent(); 2369} 2370 2371void Thread::Interrupt(Thread* self) { 2372 MutexLock mu(self, *wait_mutex_); 2373 if (tls32_.interrupted.LoadSequentiallyConsistent()) { 2374 return; 2375 } 2376 tls32_.interrupted.StoreSequentiallyConsistent(true); 2377 NotifyLocked(self); 2378} 2379 2380void Thread::Notify() { 2381 Thread* self = Thread::Current(); 2382 MutexLock mu(self, *wait_mutex_); 2383 NotifyLocked(self); 2384} 2385 2386void Thread::NotifyLocked(Thread* self) { 2387 if (wait_monitor_ != nullptr) { 2388 wait_cond_->Signal(self); 2389 } 2390} 2391 2392void Thread::SetClassLoaderOverride(jobject class_loader_override) { 2393 if (tlsPtr_.class_loader_override != nullptr) { 2394 GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override); 2395 } 2396 tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override); 2397} 2398 2399using ArtMethodDexPcPair = std::pair<ArtMethod*, uint32_t>; 2400 2401// Counts the stack trace depth and also fetches the first max_saved_frames frames. 2402class FetchStackTraceVisitor : public StackVisitor { 2403 public: 2404 explicit FetchStackTraceVisitor(Thread* thread, 2405 ArtMethodDexPcPair* saved_frames = nullptr, 2406 size_t max_saved_frames = 0) 2407 REQUIRES_SHARED(Locks::mutator_lock_) 2408 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), 2409 saved_frames_(saved_frames), 2410 max_saved_frames_(max_saved_frames) {} 2411 2412 bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) { 2413 // We want to skip frames up to and including the exception's constructor. 2414 // Note we also skip the frame if it doesn't have a method (namely the callee 2415 // save frame) 2416 ArtMethod* m = GetMethod(); 2417 if (skipping_ && !m->IsRuntimeMethod() && 2418 !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) { 2419 skipping_ = false; 2420 } 2421 if (!skipping_) { 2422 if (!m->IsRuntimeMethod()) { // Ignore runtime frames (in particular callee save). 2423 if (depth_ < max_saved_frames_) { 2424 saved_frames_[depth_].first = m; 2425 saved_frames_[depth_].second = m->IsProxyMethod() ? 
dex::kDexNoIndex : GetDexPc(); 2426 } 2427 ++depth_; 2428 } 2429 } else { 2430 ++skip_depth_; 2431 } 2432 return true; 2433 } 2434 2435 uint32_t GetDepth() const { 2436 return depth_; 2437 } 2438 2439 uint32_t GetSkipDepth() const { 2440 return skip_depth_; 2441 } 2442 2443 private: 2444 uint32_t depth_ = 0; 2445 uint32_t skip_depth_ = 0; 2446 bool skipping_ = true; 2447 ArtMethodDexPcPair* saved_frames_; 2448 const size_t max_saved_frames_; 2449 2450 DISALLOW_COPY_AND_ASSIGN(FetchStackTraceVisitor); 2451}; 2452 2453template<bool kTransactionActive> 2454class BuildInternalStackTraceVisitor : public StackVisitor { 2455 public: 2456 BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth) 2457 : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames), 2458 self_(self), 2459 skip_depth_(skip_depth), 2460 pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {} 2461 2462 bool Init(int depth) REQUIRES_SHARED(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) { 2463 // Allocate method trace as an object array where the first element is a pointer array that 2464 // contains the ArtMethod pointers and dex PCs. The rest of the elements are the declaring 2465 // class of the ArtMethod pointers. 2466 ClassLinker* class_linker = Runtime::Current()->GetClassLinker(); 2467 StackHandleScope<1> hs(self_); 2468 ObjPtr<mirror::Class> array_class = class_linker->GetClassRoot(ClassLinker::kObjectArrayClass); 2469 // The first element is the methods and dex pc array, the other elements are declaring classes 2470 // for the methods to ensure classes in the stack trace don't get unloaded. 2471 Handle<mirror::ObjectArray<mirror::Object>> trace( 2472 hs.NewHandle( 2473 mirror::ObjectArray<mirror::Object>::Alloc(hs.Self(), array_class, depth + 1))); 2474 if (trace == nullptr) { 2475 // Acquire uninterruptible_ in all paths. 2476 self_->StartAssertNoThreadSuspension("Building internal stack trace"); 2477 self_->AssertPendingOOMException(); 2478 return false; 2479 } 2480 ObjPtr<mirror::PointerArray> methods_and_pcs = 2481 class_linker->AllocPointerArray(self_, depth * 2); 2482 const char* last_no_suspend_cause = 2483 self_->StartAssertNoThreadSuspension("Building internal stack trace"); 2484 if (methods_and_pcs == nullptr) { 2485 self_->AssertPendingOOMException(); 2486 return false; 2487 } 2488 trace->Set(0, methods_and_pcs); 2489 trace_ = trace.Get(); 2490 // If We are called from native, use non-transactional mode. 2491 CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause; 2492 return true; 2493 } 2494 2495 virtual ~BuildInternalStackTraceVisitor() RELEASE(Roles::uninterruptible_) { 2496 self_->EndAssertNoThreadSuspension(nullptr); 2497 } 2498 2499 bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) { 2500 if (trace_ == nullptr) { 2501 return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError. 2502 } 2503 if (skip_depth_ > 0) { 2504 skip_depth_--; 2505 return true; 2506 } 2507 ArtMethod* m = GetMethod(); 2508 if (m->IsRuntimeMethod()) { 2509 return true; // Ignore runtime frames (in particular callee save). 2510 } 2511 AddFrame(m, m->IsProxyMethod() ? 
dex::kDexNoIndex : GetDexPc()); 2512 return true; 2513 } 2514 2515 void AddFrame(ArtMethod* method, uint32_t dex_pc) REQUIRES_SHARED(Locks::mutator_lock_) { 2516 ObjPtr<mirror::PointerArray> trace_methods_and_pcs = GetTraceMethodsAndPCs(); 2517 trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>(count_, method, pointer_size_); 2518 trace_methods_and_pcs->SetElementPtrSize<kTransactionActive>( 2519 trace_methods_and_pcs->GetLength() / 2 + count_, 2520 dex_pc, 2521 pointer_size_); 2522 // Save the declaring class of the method to ensure that the declaring classes of the methods 2523 // do not get unloaded while the stack trace is live. 2524 trace_->Set(count_ + 1, method->GetDeclaringClass()); 2525 ++count_; 2526 } 2527 2528 ObjPtr<mirror::PointerArray> GetTraceMethodsAndPCs() const REQUIRES_SHARED(Locks::mutator_lock_) { 2529 return ObjPtr<mirror::PointerArray>::DownCast(MakeObjPtr(trace_->Get(0))); 2530 } 2531 2532 mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const { 2533 return trace_; 2534 } 2535 2536 private: 2537 Thread* const self_; 2538 // How many more frames to skip. 2539 int32_t skip_depth_; 2540 // Current position down stack trace. 2541 uint32_t count_ = 0; 2542 // An object array where the first element is a pointer array that contains the ArtMethod 2543 // pointers on the stack and dex PCs. The rest of the elements are the declaring 2544 // class of the ArtMethod pointers. trace_[i+1] contains the declaring class of the ArtMethod of 2545 // the i'th frame. 2546 mirror::ObjectArray<mirror::Object>* trace_ = nullptr; 2547 // For cross compilation. 2548 const PointerSize pointer_size_; 2549 2550 DISALLOW_COPY_AND_ASSIGN(BuildInternalStackTraceVisitor); 2551}; 2552 2553template<bool kTransactionActive> 2554jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const { 2555 // Compute depth of stack, save frames if possible to avoid needing to recompute many. 2556 constexpr size_t kMaxSavedFrames = 256; 2557 std::unique_ptr<ArtMethodDexPcPair[]> saved_frames(new ArtMethodDexPcPair[kMaxSavedFrames]); 2558 FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this), 2559 &saved_frames[0], 2560 kMaxSavedFrames); 2561 count_visitor.WalkStack(); 2562 const uint32_t depth = count_visitor.GetDepth(); 2563 const uint32_t skip_depth = count_visitor.GetSkipDepth(); 2564 2565 // Build internal stack trace. 2566 BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(), 2567 const_cast<Thread*>(this), 2568 skip_depth); 2569 if (!build_trace_visitor.Init(depth)) { 2570 return nullptr; // Allocation failed. 2571 } 2572 // If we saved all of the frames we don't even need to do the actual stack walk. This is faster 2573 // than doing the stack walk twice. 2574 if (depth < kMaxSavedFrames) { 2575 for (size_t i = 0; i < depth; ++i) { 2576 build_trace_visitor.AddFrame(saved_frames[i].first, saved_frames[i].second); 2577 } 2578 } else { 2579 build_trace_visitor.WalkStack(); 2580 } 2581 2582 mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace(); 2583 if (kIsDebugBuild) { 2584 ObjPtr<mirror::PointerArray> trace_methods = build_trace_visitor.GetTraceMethodsAndPCs(); 2585 // Second half of trace_methods is dex PCs. 
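// Concretely, AddFrame stores the ArtMethod* for frame i at index i and the
// corresponding dex PC at index (length / 2) + i, so a reader does (sketch):
//   ArtMethod* m = trace_methods->GetElementPtrSize<ArtMethod*>(i, pointer_size);
//   uint32_t dex_pc = trace_methods->GetElementPtrSize<uint32_t>(
//       trace_methods->GetLength() / 2 + i, pointer_size);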
2586 for (uint32_t i = 0; i < static_cast<uint32_t>(trace_methods->GetLength() / 2); ++i) { 2587 auto* method = trace_methods->GetElementPtrSize<ArtMethod*>( 2588 i, Runtime::Current()->GetClassLinker()->GetImagePointerSize()); 2589 CHECK(method != nullptr); 2590 } 2591 } 2592 return soa.AddLocalReference<jobject>(trace); 2593} 2594template jobject Thread::CreateInternalStackTrace<false>( 2595 const ScopedObjectAccessAlreadyRunnable& soa) const; 2596template jobject Thread::CreateInternalStackTrace<true>( 2597 const ScopedObjectAccessAlreadyRunnable& soa) const; 2598 2599bool Thread::IsExceptionThrownByCurrentMethod(ObjPtr<mirror::Throwable> exception) const { 2600 // Only count the depth since we do not pass a stack frame array as an argument. 2601 FetchStackTraceVisitor count_visitor(const_cast<Thread*>(this)); 2602 count_visitor.WalkStack(); 2603 return count_visitor.GetDepth() == static_cast<uint32_t>(exception->GetStackDepth()); 2604} 2605 2606jobjectArray Thread::InternalStackTraceToStackTraceElementArray( 2607 const ScopedObjectAccessAlreadyRunnable& soa, 2608 jobject internal, 2609 jobjectArray output_array, 2610 int* stack_depth) { 2611 // Decode the internal stack trace into the depth, method trace and PC trace. 2612 // Subtract one for the methods and PC trace. 2613 int32_t depth = soa.Decode<mirror::Array>(internal)->GetLength() - 1; 2614 DCHECK_GE(depth, 0); 2615 2616 ClassLinker* const class_linker = Runtime::Current()->GetClassLinker(); 2617 2618 jobjectArray result; 2619 2620 if (output_array != nullptr) { 2621 // Reuse the array we were given. 2622 result = output_array; 2623 // ...adjusting the number of frames we'll write to not exceed the array length. 2624 const int32_t traces_length = 2625 soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->GetLength(); 2626 depth = std::min(depth, traces_length); 2627 } else { 2628 // Create java_trace array and place in local reference table 2629 mirror::ObjectArray<mirror::StackTraceElement>* java_traces = 2630 class_linker->AllocStackTraceElementArray(soa.Self(), depth); 2631 if (java_traces == nullptr) { 2632 return nullptr; 2633 } 2634 result = soa.AddLocalReference<jobjectArray>(java_traces); 2635 } 2636 2637 if (stack_depth != nullptr) { 2638 *stack_depth = depth; 2639 } 2640 2641 for (int32_t i = 0; i < depth; ++i) { 2642 ObjPtr<mirror::ObjectArray<mirror::Object>> decoded_traces = 2643 soa.Decode<mirror::Object>(internal)->AsObjectArray<mirror::Object>(); 2644 // Methods and dex PC trace is element 0. 
2645 DCHECK(decoded_traces->Get(0)->IsIntArray() || decoded_traces->Get(0)->IsLongArray()); 2646 ObjPtr<mirror::PointerArray> const method_trace = 2647 ObjPtr<mirror::PointerArray>::DownCast(MakeObjPtr(decoded_traces->Get(0))); 2648 // Prepare parameters for StackTraceElement(String cls, String method, String file, int line) 2649 ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, kRuntimePointerSize); 2650 uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>( 2651 i + method_trace->GetLength() / 2, kRuntimePointerSize); 2652 int32_t line_number; 2653 StackHandleScope<3> hs(soa.Self()); 2654 auto class_name_object(hs.NewHandle<mirror::String>(nullptr)); 2655 auto source_name_object(hs.NewHandle<mirror::String>(nullptr)); 2656 if (method->IsProxyMethod()) { 2657 line_number = -1; 2658 class_name_object.Assign(method->GetDeclaringClass()->GetName()); 2659 // source_name_object intentionally left null for proxy methods 2660 } else { 2661 line_number = method->GetLineNumFromDexPC(dex_pc); 2662 // Allocate element, potentially triggering GC 2663 // TODO: reuse class_name_object via Class::name_? 2664 const char* descriptor = method->GetDeclaringClassDescriptor(); 2665 CHECK(descriptor != nullptr); 2666 std::string class_name(PrettyDescriptor(descriptor)); 2667 class_name_object.Assign( 2668 mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str())); 2669 if (class_name_object == nullptr) { 2670 soa.Self()->AssertPendingOOMException(); 2671 return nullptr; 2672 } 2673 const char* source_file = method->GetDeclaringClassSourceFile(); 2674 if (line_number == -1) { 2675 // Make the line_number field of StackTraceElement hold the dex pc. 2676 // source_name_object is intentionally left null if we failed to map the dex pc to 2677 // a line number (most probably because there is no debug info). See b/30183883. 2678 line_number = dex_pc; 2679 } else { 2680 if (source_file != nullptr) { 2681 source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file)); 2682 if (source_name_object == nullptr) { 2683 soa.Self()->AssertPendingOOMException(); 2684 return nullptr; 2685 } 2686 } 2687 } 2688 } 2689 const char* method_name = method->GetInterfaceMethodIfProxy(kRuntimePointerSize)->GetName(); 2690 CHECK(method_name != nullptr); 2691 Handle<mirror::String> method_name_object( 2692 hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name))); 2693 if (method_name_object == nullptr) { 2694 return nullptr; 2695 } 2696 ObjPtr<mirror::StackTraceElement> obj = mirror::StackTraceElement::Alloc(soa.Self(), 2697 class_name_object, 2698 method_name_object, 2699 source_name_object, 2700 line_number); 2701 if (obj == nullptr) { 2702 return nullptr; 2703 } 2704 // We are called from native: use non-transactional mode. 2705 soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>>(result)->Set<false>(i, obj); 2706 } 2707 return result; 2708} 2709 2710void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) 
{ 2711 va_list args; 2712 va_start(args, fmt); 2713 ThrowNewExceptionV(exception_class_descriptor, fmt, args); 2714 va_end(args); 2715} 2716 2717void Thread::ThrowNewExceptionV(const char* exception_class_descriptor, 2718 const char* fmt, va_list ap) { 2719 std::string msg; 2720 StringAppendV(&msg, fmt, ap); 2721 ThrowNewException(exception_class_descriptor, msg.c_str()); 2722} 2723 2724void Thread::ThrowNewException(const char* exception_class_descriptor, 2725 const char* msg) { 2726 // Callers should either clear or call ThrowNewWrappedException. 2727 AssertNoPendingExceptionForNewException(msg); 2728 ThrowNewWrappedException(exception_class_descriptor, msg); 2729} 2730 2731static ObjPtr<mirror::ClassLoader> GetCurrentClassLoader(Thread* self) 2732 REQUIRES_SHARED(Locks::mutator_lock_) { 2733 ArtMethod* method = self->GetCurrentMethod(nullptr); 2734 return method != nullptr 2735 ? method->GetDeclaringClass()->GetClassLoader() 2736 : nullptr; 2737} 2738 2739void Thread::ThrowNewWrappedException(const char* exception_class_descriptor, 2740 const char* msg) { 2741 DCHECK_EQ(this, Thread::Current()); 2742 ScopedObjectAccessUnchecked soa(this); 2743 StackHandleScope<3> hs(soa.Self()); 2744 Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self()))); 2745 ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException())); 2746 ClearException(); 2747 Runtime* runtime = Runtime::Current(); 2748 auto* cl = runtime->GetClassLinker(); 2749 Handle<mirror::Class> exception_class( 2750 hs.NewHandle(cl->FindClass(this, exception_class_descriptor, class_loader))); 2751 if (UNLIKELY(exception_class == nullptr)) { 2752 CHECK(IsExceptionPending()); 2753 LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor); 2754 return; 2755 } 2756 2757 if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true, 2758 true))) { 2759 DCHECK(IsExceptionPending()); 2760 return; 2761 } 2762 DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass()); 2763 Handle<mirror::Throwable> exception( 2764 hs.NewHandle(ObjPtr<mirror::Throwable>::DownCast(exception_class->AllocObject(this)))); 2765 2766 // If we couldn't allocate the exception, throw the pre-allocated out of memory exception. 2767 if (exception == nullptr) { 2768 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError()); 2769 return; 2770 } 2771 2772 // Choose an appropriate constructor and set up the arguments. 2773 const char* signature; 2774 ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr); 2775 if (msg != nullptr) { 2776 // Ensure we remember this and the method over the String allocation. 2777 msg_string.reset( 2778 soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg))); 2779 if (UNLIKELY(msg_string.get() == nullptr)) { 2780 CHECK(IsExceptionPending()); // OOME. 
2781 return; 2782 } 2783 if (cause.get() == nullptr) { 2784 signature = "(Ljava/lang/String;)V"; 2785 } else { 2786 signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V"; 2787 } 2788 } else { 2789 if (cause.get() == nullptr) { 2790 signature = "()V"; 2791 } else { 2792 signature = "(Ljava/lang/Throwable;)V"; 2793 } 2794 } 2795 ArtMethod* exception_init_method = 2796 exception_class->FindConstructor(signature, cl->GetImagePointerSize()); 2797 2798 CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in " 2799 << PrettyDescriptor(exception_class_descriptor); 2800 2801 if (UNLIKELY(!runtime->IsStarted())) { 2802 // Something is trying to throw an exception without a started runtime, which is the common 2803 // case in the compiler. We won't be able to invoke the constructor of the exception, so set 2804 // the exception fields directly. 2805 if (msg != nullptr) { 2806 exception->SetDetailMessage(DecodeJObject(msg_string.get())->AsString()); 2807 } 2808 if (cause.get() != nullptr) { 2809 exception->SetCause(DecodeJObject(cause.get())->AsThrowable()); 2810 } 2811 ScopedLocalRef<jobject> trace(GetJniEnv(), 2812 Runtime::Current()->IsActiveTransaction() 2813 ? CreateInternalStackTrace<true>(soa) 2814 : CreateInternalStackTrace<false>(soa)); 2815 if (trace.get() != nullptr) { 2816 exception->SetStackState(DecodeJObject(trace.get()).Ptr()); 2817 } 2818 SetException(exception.Get()); 2819 } else { 2820 jvalue jv_args[2]; 2821 size_t i = 0; 2822 2823 if (msg != nullptr) { 2824 jv_args[i].l = msg_string.get(); 2825 ++i; 2826 } 2827 if (cause.get() != nullptr) { 2828 jv_args[i].l = cause.get(); 2829 ++i; 2830 } 2831 ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get())); 2832 InvokeWithJValues(soa, ref.get(), jni::EncodeArtMethod(exception_init_method), jv_args); 2833 if (LIKELY(!IsExceptionPending())) { 2834 SetException(exception.Get()); 2835 } 2836 } 2837} 2838 2839void Thread::ThrowOutOfMemoryError(const char* msg) { 2840 LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s", 2841 msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : "")); 2842 if (!tls32_.throwing_OutOfMemoryError) { 2843 tls32_.throwing_OutOfMemoryError = true; 2844 ThrowNewException("Ljava/lang/OutOfMemoryError;", msg); 2845 tls32_.throwing_OutOfMemoryError = false; 2846 } else { 2847 Dump(LOG_STREAM(WARNING)); // The pre-allocated OOME has no stack, so help out and log one. 2848 SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError()); 2849 } 2850} 2851 2852Thread* Thread::CurrentFromGdb() { 2853 return Thread::Current(); 2854} 2855 2856void Thread::DumpFromGdb() const { 2857 std::ostringstream ss; 2858 Dump(ss); 2859 std::string str(ss.str()); 2860 // log to stderr for debugging command line processes 2861 std::cerr << str; 2862#ifdef ART_TARGET_ANDROID 2863 // log to logcat for debugging frameworks processes 2864 LOG(INFO) << str; 2865#endif 2866} 2867 2868// Explicitly instantiate 32 and 64bit thread offset dumping support. 
2869template 2870void Thread::DumpThreadOffset<PointerSize::k32>(std::ostream& os, uint32_t offset); 2871template 2872void Thread::DumpThreadOffset<PointerSize::k64>(std::ostream& os, uint32_t offset); 2873 2874template<PointerSize ptr_size> 2875void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) { 2876#define DO_THREAD_OFFSET(x, y) \ 2877 if (offset == (x).Uint32Value()) { \ 2878 os << (y); \ 2879 return; \ 2880 } 2881 DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags") 2882 DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table") 2883 DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception") 2884 DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer"); 2885 DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env") 2886 DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self") 2887 DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end") 2888 DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id") 2889 DO_THREAD_OFFSET(IsGcMarkingOffset<ptr_size>(), "is_gc_marking") 2890 DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method") 2891 DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame") 2892 DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope") 2893 DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger") 2894#undef DO_THREAD_OFFSET 2895 2896#define JNI_ENTRY_POINT_INFO(x) \ 2897 if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \ 2898 os << #x; \ 2899 return; \ 2900 } 2901 JNI_ENTRY_POINT_INFO(pDlsymLookup) 2902#undef JNI_ENTRY_POINT_INFO 2903 2904#define QUICK_ENTRY_POINT_INFO(x) \ 2905 if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \ 2906 os << #x; \ 2907 return; \ 2908 } 2909 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved) 2910 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved8) 2911 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved16) 2912 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved32) 2913 QUICK_ENTRY_POINT_INFO(pAllocArrayResolved64) 2914 QUICK_ENTRY_POINT_INFO(pAllocObjectResolved) 2915 QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized) 2916 QUICK_ENTRY_POINT_INFO(pAllocObjectWithChecks) 2917 QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes) 2918 QUICK_ENTRY_POINT_INFO(pAllocStringFromChars) 2919 QUICK_ENTRY_POINT_INFO(pAllocStringFromString) 2920 QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial) 2921 QUICK_ENTRY_POINT_INFO(pCheckInstanceOf) 2922 QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage) 2923 QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess) 2924 QUICK_ENTRY_POINT_INFO(pInitializeType) 2925 QUICK_ENTRY_POINT_INFO(pResolveString) 2926 QUICK_ENTRY_POINT_INFO(pSet8Instance) 2927 QUICK_ENTRY_POINT_INFO(pSet8Static) 2928 QUICK_ENTRY_POINT_INFO(pSet16Instance) 2929 QUICK_ENTRY_POINT_INFO(pSet16Static) 2930 QUICK_ENTRY_POINT_INFO(pSet32Instance) 2931 QUICK_ENTRY_POINT_INFO(pSet32Static) 2932 QUICK_ENTRY_POINT_INFO(pSet64Instance) 2933 QUICK_ENTRY_POINT_INFO(pSet64Static) 2934 QUICK_ENTRY_POINT_INFO(pSetObjInstance) 2935 QUICK_ENTRY_POINT_INFO(pSetObjStatic) 2936 QUICK_ENTRY_POINT_INFO(pGetByteInstance) 2937 QUICK_ENTRY_POINT_INFO(pGetBooleanInstance) 2938 QUICK_ENTRY_POINT_INFO(pGetByteStatic) 2939 QUICK_ENTRY_POINT_INFO(pGetBooleanStatic) 2940 QUICK_ENTRY_POINT_INFO(pGetShortInstance) 2941 QUICK_ENTRY_POINT_INFO(pGetCharInstance) 2942 QUICK_ENTRY_POINT_INFO(pGetShortStatic) 2943 QUICK_ENTRY_POINT_INFO(pGetCharStatic) 2944 QUICK_ENTRY_POINT_INFO(pGet32Instance) 2945 QUICK_ENTRY_POINT_INFO(pGet32Static) 2946 QUICK_ENTRY_POINT_INFO(pGet64Instance) 2947 
QUICK_ENTRY_POINT_INFO(pGet64Static) 2948 QUICK_ENTRY_POINT_INFO(pGetObjInstance) 2949 QUICK_ENTRY_POINT_INFO(pGetObjStatic) 2950 QUICK_ENTRY_POINT_INFO(pAputObject) 2951 QUICK_ENTRY_POINT_INFO(pJniMethodStart) 2952 QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized) 2953 QUICK_ENTRY_POINT_INFO(pJniMethodEnd) 2954 QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized) 2955 QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference) 2956 QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized) 2957 QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline) 2958 QUICK_ENTRY_POINT_INFO(pLockObject) 2959 QUICK_ENTRY_POINT_INFO(pUnlockObject) 2960 QUICK_ENTRY_POINT_INFO(pCmpgDouble) 2961 QUICK_ENTRY_POINT_INFO(pCmpgFloat) 2962 QUICK_ENTRY_POINT_INFO(pCmplDouble) 2963 QUICK_ENTRY_POINT_INFO(pCmplFloat) 2964 QUICK_ENTRY_POINT_INFO(pCos) 2965 QUICK_ENTRY_POINT_INFO(pSin) 2966 QUICK_ENTRY_POINT_INFO(pAcos) 2967 QUICK_ENTRY_POINT_INFO(pAsin) 2968 QUICK_ENTRY_POINT_INFO(pAtan) 2969 QUICK_ENTRY_POINT_INFO(pAtan2) 2970 QUICK_ENTRY_POINT_INFO(pCbrt) 2971 QUICK_ENTRY_POINT_INFO(pCosh) 2972 QUICK_ENTRY_POINT_INFO(pExp) 2973 QUICK_ENTRY_POINT_INFO(pExpm1) 2974 QUICK_ENTRY_POINT_INFO(pHypot) 2975 QUICK_ENTRY_POINT_INFO(pLog) 2976 QUICK_ENTRY_POINT_INFO(pLog10) 2977 QUICK_ENTRY_POINT_INFO(pNextAfter) 2978 QUICK_ENTRY_POINT_INFO(pSinh) 2979 QUICK_ENTRY_POINT_INFO(pTan) 2980 QUICK_ENTRY_POINT_INFO(pTanh) 2981 QUICK_ENTRY_POINT_INFO(pFmod) 2982 QUICK_ENTRY_POINT_INFO(pL2d) 2983 QUICK_ENTRY_POINT_INFO(pFmodf) 2984 QUICK_ENTRY_POINT_INFO(pL2f) 2985 QUICK_ENTRY_POINT_INFO(pD2iz) 2986 QUICK_ENTRY_POINT_INFO(pF2iz) 2987 QUICK_ENTRY_POINT_INFO(pIdivmod) 2988 QUICK_ENTRY_POINT_INFO(pD2l) 2989 QUICK_ENTRY_POINT_INFO(pF2l) 2990 QUICK_ENTRY_POINT_INFO(pLdiv) 2991 QUICK_ENTRY_POINT_INFO(pLmod) 2992 QUICK_ENTRY_POINT_INFO(pLmul) 2993 QUICK_ENTRY_POINT_INFO(pShlLong) 2994 QUICK_ENTRY_POINT_INFO(pShrLong) 2995 QUICK_ENTRY_POINT_INFO(pUshrLong) 2996 QUICK_ENTRY_POINT_INFO(pIndexOf) 2997 QUICK_ENTRY_POINT_INFO(pStringCompareTo) 2998 QUICK_ENTRY_POINT_INFO(pMemcpy) 2999 QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline) 3000 QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline) 3001 QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge) 3002 QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck) 3003 QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck) 3004 QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck) 3005 QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck) 3006 QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck) 3007 QUICK_ENTRY_POINT_INFO(pInvokePolymorphic) 3008 QUICK_ENTRY_POINT_INFO(pTestSuspend) 3009 QUICK_ENTRY_POINT_INFO(pDeliverException) 3010 QUICK_ENTRY_POINT_INFO(pThrowArrayBounds) 3011 QUICK_ENTRY_POINT_INFO(pThrowDivZero) 3012 QUICK_ENTRY_POINT_INFO(pThrowNullPointer) 3013 QUICK_ENTRY_POINT_INFO(pThrowStackOverflow) 3014 QUICK_ENTRY_POINT_INFO(pDeoptimize) 3015 QUICK_ENTRY_POINT_INFO(pA64Load) 3016 QUICK_ENTRY_POINT_INFO(pA64Store) 3017 QUICK_ENTRY_POINT_INFO(pNewEmptyString) 3018 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B) 3019 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI) 3020 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII) 3021 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII) 3022 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIIString) 3023 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BString) 3024 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIICharset) 3025 QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BCharset) 3026 QUICK_ENTRY_POINT_INFO(pNewStringFromChars_C) 3027 
  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_CII)
  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_IIC)
  QUICK_ENTRY_POINT_INFO(pNewStringFromCodePoints)
  QUICK_ENTRY_POINT_INFO(pNewStringFromString)
  QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer)
  QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
  QUICK_ENTRY_POINT_INFO(pReadBarrierJni)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg00)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg01)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg02)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg03)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg04)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg05)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg06)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg07)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg08)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg09)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg10)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg11)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg12)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg13)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg14)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg15)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg16)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg17)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg18)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg19)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg20)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg21)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg22)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg23)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg24)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg25)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg26)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg27)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg28)
  QUICK_ENTRY_POINT_INFO(pReadBarrierMarkReg29)
  QUICK_ENTRY_POINT_INFO(pReadBarrierSlow)
  QUICK_ENTRY_POINT_INFO(pReadBarrierForRootSlow)

  QUICK_ENTRY_POINT_INFO(pJniMethodFastStart)
  QUICK_ENTRY_POINT_INFO(pJniMethodFastEnd)
#undef QUICK_ENTRY_POINT_INFO

  os << offset;
}

void Thread::QuickDeliverException() {
  // Get exception from thread.
  ObjPtr<mirror::Throwable> exception = GetException();
  CHECK(exception != nullptr);
  if (exception == GetDeoptimizationException()) {
    artDeoptimize(this);
    UNREACHABLE();
  }

  // This is a real exception: let the instrumentation know about it.
  instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
  if (instrumentation->HasExceptionThrownListeners() &&
      IsExceptionThrownByCurrentMethod(exception)) {
    // Instrumentation may cause GC so keep the exception object safe.
    StackHandleScope<1> hs(this);
    HandleWrapperObjPtr<mirror::Throwable> h_exception(hs.NewHandleWrapper(&exception));
    instrumentation->ExceptionThrownEvent(this, exception.Ptr());
  }
  // Does instrumentation need to deoptimize the stack?
  // Note: we do this *after* reporting the exception to instrumentation in case it
  // now requires deoptimization. It may happen if a debugger is attached and requests
  // new events (single-step, breakpoint, ...) when the exception is reported.
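  // If deoptimization is requested here, the pending exception travels through the
  // deoptimization context (PushDeoptimizationContext() below) and is restored by
  // DeoptimizeWithDeoptimizationException() later in this file before the interpreter
  // re-executes the deoptimized frames.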
  if (Dbg::IsForcedInterpreterNeededForException(this)) {
    NthCallerVisitor visitor(this, 0, false);
    visitor.WalkStack();
    if (Runtime::Current()->IsAsyncDeoptimizeable(visitor.caller_pc)) {
      // method_type shouldn't matter due to exception handling.
      const DeoptimizationMethodType method_type = DeoptimizationMethodType::kDefault;
      // Save the exception into the deoptimization context so it can be restored
      // before entering the interpreter.
      PushDeoptimizationContext(
          JValue(),
          false /* is_reference */,
          exception,
          false /* from_code */,
          method_type);
      artDeoptimize(this);
      UNREACHABLE();
    } else {
      LOG(WARNING) << "Got a deoptimization request on un-deoptimizable method "
                   << visitor.caller->PrettyMethod();
    }
  }

  // Don't leave exception visible while we try to find the handler, which may cause class
  // resolution.
  ClearException();
  QuickExceptionHandler exception_handler(this, false);
  exception_handler.FindCatch(exception);
  exception_handler.UpdateInstrumentationStack();
  exception_handler.DoLongJump();
}

Context* Thread::GetLongJumpContext() {
  Context* result = tlsPtr_.long_jump_context;
  if (result == nullptr) {
    result = Context::Create();
  } else {
    tlsPtr_.long_jump_context = nullptr;  // Avoid context being shared.
    result->Reset();
  }
  return result;
}

// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
// so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
struct CurrentMethodVisitor FINAL : public StackVisitor {
  CurrentMethodVisitor(Thread* thread, Context* context, bool check_suspended, bool abort_on_error)
      REQUIRES_SHARED(Locks::mutator_lock_)
      : StackVisitor(thread,
                     context,
                     StackVisitor::StackWalkKind::kIncludeInlinedFrames,
                     check_suspended),
        this_object_(nullptr),
        method_(nullptr),
        dex_pc_(0),
        abort_on_error_(abort_on_error) {}
  bool VisitFrame() OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      // Continue if this is a runtime method.
      return true;
    }
    if (context_ != nullptr) {
      this_object_ = GetThisObject();
    }
    method_ = m;
    dex_pc_ = GetDexPc(abort_on_error_);
    return false;
  }
  ObjPtr<mirror::Object> this_object_;
  ArtMethod* method_;
  uint32_t dex_pc_;
  const bool abort_on_error_;
};

ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc,
                                    bool check_suspended,
                                    bool abort_on_error) const {
  CurrentMethodVisitor visitor(const_cast<Thread*>(this),
                               nullptr,
                               check_suspended,
                               abort_on_error);
  visitor.WalkStack(false);
  if (dex_pc != nullptr) {
    *dex_pc = visitor.dex_pc_;
  }
  return visitor.method_;
}

bool Thread::HoldsLock(ObjPtr<mirror::Object> object) const {
  return object != nullptr && object->GetLockOwnerThreadId() == GetThreadId();
}

// RootVisitor parameters are: (mirror::Object** obj, size_t vreg, const StackVisitor* visitor).
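// For illustration only -- a minimal sketch, not part of the runtime: any callable with that
// shape can be plugged in as the RootVisitor template argument of ReferenceMapVisitor, e.g.
//
//   struct CountingRootVisitor {  // hypothetical, exists only in this comment
//     size_t count = 0;
//     void operator()(mirror::Object** obj ATTRIBUTE_UNUSED,
//                     size_t vreg ATTRIBUTE_UNUSED,
//                     const StackVisitor* stack_visitor ATTRIBUTE_UNUSED)
//         REQUIRES_SHARED(Locks::mutator_lock_) {
//       ++count;
//     }
//   };
//
// RootCallbackVisitor, defined after ReferenceMapVisitor below, is the functor that
// Thread::VisitRoots actually uses.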
template <typename RootVisitor, bool kPrecise = false>
class ReferenceMapVisitor : public StackVisitor {
 public:
  ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
      REQUIRES_SHARED(Locks::mutator_lock_)
        // We are visiting the references in compiled frames, so we do not need
        // to know the inlined frames.
      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
        visitor_(visitor) {}

  bool VisitFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (false) {
      LOG(INFO) << "Visiting stack roots in " << ArtMethod::PrettyMethod(GetMethod())
                << StringPrintf("@ PC:%04x", GetDexPc());
    }
    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
    if (shadow_frame != nullptr) {
      VisitShadowFrame(shadow_frame);
    } else {
      VisitQuickFrame();
    }
    return true;
  }

  void VisitShadowFrame(ShadowFrame* shadow_frame) REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod* m = shadow_frame->GetMethod();
    VisitDeclaringClass(m);
    DCHECK(m != nullptr);
    size_t num_regs = shadow_frame->NumberOfVRegs();
    DCHECK(m->IsNative() || shadow_frame->HasReferenceArray());
    // handle scope for JNI or References for interpreter.
    for (size_t reg = 0; reg < num_regs; ++reg) {
      mirror::Object* ref = shadow_frame->GetVRegReference(reg);
      if (ref != nullptr) {
        mirror::Object* new_ref = ref;
        visitor_(&new_ref, reg, this);
        if (new_ref != ref) {
          shadow_frame->SetVRegReference(reg, new_ref);
        }
      }
    }
    // Mark lock count map required for structured locking checks.
    shadow_frame->GetLockCountData().VisitMonitors(visitor_, -1, this);
  }

 private:
  // Visiting the declaring class is necessary so that we don't unload the class of a method that
  // is executing. We need to ensure that the code stays mapped. NO_THREAD_SAFETY_ANALYSIS since
  // the threads do not all hold the heap bitmap lock for parallel GC.
  void VisitDeclaringClass(ArtMethod* method)
      REQUIRES_SHARED(Locks::mutator_lock_)
      NO_THREAD_SAFETY_ANALYSIS {
    ObjPtr<mirror::Class> klass = method->GetDeclaringClassUnchecked<kWithoutReadBarrier>();
    // klass can be null for runtime methods.
    if (klass != nullptr) {
      if (kVerifyImageObjectsMarked) {
        gc::Heap* const heap = Runtime::Current()->GetHeap();
        gc::space::ContinuousSpace* space = heap->FindContinuousSpaceFromObject(klass,
                                                                                /*fail_ok*/true);
        if (space != nullptr && space->IsImageSpace()) {
          bool failed = false;
          if (!space->GetLiveBitmap()->Test(klass.Ptr())) {
            failed = true;
            LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image " << *space;
          } else if (!heap->GetLiveBitmap()->Test(klass.Ptr())) {
            failed = true;
            LOG(FATAL_WITHOUT_ABORT) << "Unmarked object in image through live bitmap " << *space;
          }
          if (failed) {
            GetThread()->Dump(LOG_STREAM(FATAL_WITHOUT_ABORT));
            space->AsImageSpace()->DumpSections(LOG_STREAM(FATAL_WITHOUT_ABORT));
            LOG(FATAL_WITHOUT_ABORT) << "Method@" << method->GetDexMethodIndex() << ":" << method
                                     << " klass@" << klass.Ptr();
            // Pretty info last in case it crashes.
            LOG(FATAL) << "Method " << method->PrettyMethod() << " klass "
                       << klass->PrettyClass();
          }
        }
      }
      mirror::Object* new_ref = klass.Ptr();
      visitor_(&new_ref, -1, this);
      if (new_ref != klass) {
        method->CASDeclaringClass(klass.Ptr(), new_ref->AsClass());
      }
    }
  }

  template <typename T>
  ALWAYS_INLINE
  inline void VisitQuickFrameWithVregCallback() REQUIRES_SHARED(Locks::mutator_lock_) {
    ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
    DCHECK(cur_quick_frame != nullptr);
    ArtMethod* m = *cur_quick_frame;
    VisitDeclaringClass(m);

    // Process register map (which native and runtime methods don't have)
    if (!m->IsNative() && !m->IsRuntimeMethod() && (!m->IsProxyMethod() || m->IsConstructor())) {
      const OatQuickMethodHeader* method_header = GetCurrentOatQuickMethodHeader();
      DCHECK(method_header->IsOptimized());
      auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
          reinterpret_cast<uintptr_t>(cur_quick_frame));
      uintptr_t native_pc_offset = method_header->NativeQuickPcOffset(GetCurrentQuickFramePc());
      CodeInfo code_info = method_header->GetOptimizedCodeInfo();
      CodeInfoEncoding encoding = code_info.ExtractEncoding();
      StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
      DCHECK(map.IsValid());

      T vreg_info(m, code_info, encoding, map, visitor_);

      // Visit stack entries that hold pointers.
      const size_t number_of_bits = code_info.GetNumberOfStackMaskBits(encoding);
      BitMemoryRegion stack_mask = code_info.GetStackMaskOf(encoding, map);
      for (size_t i = 0; i < number_of_bits; ++i) {
        if (stack_mask.LoadBit(i)) {
          auto* ref_addr = vreg_base + i;
          mirror::Object* ref = ref_addr->AsMirrorPtr();
          if (ref != nullptr) {
            mirror::Object* new_ref = ref;
            vreg_info.VisitStack(&new_ref, i, this);
            if (ref != new_ref) {
              ref_addr->Assign(new_ref);
            }
          }
        }
      }
      // Visit callee-save registers that hold pointers.
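      // Each set bit i in register_mask marks GPR i as holding a reference at this stack map, so
      // the loop below reads its saved value through GetGPRAddress(i). For example (an
      // illustrative value only), a mask of 0b101 would visit the saved contents of GPRs 0 and 2.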
      uint32_t register_mask = code_info.GetRegisterMaskOf(encoding, map);
      for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
        if (register_mask & (1 << i)) {
          mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
          if (kIsDebugBuild && ref_addr == nullptr) {
            std::string thread_name;
            GetThread()->GetThreadName(thread_name);
            LOG(FATAL_WITHOUT_ABORT) << "On thread " << thread_name;
            DescribeStack(GetThread());
            LOG(FATAL) << "Found an unsaved callee-save register " << i << " (null GPRAddress) "
                       << "set in register_mask=" << register_mask << " at " << DescribeLocation();
          }
          if (*ref_addr != nullptr) {
            vreg_info.VisitRegister(ref_addr, i, this);
          }
        }
      }
    }
  }

  void VisitQuickFrame() REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kPrecise) {
      VisitQuickFramePrecise();
    } else {
      VisitQuickFrameNonPrecise();
    }
  }

  void VisitQuickFrameNonPrecise() REQUIRES_SHARED(Locks::mutator_lock_) {
    struct UndefinedVRegInfo {
      UndefinedVRegInfo(ArtMethod* method ATTRIBUTE_UNUSED,
                        const CodeInfo& code_info ATTRIBUTE_UNUSED,
                        const CodeInfoEncoding& encoding ATTRIBUTE_UNUSED,
                        const StackMap& map ATTRIBUTE_UNUSED,
                        RootVisitor& _visitor)
          : visitor(_visitor) {
      }

      ALWAYS_INLINE
      void VisitStack(mirror::Object** ref,
                      size_t stack_index ATTRIBUTE_UNUSED,
                      const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        visitor(ref, -1, stack_visitor);
      }

      ALWAYS_INLINE
      void VisitRegister(mirror::Object** ref,
                         size_t register_index ATTRIBUTE_UNUSED,
                         const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        visitor(ref, -1, stack_visitor);
      }

      RootVisitor& visitor;
    };
    VisitQuickFrameWithVregCallback<UndefinedVRegInfo>();
  }

  void VisitQuickFramePrecise() REQUIRES_SHARED(Locks::mutator_lock_) {
    struct StackMapVRegInfo {
      StackMapVRegInfo(ArtMethod* method,
                       const CodeInfo& _code_info,
                       const CodeInfoEncoding& _encoding,
                       const StackMap& map,
                       RootVisitor& _visitor)
          : number_of_dex_registers(method->GetCodeItem()->registers_size_),
            code_info(_code_info),
            encoding(_encoding),
            dex_register_map(code_info.GetDexRegisterMapOf(map,
                                                           encoding,
                                                           number_of_dex_registers)),
            visitor(_visitor) {
      }

      // TODO: If necessary, we should consider caching a reverse map instead of the linear
      // lookups for each location.
      void FindWithType(const size_t index,
                        const DexRegisterLocation::Kind kind,
                        mirror::Object** ref,
                        const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        bool found = false;
        for (size_t dex_reg = 0; dex_reg != number_of_dex_registers; ++dex_reg) {
          DexRegisterLocation location = dex_register_map.GetDexRegisterLocation(
              dex_reg, number_of_dex_registers, code_info, encoding);
          if (location.GetKind() == kind && static_cast<size_t>(location.GetValue()) == index) {
            visitor(ref, dex_reg, stack_visitor);
            found = true;
          }
        }

        if (!found) {
          // If nothing found, report with -1.
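          // -1 is the same "no corresponding vreg" value that UndefinedVRegInfo above passes for
          // every reference it reports.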
          visitor(ref, -1, stack_visitor);
        }
      }

      void VisitStack(mirror::Object** ref, size_t stack_index, const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        const size_t stack_offset = stack_index * kFrameSlotSize;
        FindWithType(stack_offset,
                     DexRegisterLocation::Kind::kInStack,
                     ref,
                     stack_visitor);
      }

      void VisitRegister(mirror::Object** ref,
                         size_t register_index,
                         const StackVisitor* stack_visitor)
          REQUIRES_SHARED(Locks::mutator_lock_) {
        FindWithType(register_index,
                     DexRegisterLocation::Kind::kInRegister,
                     ref,
                     stack_visitor);
      }

      size_t number_of_dex_registers;
      const CodeInfo& code_info;
      const CodeInfoEncoding& encoding;
      DexRegisterMap dex_register_map;
      RootVisitor& visitor;
    };
    VisitQuickFrameWithVregCallback<StackMapVRegInfo>();
  }

  // Visitor for when we visit a root.
  RootVisitor& visitor_;
};

class RootCallbackVisitor {
 public:
  RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}

  void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
      REQUIRES_SHARED(Locks::mutator_lock_) {
    visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
  }

 private:
  RootVisitor* const visitor_;
  const uint32_t tid_;
};

template <bool kPrecise>
void Thread::VisitRoots(RootVisitor* visitor) {
  const pid_t thread_id = GetThreadId();
  visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
  if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
                       RootInfo(kRootNativeStack, thread_id));
  }
  visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
  tlsPtr_.jni_env->locals.VisitRoots(visitor, RootInfo(kRootJNILocal, thread_id));
  tlsPtr_.jni_env->monitors.VisitRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
  HandleScopeVisitRoots(visitor, thread_id);
  if (tlsPtr_.debug_invoke_req != nullptr) {
    tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
  }
  // Visit roots for deoptimization.
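  // The shadow frames stashed below are not reachable through the regular stack walk further
  // down, but the interpreter will re-execute them after deoptimization, so the references held
  // in their vregs still need to be reported (and possibly updated).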
  if (tlsPtr_.stacked_shadow_frame_record != nullptr) {
    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
    ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback);
    for (StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
         record != nullptr;
         record = record->GetLink()) {
      for (ShadowFrame* shadow_frame = record->GetShadowFrame();
           shadow_frame != nullptr;
           shadow_frame = shadow_frame->GetLink()) {
        mapper.VisitShadowFrame(shadow_frame);
      }
    }
  }
  for (DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
       record != nullptr;
       record = record->GetLink()) {
    if (record->IsReference()) {
      visitor->VisitRootIfNonNull(record->GetReturnValueAsGCRoot(),
                                  RootInfo(kRootThreadObject, thread_id));
    }
    visitor->VisitRootIfNonNull(record->GetPendingExceptionAsGCRoot(),
                                RootInfo(kRootThreadObject, thread_id));
  }
  if (tlsPtr_.frame_id_to_shadow_frame != nullptr) {
    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
    ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, nullptr, visitor_to_callback);
    for (FrameIdToShadowFrame* record = tlsPtr_.frame_id_to_shadow_frame;
         record != nullptr;
         record = record->GetNext()) {
      mapper.VisitShadowFrame(record->GetShadowFrame());
    }
  }
  for (auto* verifier = tlsPtr_.method_verifier; verifier != nullptr; verifier = verifier->link_) {
    verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id));
  }
  // Visit roots on this thread's stack.
  RuntimeContextType context;
  RootCallbackVisitor visitor_to_callback(visitor, thread_id);
  ReferenceMapVisitor<RootCallbackVisitor, kPrecise> mapper(this, &context, visitor_to_callback);
  mapper.template WalkStack<StackVisitor::CountTransitions::kNo>(false);
  for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
    visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
  }
}

void Thread::VisitRoots(RootVisitor* visitor, VisitRootFlags flags) {
  if ((flags & VisitRootFlags::kVisitRootFlagPrecise) != 0) {
    VisitRoots<true>(visitor);
  } else {
    VisitRoots<false>(visitor);
  }
}

class VerifyRootVisitor : public SingleRootVisitor {
 public:
  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
      OVERRIDE REQUIRES_SHARED(Locks::mutator_lock_) {
    VerifyObject(root);
  }
};

void Thread::VerifyStackImpl() {
  if (Runtime::Current()->GetHeap()->IsObjectValidationEnabled()) {
    VerifyRootVisitor visitor;
    std::unique_ptr<Context> context(Context::Create());
    RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId());
    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback);
    mapper.WalkStack();
  }
}

// Set the stack end to the value to be used during a stack overflow.
void Thread::SetStackEndForStackOverflow() {
  // During stack overflow we allow use of the full stack.
  if (tlsPtr_.stack_end == tlsPtr_.stack_begin) {
    // However, we seem to have already extended to use the full stack.
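    // That is, stack_end was already lowered to stack_begin by an earlier overflow, so this one
    // is recursive and the reserved region is too small.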
    LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
               << GetStackOverflowReservedBytes(kRuntimeISA) << ")?";
    DumpStack(LOG_STREAM(ERROR));
    LOG(FATAL) << "Recursive stack overflow.";
  }

  tlsPtr_.stack_end = tlsPtr_.stack_begin;

  // Remove the stack overflow protection if it is set up.
  bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks();
  if (implicit_stack_check) {
    if (!UnprotectStack()) {
      LOG(ERROR) << "Unable to remove stack protection for stack overflow";
    }
  }
}

void Thread::SetTlab(uint8_t* start, uint8_t* end, uint8_t* limit) {
  DCHECK_LE(start, end);
  DCHECK_LE(end, limit);
  tlsPtr_.thread_local_start = start;
  tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start;
  tlsPtr_.thread_local_end = end;
  tlsPtr_.thread_local_limit = limit;
  tlsPtr_.thread_local_objects = 0;
}

bool Thread::HasTlab() const {
  bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
  if (has_tlab) {
    DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr);
  } else {
    DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr);
  }
  return has_tlab;
}

std::ostream& operator<<(std::ostream& os, const Thread& thread) {
  thread.ShortDump(os);
  return os;
}

bool Thread::ProtectStack(bool fatal_on_error) {
  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
  VLOG(threads) << "Protecting stack at " << pregion;
  if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
    if (fatal_on_error) {
      LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. "
                    "Reason: "
                 << strerror(errno) << " size: " << kStackOverflowProtectedSize;
    }
    return false;
  }
  return true;
}

bool Thread::UnprotectStack() {
  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
  VLOG(threads) << "Unprotecting stack at " << pregion;
  return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0;
}

void Thread::ActivateSingleStepControl(SingleStepControl* ssc) {
  CHECK(Dbg::IsDebuggerActive());
  CHECK(GetSingleStepControl() == nullptr) << "Single step already active in thread " << *this;
  CHECK(ssc != nullptr);
  tlsPtr_.single_step_control = ssc;
}

void Thread::DeactivateSingleStepControl() {
  CHECK(Dbg::IsDebuggerActive());
  CHECK(GetSingleStepControl() != nullptr) << "Single step not active in thread " << *this;
  SingleStepControl* ssc = GetSingleStepControl();
  tlsPtr_.single_step_control = nullptr;
  delete ssc;
}

void Thread::SetDebugInvokeReq(DebugInvokeReq* req) {
  CHECK(Dbg::IsDebuggerActive());
  CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this;
  CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself";
  CHECK(req != nullptr);
  tlsPtr_.debug_invoke_req = req;
}

void Thread::ClearDebugInvokeReq() {
  CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this;
  CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself";
  DebugInvokeReq* req = tlsPtr_.debug_invoke_req;
  tlsPtr_.debug_invoke_req = nullptr;
  delete req;
}

void Thread::PushVerifier(verifier::MethodVerifier* verifier) {
  verifier->link_ = tlsPtr_.method_verifier;
  tlsPtr_.method_verifier = verifier;
}

void Thread::PopVerifier(verifier::MethodVerifier* verifier) {
  CHECK_EQ(tlsPtr_.method_verifier, verifier);
  tlsPtr_.method_verifier = verifier->link_;
}

size_t Thread::NumberOfHeldMutexes() const {
  size_t count = 0;
  for (BaseMutex* mu : tlsPtr_.held_mutexes) {
    count += mu != nullptr ? 1 : 0;
  }
  return count;
}

void Thread::DeoptimizeWithDeoptimizationException(JValue* result) {
  DCHECK_EQ(GetException(), Thread::GetDeoptimizationException());
  ClearException();
  ShadowFrame* shadow_frame =
      PopStackedShadowFrame(StackedShadowFrameType::kDeoptimizationShadowFrame);
  ObjPtr<mirror::Throwable> pending_exception;
  bool from_code = false;
  DeoptimizationMethodType method_type;
  PopDeoptimizationContext(result, &pending_exception, &from_code, &method_type);
  SetTopOfStack(nullptr);
  SetTopOfShadowStack(shadow_frame);

  // Restore the exception that was pending before deoptimization then interpret the
  // deoptimized frames.
  if (pending_exception != nullptr) {
    SetException(pending_exception);
  }
  interpreter::EnterInterpreterFromDeoptimize(this,
                                              shadow_frame,
                                              result,
                                              from_code,
                                              method_type);
}

void Thread::SetException(ObjPtr<mirror::Throwable> new_exception) {
  CHECK(new_exception != nullptr);
  // TODO: DCHECK(!IsExceptionPending());
  tlsPtr_.exception = new_exception.Ptr();
}

bool Thread::IsAotCompiler() {
  return Runtime::Current()->IsAotCompiler();
}

mirror::Object* Thread::GetPeerFromOtherThread() const {
  DCHECK(tlsPtr_.jpeer == nullptr);
  mirror::Object* peer = tlsPtr_.opeer;
  if (kUseReadBarrier && Current()->GetIsGcMarking()) {
    // We may call Thread::Dump() in the middle of the CC thread flip and this thread's stack
    // may not have been flipped yet and peer may be a from-space (stale) ref. So explicitly
    // mark/forward it here.
    peer = art::ReadBarrier::Mark(peer);
  }
  return peer;
}

void Thread::SetReadBarrierEntrypoints() {
  // Make sure entrypoints aren't null.
  UpdateReadBarrierEntrypoints(&tlsPtr_.quick_entrypoints, /* is_active*/ true);
}

}  // namespace art