thread.h revision d6a23bd327c38b08aaf6846d426fd6824fe9780b
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_THREAD_H_
#define ART_SRC_THREAD_H_

#include <pthread.h>

#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <string>

#include "base/macros.h"
#include "globals.h"
#include "jvalue.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "locks.h"
#include "offsets.h"
#include "root_visitor.h"
#include "runtime_stats.h"
#include "stack.h"
#include "stack_indirect_reference_table.h"
#include "thread_state.h"
#include "throw_location.h"
#include "UniquePtr.h"

namespace art {

namespace mirror {
class AbstractMethod;
class Array;
class Class;
class ClassLoader;
class Object;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int32_t> IntArray;
class StackTraceElement;
class StaticStorageBase;
class Throwable;
}  // namespace mirror
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
class DexFile;
struct JavaVMExt;
struct JNIEnvExt;
class Monitor;
class Runtime;
class ScopedObjectAccess;
class ScopedObjectAccessUnchecked;
class ShadowFrame;
class Thread;
class ThreadList;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadFlag {
  kSuspendRequest = 1,    // If set, implies that suspend_count_ > 0 and the Thread should enter
                          // the safepoint handler.
  kCheckpointRequest = 2  // Request that the thread do some checkpoint work and then continue.
};

class PACKED(4) Thread {
 public:
  // Space to throw a StackOverflowError in.
  static const size_t kStackOverflowReservedBytes = 16 * KB;

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                        bool create_peer);

  // Reset internal state of child thread after fork.
  void InitAfterFork();
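
  // Illustrative usage sketch (not part of the original header): a native thread is
  // expected to enter the runtime through Attach() and can then reach its Thread*
  // via Current(). The thread name and group below are placeholders.
  //
  //   Thread* self = Thread::Attach("worker", /* as_daemon */ false,
  //                                 /* thread_group */ NULL, /* create_peer */ true);
  //   CHECK(self != NULL && self == Thread::Current());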
  static Thread* Current() {
    // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
    // that we can replace this with a direct %fs access on x86.
    if (!is_started_) {
      return NULL;
    } else {
      void* thread = pthread_getspecific(Thread::pthread_key_self_);
      return reinterpret_cast<Thread*>(thread);
    }
  }

  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts,
                                   mirror::Object* thread_peer)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, jobject thread)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Translates 172 to pAllocArrayFromCode and so on.
  static void DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThreadState GetState() const {
    return static_cast<ThreadState>(state_and_flags_.as_struct.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return suspend_count_;
  }

  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return debug_suspend_count_;
  }

  bool IsSuspended() const {
    union StateAndFlags state_and_flags = state_and_flags_;
    return state_and_flags.as_struct.state != kRunnable &&
        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
  }

  void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  bool RequestCheckpoint(Closure* function);

  // Called when this thread detects that thread_suspend_count_ is non-zero. Gives up its share
  // of the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Transition from non-runnable to runnable state, acquiring a share of the mutator_lock_.
  ThreadState TransitionFromSuspendedToRunnable()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Transition from runnable into a state where mutator privileges are denied. Releases the
  // share of the mutator lock.
  void TransitionFromRunnableToSuspended(ThreadState new_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;
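
  // Illustrative usage sketch (not part of the original header): code that is about
  // to block uses these transitions so that GC and the debugger can suspend the
  // thread while it waits; kWaiting stands in for whichever state applies. In
  // practice the pattern is wrapped by scoped helpers such as ScopedThreadStateChange.
  //
  //   Thread* self = Thread::Current();
  //   self->TransitionFromRunnableToSuspended(kWaiting);
  //   DoBlockingCall();  // placeholder for the blocking operation
  //   self->TransitionFromSuspendedToRunnable();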
  // Wait for a debugger suspension on the thread associated with the given peer. Returns the
  // thread on success, else NULL. If the thread should be suspended then request_suspension
  // should be true on entry. If the suspension times out then *timed_out is set to true.
  static Thread* SuspendForDebugger(jobject peer, bool request_suspension, bool* timed_out)
      LOCKS_EXCLUDED(Locks::mutator_lock_,
                     Locks::thread_list_lock_,
                     Locks::thread_suspend_count_lock_);

  // Once called, thread suspension will cause an assertion failure.
#ifndef NDEBUG
  const char* StartAssertNoThreadSuspension(const char* cause) {
    CHECK(cause != NULL);
    const char* previous_cause = last_no_thread_suspension_cause_;
    no_thread_suspension_++;
    last_no_thread_suspension_cause_ = cause;
    return previous_cause;
  }
#else
  const char* StartAssertNoThreadSuspension(const char* cause) {
    CHECK(cause != NULL);
    return NULL;
  }
#endif

  // End region where no thread suspension is expected.
#ifndef NDEBUG
  void EndAssertNoThreadSuspension(const char* old_cause) {
    CHECK(old_cause != NULL || no_thread_suspension_ == 1);
    CHECK_GT(no_thread_suspension_, 0U);
    no_thread_suspension_--;
    last_no_thread_suspension_cause_ = old_cause;
  }
#else
  void EndAssertNoThreadSuspension(const char*) {
  }
#endif

  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
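
  // Illustrative usage sketch (not part of the original header): the Start/End pair
  // nests by saving and restoring the previous cause string.
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting roots");
  //   ... code that must not reach a suspend point ...
  //   self->EndAssertNoThreadSuspension(old_cause);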
  bool IsDaemon() const {
    return daemon_;
  }

  bool HoldsLock(mirror::Object*);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  uint32_t GetThinLockId() const {
    return thin_lock_id_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
  mirror::String* GetThreadName(const ScopedObjectAccessUnchecked& ts) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(jpeer_ == NULL);
    return opeer_;
  }

  bool HasPeer() const {
    return jpeer_ != NULL || opeer_ != NULL;
  }

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return exception_ != NULL;
  }

  mirror::Throwable* GetException(ThrowLocation* throw_location) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (throw_location != NULL) {
      *throw_location = throw_location_;
    }
    return exception_;
  }

  void AssertNoPendingException() const;

  void SetException(const ThrowLocation& throw_location, mirror::Throwable* new_exception)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(new_exception != NULL);
    // TODO: DCHECK(!IsExceptionPending());
    exception_ = new_exception;
    throw_location_ = throw_location;
  }

  void ClearException() {
    exception_ = NULL;
    throw_location_.Clear();
  }

  // Find the catch block and perform a long jump to the appropriate exception handler.
  void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    DCHECK(long_jump_context_ == NULL);
    long_jump_context_ = context;
  }

  mirror::AbstractMethod* GetCurrentMethod(uint32_t* dex_pc) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetTopOfStack(void* stack, uintptr_t pc) {
    mirror::AbstractMethod** top_method = reinterpret_cast<mirror::AbstractMethod**>(stack);
    managed_stack_.SetTopQuickFrame(top_method);
    managed_stack_.SetTopQuickFramePc(pc);
  }

  void SetTopOfShadowStack(ShadowFrame* top) {
    managed_stack_.SetTopShadowFrame(top);
  }

  bool HasManagedStack() const {
    return managed_stack_.GetTopQuickFrame() != NULL || managed_stack_.GetTopShadowFrame() != NULL;
  }

  // If 'msg' is NULL, no detail message is set.
  void ThrowNewException(const ThrowLocation& throw_location,
                         const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const ThrowLocation& throw_location,
                                const char* exception_class_descriptor,
                                const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionF(const ThrowLocation& throw_location,
                          const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 4, 5)))
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionV(const ThrowLocation& throw_location,
                          const char* exception_class_descriptor, const char* fmt, va_list ap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
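
  // Illustrative usage sketch (not part of the original header): the usual
  // raise-and-propagate pattern from runtime-internal code; the descriptor and
  // message arguments are placeholders.
  //
  //   ThrowLocation throw_location = self->GetCurrentLocationForThrow();
  //   self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
  //                            "length=%d; index=%d", length, index);
  //   DCHECK(self->IsExceptionPending());  // Callers return and let it propagate.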
  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Convert a jobject into an Object*.
  mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Implements java.lang.Thread.interrupted.
  bool Interrupted();
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted();
  void Interrupt();
  void Notify();

  mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return class_loader_override_;
  }

  void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

  // Create the internal representation of a stack trace, which is more time- and space-efficient
  // to compute than the StackTraceElement[].
  jobject CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
      jobjectArray output_array = NULL, int* stack_depth = NULL);

  void VisitRoots(RootVisitor* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void VerifyRoots(VerifyRootVisitor* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  static ThreadOffset SelfOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, self_));
  }

  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  static ThreadOffset PeerOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, opeer_));
  }

  static ThreadOffset ThinLockIdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
  }

  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  static ThreadOffset ThreadFlagsOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, state_and_flags_));
  }
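
  // Illustrative sketch (not part of the original header): compiled code reaches
  // these fields at fixed offsets from the Thread* register. A code generator might
  // emit something like the following; LoadFromThreadOffset is a hypothetical
  // assembler helper, not an API defined here.
  //
  //   ThreadOffset offset = Thread::ExceptionOffset();
  //   __ LoadFromThreadOffset(scratch_reg, offset);  // scratch_reg = self->exception_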
  // Size of the stack minus any space reserved for stack overflow handling.
  size_t GetStackSize() const {
    return stack_size_ - (stack_end_ - stack_begin_);
  }

  byte* GetStackEnd() const {
    return stack_end_;
  }

  // Sets the stack end to the value to be used while handling a stack overflow.
  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets the stack end to the value to be used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    stack_end_ = stack_begin_ + kStackOverflowReservedBytes;
  }

  bool IsHandlingStackOverflow() const {
    return stack_end_ == stack_begin_;
  }
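
  // Illustrative sketch (not part of the original header): delivering a
  // StackOverflowError. The runtime makes the reserved region addressable, throws,
  // then restores the guard; ThrowStackOverflowError is a hypothetical helper.
  //
  //   self->SetStackEndForStackOverflow();  // reserved bytes become usable
  //   ThrowStackOverflowError(self);        // allocate and throw while they are
  //   self->ResetDefaultStackEnd();         // restore the guard region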
  static ThreadOffset StackEndOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, stack_end_));
  }

  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFrameOffset());
  }

  static ThreadOffset TopOfManagedStackPcOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFramePcOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &managed_stack_;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    managed_stack_.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    managed_stack_.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return managed_stack_.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return managed_stack_.PopShadowFrame();
  }

  static ThreadOffset TopShadowFrameOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in JNI ShadowFrames on this thread.
  size_t NumJniShadowFrameReferences() const {
    return managed_stack_.NumJniShadowFrameReferences();
  }

  // Number of references in SIRTs on this thread.
  size_t NumSirtReferences();

  // Number of references allocated in SIRTs & JNI shadow frames on this thread.
  size_t NumStackReferences() {
    return NumSirtReferences() + NumJniShadowFrameReferences();
  }

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj) const;

  void SirtVisitRoots(RootVisitor* visitor, void* arg);

  void PushSirt(StackIndirectReferenceTable* sirt) {
    sirt->SetLink(top_sirt_);
    top_sirt_ = sirt;
  }

  StackIndirectReferenceTable* PopSirt() {
    StackIndirectReferenceTable* sirt = top_sirt_;
    DCHECK(sirt != NULL);
    top_sirt_ = top_sirt_->GetLink();
    return sirt;
  }

  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }
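
  // Illustrative usage sketch (not part of the original header): SIRTs are
  // stack-allocated and pushed/popped in strict LIFO order around code that creates
  // references the GC must be able to find; the constructor arguments are elided.
  //
  //   StackIndirectReferenceTable sirt(...);
  //   self->PushSirt(&sirt);
  //   ... store object references into the SIRT ...
  //   StackIndirectReferenceTable* popped = self->PopSirt();
  //   DCHECK(popped == &sirt);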
  DebugInvokeReq* GetInvokeReq() {
    return debug_invoke_req_;
  }

  void SetDeoptimizationShadowFrame(ShadowFrame* sf);
  void SetDeoptimizationReturnValue(const JValue& ret_val);

  ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);

  std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
    return instrumentation_stack_;
  }

  BaseMutex* GetHeldMutex(LockLevel level) const {
    return held_mutexes_[level];
  }

  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
    held_mutexes_[level] = mutex;
  }

  void RunCheckpointFunction();

  bool ReadFlag(ThreadFlag flag) const {
    return (state_and_flags_.as_struct.flags & flag) != 0;
  }

  bool TestAllFlags() const {
    return (state_and_flags_.as_struct.flags != 0);
  }

  void AtomicSetFlag(ThreadFlag flag);

  void AtomicClearFlag(ThreadFlag flag);

 private:
  // We have no control over the size of 'bool', but want our boolean fields
  // to be 4-byte quantities.
  typedef uint32_t bool32_t;

  explicit Thread(bool daemon);
  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
                           Locks::thread_suspend_count_lock_);
  void Destroy();
  friend class ThreadList;  // For ~Thread and Destroy.

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
  friend class Runtime;  // For CreatePeer.

  // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
  // Dbg::Disconnected.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    state_and_flags_.as_struct.state = new_state;
    return old_state;
  }
  friend class SignalCatcher;  // For SetStateUnsafe.
  friend class Dbg;  // For SetStateUnsafe.

  void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(ScopedObjectAccess& soa)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
  void InitCardTable();
  void InitCpu();
  void InitFunctionPointers();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void SetUpAlternateSignalStack();
  void TearDownAlternateSignalStack();

  void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);

  static void ThreadExitCallback(void* arg);

  // Has Thread::Startup been called?
  static bool is_started_;

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // --- Frequently accessed fields first for short offsets ---

  // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
  // change from being Suspended to Runnable without a suspend request occurring.
  union StateAndFlags {
    struct PACKED(4) {
      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
      // ThreadFlags for bit field meanings.
      volatile uint16_t flags;
      // Holds the ThreadState. May be changed non-atomically between Suspended (ie not Runnable)
      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
      // operation. If a thread is suspended and a suspend_request is present, a thread may not
      // change to Runnable as a GC or other operation is in progress.
      volatile uint16_t state;
    } as_struct;
    volatile int32_t as_int;
  };
  union StateAndFlags state_and_flags_;
  COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
                 sizeof_state_and_flags_and_int32_are_different);
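
  // Illustrative sketch (not part of the original header) of what the 32-bit layout
  // buys: a transition to Runnable can test the flags and set the state in a single
  // compare-and-swap over as_int. Simplified, assuming the cutils helper
  // android_atomic_cas (which returns 0 on success); the real transition must also
  // suspend instead when a suspend request is pending.
  //
  //   union StateAndFlags old_state_and_flags = state_and_flags_;
  //   if ((old_state_and_flags.as_struct.flags & kSuspendRequest) == 0) {
  //     union StateAndFlags new_state_and_flags = old_state_and_flags;
  //     new_state_and_flags.as_struct.state = kRunnable;
  //     android_atomic_cas(old_state_and_flags.as_int, new_state_and_flags.as_int,
  //                        &state_and_flags_.as_int);
  //   }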
  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // The biased card table, see CardTable for details.
  byte* card_table_;

  // The pending exception or NULL.
  mirror::Throwable* exception_;

  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
  // We leave extra space so there's room for the code that throws StackOverflowError.
  byte* stack_end_;

  // The top of the managed stack, often manipulated directly by compiler-generated code.
  ManagedStack managed_stack_;

  // Every thread may have an associated JNI environment.
  JNIEnvExt* jni_env_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
  // start up, until the thread is registered and the local opeer_ is used.
  mirror::Object* opeer_;
  jobject jpeer_;

  // The "lowest addressable byte" of the stack.
  byte* stack_begin_;

  // Size of the stack.
  size_t stack_size_;

  // Thin lock thread id. This is a small integer used by the thin lock implementation.
  // This is not to be confused with the native thread's tid, nor is it the value returned
  // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
  // important difference between this id and the ids visible to managed code is that these
  // ones get reused (to ensure that they fit in the number of bits available).
  uint32_t thin_lock_id_;

  // System thread id.
  pid_t tid_;

  ThrowLocation throw_location_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  mutable Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on (or NULL).
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
  // Thread "interrupted" status; stays raised until queried or thrown.
  bool32_t interrupted_ GUARDED_BY(wait_mutex_);
  // The next thread in the wait set this thread is part of.
  Thread* wait_next_;
  // If we're blocked in MonitorEnter, this is the object we're trying to lock.
  mirror::Object* monitor_enter_object_;

  friend class Monitor;
  friend class MonitorInfo;

  // Top of the linked list of stack indirect reference tables, or NULL for none.
  StackIndirectReferenceTable* top_sirt_;

  Runtime* runtime_;

  RuntimeStats stats_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  mirror::ClassLoader* class_loader_override_;

  // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
  Context* long_jump_context_;

  // A boolean telling us whether we're recursively throwing OOME.
  bool32_t throwing_OutOfMemoryError_;

  // How much of 'suspend_count_' is by request of the debugger, used to set things right
  // when the debugger detaches. Must be <= suspend_count_.
  int debug_suspend_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // JDWP invoke-during-breakpoint support.
  DebugInvokeReq* debug_invoke_req_;

  // Shadow frame that is used temporarily during the deoptimization of a method.
  ShadowFrame* deoptimization_shadow_frame_;
  JValue deoptimization_return_value_;

  // Additional stack used by method instrumentation to store method and return pc values.
  // Stored as a pointer since std::deque is not PACKED.
  std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack_;

  // A cached copy of the java.lang.Thread's name.
  std::string* name_;

  // Is the thread a daemon?
  const bool32_t daemon_;

  // A cached pthread_t for the pthread underlying this Thread*.
  pthread_t pthread_self_;

  // Support for Mutex lock hierarchy bug detection.
  BaseMutex* held_mutexes_[kLockLevelCount];

  // A positive value implies we're in a region where thread suspension isn't expected.
  uint32_t no_thread_suspension_;

  // The cause passed to the most recent StartAssertNoThreadSuspension call.
  const char* last_no_thread_suspension_cause_;
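
  // Illustrative sketch (not part of the original header): another thread installs a
  // Closure via RequestCheckpoint(), which sets kCheckpointRequest; this thread is
  // then expected to notice the flag at a suspend point and service it (simplified;
  // the details live in the .cc file).
  //
  //   if (ReadFlag(kCheckpointRequest)) {
  //     RunCheckpointFunction();  // runs checkpoint_function_ on this thread
  //   }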
  // Pending checkpoint function, or NULL if there is none. Installed by RequestCheckpoint.
  Closure* checkpoint_function_;

 public:
  // Runtime support function pointers.
  // TODO: move this near the top, since changing its offset requires all oats to be recompiled!
  EntryPoints entrypoints_;

 private:
  // How many times has our pthread key's destructor been called?
  uint32_t thread_exit_check_count_;

  friend class ScopedThreadStateChange;

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);

}  // namespace art

#endif  // ART_SRC_THREAD_H_