thread.h revision 1ffa32f0be7becec4907b26ead353e4b17e1219c
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_THREAD_H_
#define ART_SRC_THREAD_H_

#include <pthread.h>

#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <string>

#include "base/macros.h"
#include "globals.h"
#include "jvalue.h"
#include "locks.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "offsets.h"
#include "root_visitor.h"
#include "runtime_stats.h"
#include "stack.h"
#include "stack_indirect_reference_table.h"
#include "thread_state.h"
#include "UniquePtr.h"

namespace art {

namespace mirror {
class AbstractMethod;
class Array;
class Class;
class ClassLoader;
class Object;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int32_t> IntArray;
class StackTraceElement;
class StaticStorageBase;
class Throwable;
}  // namespace mirror
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
class DexFile;
struct JavaVMExt;
struct JNIEnvExt;
class Monitor;
class Runtime;
class ScopedObjectAccess;
class ScopedObjectAccessUnchecked;
class ShadowFrame;
class Thread;
class ThreadList;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadFlag {
  kSuspendRequest    = 1,  // If set, implies that suspend_count_ > 0 and the Thread should enter
                           // the safepoint handler.
  kCheckpointRequest = 2,  // Request that the thread do some checkpoint work and then continue.
  kEnterInterpreter  = 4,  // Instructs managed code that it should enter the interpreter.
};
86
87class PACKED(4) Thread {
88 public:
89  // Space to throw a StackOverflowError in.
90  static const size_t kStackOverflowReservedBytes = 10 * KB;
91
92  // Creates a new native thread corresponding to the given managed peer.
93  // Used to implement Thread.start.
94  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
95
96  // Attaches the calling native thread to the runtime, returning the new native peer.
97  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
98  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
99                        bool create_peer);
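
  // Illustrative attach flow from a native thread (a sketch, not code from this header;
  // the argument values and the CHECK are assumptions):
  //
  //   Thread* self = Thread::Attach("worker", false /* as_daemon */,
  //                                 NULL /* thread_group */, true /* create_peer */);
  //   CHECK(self == Thread::Current());
  //   ...  // use 'self'; detach via the runtime's thread list before thread exit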

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  static Thread* Current() __attribute__ ((pure)) {
    // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
    // that we can replace this with a direct %fs access on x86.
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }

  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts,
                                   mirror::Object* thread_peer)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, jobject thread)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Translates 172 to pAllocArrayFromCode and so on.
  static void DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThreadState GetState() const {
    return static_cast<ThreadState>(state_and_flags_.as_struct.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return suspend_count_;
  }

  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return debug_suspend_count_;
  }

  bool IsSuspended() const {
    union StateAndFlags state_and_flags = state_and_flags_;
    return state_and_flags.as_struct.state != kRunnable &&
        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
  }

  void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  bool RequestCheckpoint(Closure* function);
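
  // Illustrative checkpoint use (a sketch; the Run(Thread*) signature is implied by
  // RunCheckpointFunction below, but this closure and call site are assumptions):
  //
  //   class DumpClosure : public Closure {
  //    public:
  //     virtual void Run(Thread* thread) {
  //       std::ostringstream os;
  //       thread->ShortDump(os);
  //       LOG(INFO) << os.str();
  //     }
  //   };
  //
  //   DumpClosure closure;
  //   bool installed = target->RequestCheckpoint(&closure);  // runs at the next safepoint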

  // Called when this thread detects that thread_suspend_count_ is non-zero. Gives up its share of
  // the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Transitions from a non-runnable to the runnable state, acquiring a share of the mutator_lock_.
  ThreadState TransitionFromSuspendedToRunnable()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Transitions from runnable into a state where mutator privileges are denied, releasing its
  // share of the mutator_lock_.
  void TransitionFromRunnableToSuspended(ThreadState new_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Waits for a debugger suspension on the thread associated with the given peer. Returns the
  // thread on success, else NULL. If the thread should be suspended then request_suspension
  // should be true on entry. If the suspension times out then *timed_out is set to true.
  static Thread* SuspendForDebugger(jobject peer, bool request_suspension, bool* timed_out)
      LOCKS_EXCLUDED(Locks::mutator_lock_,
                     Locks::thread_list_lock_,
                     Locks::thread_suspend_count_lock_);

  // Once called, thread suspension will cause an assertion failure.
#ifndef NDEBUG
  const char* StartAssertNoThreadSuspension(const char* cause) {
    CHECK(cause != NULL);
    const char* previous_cause = last_no_thread_suspension_cause_;
    no_thread_suspension_++;
    last_no_thread_suspension_cause_ = cause;
    return previous_cause;
  }
#else
  const char* StartAssertNoThreadSuspension(const char* cause) {
    CHECK(cause != NULL);
    return NULL;
  }
#endif

  // End region where no thread suspension is expected.
#ifndef NDEBUG
  void EndAssertNoThreadSuspension(const char* old_cause) {
    CHECK(old_cause != NULL || no_thread_suspension_ == 1);
    CHECK_GT(no_thread_suspension_, 0U);
    no_thread_suspension_--;
    last_no_thread_suspension_cause_ = old_cause;
  }
#else
  void EndAssertNoThreadSuspension(const char*) {
  }
#endif
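
  // Typical pairing (an illustrative sketch, not code from this header):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting thread roots");
  //   ...  // work that must not suspend
  //   self->EndAssertNoThreadSuspension(old_cause);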

  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;

  bool IsDaemon() const {
    return daemon_;
  }

  bool HoldsLock(mirror::Object*);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  uint32_t GetThinLockId() const {
    return thin_lock_id_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
  mirror::String* GetThreadName(const ScopedObjectAccessUnchecked& ts) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(jpeer_ == NULL);
    return opeer_;
  }

  bool HasPeer() const {
    return jpeer_ != NULL || opeer_ != NULL;
  }

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return exception_ != NULL;
  }

  mirror::Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return exception_;
  }

  void AssertNoPendingException() const;

  void SetException(mirror::Throwable* new_exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(new_exception != NULL);
    // TODO: DCHECK(!IsExceptionPending());
    exception_ = new_exception;
  }

  void ClearException() {
    exception_ = NULL;
  }

  void DeliverException(mirror::Throwable* exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (exception == NULL) {
      ThrowNewException("Ljava/lang/NullPointerException;", "throw with null exception");
    } else {
      SetException(exception);
    }
  }

  // Finds the catch block and performs a long jump to the appropriate exception handler.
  void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    DCHECK(long_jump_context_ == NULL);
    long_jump_context_ = context;
  }
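
  // Typical borrow/return pattern for the single cached context (an illustrative
  // sketch; the surrounding control flow is an assumption):
  //
  //   Context* context = self->GetLongJumpContext();
  //   ...  // fill the context with the handler frame's register state
  //   self->ReleaseLongJumpContext(context);  // hand the cached context back for reuse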

  mirror::AbstractMethod* GetCurrentMethod(uint32_t* dex_pc = NULL, size_t* frame_id = NULL) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetTopOfStack(void* stack, uintptr_t pc) {
    mirror::AbstractMethod** top_method = reinterpret_cast<mirror::AbstractMethod**>(stack);
    managed_stack_.SetTopQuickFrame(top_method);
    managed_stack_.SetTopQuickFramePc(pc);
  }

  void SetTopOfShadowStack(ShadowFrame* top) {
    managed_stack_.SetTopShadowFrame(top);
  }

  bool HasManagedStack() const {
    return managed_stack_.GetTopQuickFrame() != NULL || managed_stack_.GetTopShadowFrame() != NULL;
  }

  // If 'msg' is NULL, no detail message is set.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)))
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
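
  // For example (an illustrative sketch; 'length' and 'index' are assumed locals):
  //
  //   self->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
  //                            "length=%d; index=%d", length, index);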

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // QuickFrameIterator FindExceptionHandler(void* throw_pc, void** handler_pc);

  void* FindExceptionHandlerInMethod(const mirror::AbstractMethod* method,
                                     void* throw_pc,
                                     const DexFile& dex_file,
                                     ClassLinker* class_linker);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Converts a jobject into a mirror::Object*.
  mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Implements java.lang.Thread.interrupted.
  bool Interrupted();
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted();
  void Interrupt();
  void Notify();
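
  // The two queries differ in whether they clear the flag, mirroring the Java methods they
  // implement (an illustrative sketch):
  //
  //   self->Interrupt();
  //   CHECK(self->IsInterrupted());   // isInterrupted: the flag stays raised
  //   CHECK(self->Interrupted());     // interrupted: returns true and clears the flag
  //   CHECK(!self->IsInterrupted());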

  mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return class_loader_override_;
  }

  void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

  // Creates the internal representation of a stack trace, which is more time- and space-efficient
  // to compute than a StackTraceElement[].
  jobject CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Converts an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created; otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
      jobjectArray output_array = NULL, int* stack_depth = NULL);

  void VisitRoots(RootVisitor* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void VerifyRoots(VerifyRootVisitor* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

#if VERIFY_OBJECT_ENABLED
  void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
#else
  void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {}
#endif

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  static ThreadOffset SelfOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, self_));
  }

  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  static ThreadOffset PeerOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, opeer_));
  }

  static ThreadOffset ThinLockIdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
  }

  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  static ThreadOffset ThreadFlagsOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, state_and_flags_));
  }

  // Size of stack less any space reserved for stack overflow.
  size_t GetStackSize() const {
    return stack_size_ - (stack_end_ - stack_begin_);
  }
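
  // During regular execution stack_end_ - stack_begin_ == kStackOverflowReservedBytes (see
  // ResetDefaultStackEnd below), so, for example, a 1 MB stack yields 1 MB - 10 KB of usable
  // space.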

  byte* GetStackEnd() const {
    return stack_end_;
  }

  // Sets the stack end to the limit used while handling a stack overflow.
  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets the stack end back to the limit used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    stack_end_ = stack_begin_ + kStackOverflowReservedBytes;
  }

  bool IsHandlingStackOverflow() const {
    return stack_end_ == stack_begin_;
  }

  static ThreadOffset StackEndOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, stack_end_));
  }

  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFrameOffset());
  }

  static ThreadOffset TopOfManagedStackPcOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFramePcOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &managed_stack_;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    managed_stack_.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    managed_stack_.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return managed_stack_.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return managed_stack_.PopShadowFrame();
  }

  static ThreadOffset TopShadowFrameOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in JNI ShadowFrames on this thread.
  size_t NumJniShadowFrameReferences() const {
    return managed_stack_.NumJniShadowFrameReferences();
  }

  // Number of references in SIRTs on this thread.
  size_t NumSirtReferences();

  // Number of references allocated in SIRTs & JNI shadow frames on this thread.
  size_t NumStackReferences() {
    return NumSirtReferences() + NumJniShadowFrameReferences();
  }

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj) const;

  void SirtVisitRoots(RootVisitor* visitor, void* arg);

  void PushSirt(StackIndirectReferenceTable* sirt) {
    sirt->SetLink(top_sirt_);
    top_sirt_ = sirt;
  }

  StackIndirectReferenceTable* PopSirt() {
    StackIndirectReferenceTable* sirt = top_sirt_;
    DCHECK(sirt != NULL);
    top_sirt_ = top_sirt_->GetLink();
    return sirt;
  }
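
  // SIRTs follow strict LIFO stack discipline (an illustrative sketch; the
  // StackIndirectReferenceTable constructor arguments are elided):
  //
  //   StackIndirectReferenceTable sirt(...);
  //   self->PushSirt(&sirt);
  //   ...  // references in 'sirt' are now found by SirtContains/SirtVisitRoots
  //   CHECK(self->PopSirt() == &sirt);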

  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }

  DebugInvokeReq* GetInvokeReq() {
    return debug_invoke_req_;
  }

  void SetDebuggerUpdatesEnabled(bool enabled);

  void SetDeoptimizationShadowFrame(ShadowFrame* sf, const JValue& ret_val);

  ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);

  const std::deque<InstrumentationStackFrame>* GetInstrumentationStack() const {
    return instrumentation_stack_;
  }

  bool IsInstrumentationStackEmpty() const {
    return instrumentation_stack_->empty();
  }

  void PushInstrumentationStackFrame(const InstrumentationStackFrame& frame) {
    instrumentation_stack_->push_front(frame);
  }

  void PushBackInstrumentationStackFrame(const InstrumentationStackFrame& frame) {
    instrumentation_stack_->push_back(frame);
  }

  InstrumentationStackFrame PopInstrumentationStackFrame() {
    InstrumentationStackFrame frame = instrumentation_stack_->front();
    instrumentation_stack_->pop_front();
    return frame;
  }

  BaseMutex* GetHeldMutex(LockLevel level) const {
    return held_mutexes_[level];
  }

  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
    held_mutexes_[level] = mutex;
  }

  void RunCheckpointFunction() {
    CHECK(checkpoint_function_ != NULL);
    checkpoint_function_->Run(this);
  }

  bool ReadFlag(ThreadFlag flag) const {
    return (state_and_flags_.as_struct.flags & flag) != 0;
  }

  void AtomicSetFlag(ThreadFlag flag);

  void AtomicClearFlag(ThreadFlag flag);

 private:
  // We have no control over the size of 'bool', but want our boolean fields
  // to be 4-byte quantities.
  typedef uint32_t bool32_t;

  explicit Thread(bool daemon);
  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
                           Locks::thread_suspend_count_lock_);
  void Destroy();
  friend class ThreadList;  // For ~Thread and Destroy.

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
  friend class Runtime;  // For CreatePeer.

  // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit and ~Thread.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    state_and_flags_.as_struct.state = new_state;
    return old_state;
  }
  friend class SignalCatcher;  // For SetStateUnsafe.

  void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(ScopedObjectAccess& soa)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
  void InitCardTable();
  void InitCpu();
  void InitFunctionPointers();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);

  static void ThreadExitCallback(void* arg);

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // --- Frequently accessed fields first for short offsets ---

  // 32 bits of atomically changed state and flags. Keeping it to 32 bits allows an atomic CAS
  // to change from being Suspended to Runnable without a suspend request occurring.
  union StateAndFlags {
    struct PACKED(4) {
      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
      // ThreadFlags for bit field meanings.
      volatile uint16_t flags;
      // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
      // operation. If a thread is suspended and a suspend_request is present, a thread may not
      // change to Runnable as a GC or other operation is in progress.
      volatile uint16_t state;
    } as_struct;
    volatile int32_t as_int;
  };
  union StateAndFlags state_and_flags_;
  COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
                 sizeof_state_and_flags_and_int32_are_different);
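
  // Illustrative transition to Runnable (a sketch of the intended atomicity, not the real
  // implementation, which lives in thread.cc; the CAS primitive shown is an assumption):
  //
  //   union StateAndFlags old_state_and_flags = state_and_flags_;
  //   if ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
  //     ...  // a suspend request is pending: wait to be resumed instead of becoming Runnable
  //   } else {
  //     union StateAndFlags new_state_and_flags = old_state_and_flags;
  //     new_state_and_flags.as_struct.state = kRunnable;
  //     // CAS on as_int, so becoming Runnable fails if the flags changed underneath us.
  //     android_atomic_cas(old_state_and_flags.as_int, new_state_and_flags.as_int,
  //                        &state_and_flags_.as_int);
  //   }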

  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // The biased card table; see CardTable for details.
  byte* card_table_;

  // The pending exception or NULL.
  mirror::Throwable* exception_;

  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
  // We leave extra space so there's room for the code that throws StackOverflowError.
  byte* stack_end_;

  // The top of the managed stack, often manipulated directly by compiler-generated code.
  ManagedStack managed_stack_;

  // Every thread may have an associated JNI environment.
  JNIEnvExt* jni_env_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
  // start-up, until the thread is registered and the local opeer_ is used.
  mirror::Object* opeer_;
  jobject jpeer_;

  // The "lowest addressable byte" of the stack.
  byte* stack_begin_;

  // Size of the stack.
  size_t stack_size_;

  // Thin lock thread id. This is a small integer used by the thin lock implementation.
  // This is not to be confused with the native thread's tid, nor is it the value returned
  // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
  // important difference between this id and the ids visible to managed code is that these
  // ones get reused (to ensure that they fit in the number of bits available).
  uint32_t thin_lock_id_;

  // System thread id.
  pid_t tid_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  mutable Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on (or NULL).
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
  // Thread "interrupted" status; stays raised until queried or thrown.
  bool32_t interrupted_ GUARDED_BY(wait_mutex_);
  // The next thread in the wait set this thread is part of.
  Thread* wait_next_;
  // If we're blocked in MonitorEnter, this is the object we're trying to lock.
  mirror::Object* monitor_enter_object_;

  friend class Monitor;
  friend class MonitorInfo;

  // Top of the linked list of stack indirect reference tables, or NULL for none.
  StackIndirectReferenceTable* top_sirt_;

  Runtime* runtime_;

  RuntimeStats stats_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  mirror::ClassLoader* class_loader_override_;

  // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
  Context* long_jump_context_;

  // A boolean telling us whether we're recursively throwing OOME.
  bool32_t throwing_OutOfMemoryError_;

  // How much of 'suspend_count_' is by request of the debugger, used to set things right
  // when the debugger detaches. Must be <= suspend_count_.
  int debug_suspend_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // JDWP invoke-during-breakpoint support.
  DebugInvokeReq* debug_invoke_req_;

  // Shadow frame that is used temporarily during the deoptimization of a method.
  ShadowFrame* deoptimization_shadow_frame_;
  JValue deoptimization_return_value_;

  // Additional stack used by method instrumentation to store method and return pc values.
  // Stored as a pointer since std::deque is not PACKED.
  std::deque<InstrumentationStackFrame>* instrumentation_stack_;

  // A cached copy of the java.lang.Thread's name.
  std::string* name_;

  // Is the thread a daemon?
  const bool32_t daemon_;

  // A cached pthread_t for the pthread underlying this Thread*.
  pthread_t pthread_self_;

  // Support for Mutex lock hierarchy bug detection.
  BaseMutex* held_mutexes_[kMaxMutexLevel + 1];

  // A positive value implies we're in a region where thread suspension isn't expected.
  uint32_t no_thread_suspension_;

  // The cause recorded by the most recent StartAssertNoThreadSuspension.
  const char* last_no_thread_suspension_cause_;

  // Pending checkpoint function, or NULL if there is none.
  Closure* checkpoint_function_;

 public:
  // Runtime support function pointers.
  // TODO: move this near the top, since changing its offset requires all oats to be recompiled!
  EntryPoints entrypoints_;

 private:
  // How many times has our pthread key's destructor been called?
  uint32_t thread_exit_check_count_;

  friend class ScopedThreadStateChange;

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);

}  // namespace art

#endif  // ART_SRC_THREAD_H_