thread.h revision 474b6da273c7ce6df50a4e51eb9929a77e1611c3
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_THREAD_H_
#define ART_SRC_THREAD_H_

#include <pthread.h>

#include <bitset>
#include <iosfwd>
#include <list>
#include <string>
#include <vector>

#include "dex_file.h"
#include "globals.h"
#include "jni_internal.h"
#include "logging.h"
#include "macros.h"
#include "mem_map.h"
#include "mutex.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "trace.h"
#include "UniquePtr.h"
#ifdef ART_USE_GREENLAND_COMPILER
#include "greenland/runtime_entry_points.h"
#endif

namespace art {

class AbstractMethod;
class Array;
class Class;
class ClassLinker;
class ClassLoader;
class Context;
struct DebugInvokeReq;
class Monitor;
class Object;
class Runtime;
class ScopedObjectAccess;
class ScopedObjectAccessUnchecked;
class ShadowFrame;
class StackIndirectReferenceTable;
class StackTraceElement;
class StaticStorageBase;
class Thread;
class ThreadList;
class Throwable;

template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int32_t> IntArray;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadState {
  kTerminated                     = 0,   // Thread.TERMINATED     JDWP TS_ZOMBIE
  kRunnable                       = 1,   // Thread.RUNNABLE       JDWP TS_RUNNING
  kTimedWaiting                   = 2,   // Thread.TIMED_WAITING  JDWP TS_WAIT    - in Object.wait() with a timeout
  kBlocked                        = 3,   // Thread.BLOCKED        JDWP TS_MONITOR - blocked on a monitor
  kWaiting                        = 4,   // Thread.WAITING        JDWP TS_WAIT    - in Object.wait()
  kWaitingForGcToComplete         = 5,   // Thread.WAITING        JDWP TS_WAIT    - blocked waiting for GC
  kWaitingPerformingGc            = 6,   // Thread.WAITING        JDWP TS_WAIT    - performing GC
  kWaitingForDebuggerSend         = 7,   // Thread.WAITING        JDWP TS_WAIT    - blocked waiting for events to be sent
  kWaitingForDebuggerToAttach     = 8,   // Thread.WAITING        JDWP TS_WAIT    - blocked waiting for debugger to attach
  kWaitingInMainDebuggerLoop      = 9,   // Thread.WAITING        JDWP TS_WAIT    - blocking/reading/processing debugger events
  kWaitingForDebuggerSuspension   = 10,  // Thread.WAITING        JDWP TS_WAIT    - waiting for debugger suspend all
  kWaitingForJniOnLoad            = 11,  // Thread.WAITING        JDWP TS_WAIT    - waiting for execution of dlopen and JNI_OnLoad code
  kWaitingForSignalCatcherOutput  = 12,  // Thread.WAITING        JDWP TS_WAIT    - waiting for signal catcher IO to complete
  kWaitingInMainSignalCatcherLoop = 13,  // Thread.WAITING        JDWP TS_WAIT    - blocking/reading/processing signals
  kStarting                       = 14,  // Thread.NEW            JDWP TS_WAIT    - native thread started, not yet ready to run managed code
  kNative                         = 15,  // Thread.RUNNABLE       JDWP TS_RUNNING - running in a JNI native method
  kSuspended                      = 16,  // Thread.RUNNABLE       JDWP TS_RUNNING - suspended by GC or debugger
};

enum ThreadFlag {
  kSuspendRequest   = 1,  // If set, implies that suspend_count_ > 0.
  kExceptionPending = 2,  // If set, implies that exception_ != NULL.
  kEnterInterpreter = 4,  // Instructs managed code that it should enter the interpreter.
};

class PACKED Thread {
 public:
  // Space to throw a StackOverflowError in.
#if !defined(ART_USE_LLVM_COMPILER)
  static const size_t kStackOverflowReservedBytes = 4 * KB;
#else  // LLVM_x86 requires more memory to throw a stack overflow exception.
  static const size_t kStackOverflowReservedBytes = 8 * KB;
#endif

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group);

  // Resets the internal state of a child thread after a fork.
  void InitAfterFork();

  static Thread* Current() __attribute__ ((pure)) {
    // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
    // that we can replace this with a direct %fs access on x86.
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }
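
  // A minimal usage sketch (illustrative only, not part of this header's
  // contract): Current() returns NULL on a native thread that was never
  // attached, so such a thread must attach before touching the runtime.
  //
  //   Thread* self = Thread::Current();
  //   if (self == NULL) {
  //     self = Thread::Attach("native-worker", false, NULL);  // hypothetical name
  //   }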

  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, Object* thread_peer)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, jobject thread)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Translates 172 to pAllocArrayFromCode and so on.
  static void DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);

  ThreadState GetState() const {
    return static_cast<ThreadState>(state_and_flags_.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    Locks::thread_suspend_count_lock_->AssertHeld();
    return suspend_count_;
  }

  int GetDebugSuspendCount() const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    Locks::thread_suspend_count_lock_->AssertHeld();
    return debug_suspend_count_;
  }

  bool IsSuspended() const
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    int suspend_count = GetSuspendCount();
    return suspend_count != 0 && GetState() != kRunnable;
  }

  void ModifySuspendCount(int delta, bool for_debugger)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  // Called when this thread detects that thread_suspend_count_ is non-zero. Gives up its share of
  // the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Transitions from a non-runnable to the runnable state, acquiring a share of mutator_lock_.
  ThreadState TransitionFromSuspendedToRunnable()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_);

  // Transitions from runnable into a state where mutator privileges are denied. Releases the
  // share of the mutator lock.
  void TransitionFromRunnableToSuspended(ThreadState new_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_);
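
  // A sketch of the pairing these two transitions imply (illustrative only;
  // the runtime normally hides this behind RAII helpers such as
  // ScopedThreadStateChange):
  //
  //   self->TransitionFromRunnableToSuspended(kNative);
  //   // ... blocking work that must not hold a share of mutator_lock_ ...
  //   self->TransitionFromSuspendedToRunnable();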

  // Waits for a debugger suspension on the thread associated with the given peer. Returns the
  // thread on success, else NULL. If the thread should be suspended then request_suspension should
  // be true on entry. If the suspension times out then *timeout is set to true.
  static Thread* SuspendForDebugger(jobject peer, bool request_suspension, bool* timeout)
      LOCKS_EXCLUDED(Locks::mutator_lock_,
                     Locks::thread_list_lock_,
                     Locks::thread_suspend_count_lock_);

  // Once called, thread suspension will cause an assertion failure.
#ifndef NDEBUG
  const char* StartAssertNoThreadSuspension(const char* cause) {
    CHECK(cause != NULL);
    const char* previous_cause = last_no_thread_suspension_cause_;
    no_thread_suspension_++;
    last_no_thread_suspension_cause_ = cause;
    return previous_cause;
  }
#else
  const char* StartAssertNoThreadSuspension(const char* cause) {
    CHECK(cause != NULL);
    return NULL;
  }
#endif

  // Ends a region where no thread suspension is expected.
#ifndef NDEBUG
  void EndAssertNoThreadSuspension(const char* old_cause) {
    CHECK(old_cause != NULL || no_thread_suspension_ == 1);
    CHECK_GT(no_thread_suspension_, 0U);
    no_thread_suspension_--;
    last_no_thread_suspension_cause_ = old_cause;
  }
#else
  void EndAssertNoThreadSuspension(const char*) {
  }
#endif
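
  // A sketch of the intended Start/End pairing (illustrative only; the cause
  // string here is a hypothetical example):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting roots");
  //   // ... code during which any suspension attempt should assert ...
  //   self->EndAssertNoThreadSuspension(old_cause);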

#ifndef NDEBUG
  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
#else
  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const {
    UNUSED(check_locks);  // Keep GCC happy about unused parameters.
  }
#endif

  bool IsDaemon() const {
    return daemon_;
  }

  bool HoldsLock(Object*);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  uint32_t GetThinLockId() const {
    return thin_lock_id_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
  String* GetThreadName(const ScopedObjectAccessUnchecked& ts) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return peer_;
  }

  bool HasPeer() const {
    return peer_ != NULL;
  }

  Object* GetThreadGroup(const ScopedObjectAccessUnchecked& ts) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    bool result = ReadFlag(kExceptionPending);
    DCHECK_EQ(result, exception_ != NULL);
    return result;
  }

  Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return exception_;
  }

  void AssertNoPendingException() const;

  void SetException(Throwable* new_exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(new_exception != NULL);
    // TODO: DCHECK(!IsExceptionPending());
    exception_ = new_exception;
    AtomicSetFlag(kExceptionPending);
    DCHECK(IsExceptionPending());
  }

  void ClearException() {
    exception_ = NULL;
    AtomicClearFlag(kExceptionPending);
    DCHECK(!IsExceptionPending());
  }
  // Finds the catch block and performs a long jump to the appropriate exception handler.
  void DeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    DCHECK(long_jump_context_ == NULL);
    long_jump_context_ = context;
  }
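
  // A sketch of the borrow/return protocol these two functions form
  // (illustrative only): GetLongJumpContext hands out the thread's cached
  // context, and ReleaseLongJumpContext hands it back for reuse.
  //
  //   Context* context = self->GetLongJumpContext();
  //   // ... use the context to unwind to the handler ...
  //   self->ReleaseLongJumpContext(context);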

  AbstractMethod* GetCurrentMethod(uint32_t* dex_pc = NULL, size_t* frame_id = NULL) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetTopOfStack(void* stack, uintptr_t pc) {
    AbstractMethod** top_method = reinterpret_cast<AbstractMethod**>(stack);
    managed_stack_.SetTopQuickFrame(top_method);
    managed_stack_.SetTopQuickFramePc(pc);
  }

  bool HasManagedStack() const {
    return managed_stack_.GetTopQuickFrame() != NULL || managed_stack_.GetTopShadowFrame() != NULL;
  }

  // If 'msg' is NULL, no detail message is set.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)))
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
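
  // An illustrative call (the first argument is a JNI-style class descriptor
  // and the format string follows printf; the values are hypothetical):
  //
  //   self->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
  //                            "length=%d; index=%d", length, index);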

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // QuickFrameIterator FindExceptionHandler(void* throw_pc, void** handler_pc);

  void* FindExceptionHandlerInMethod(const AbstractMethod* method,
                                     void* throw_pc,
                                     const DexFile& dex_file,
                                     ClassLinker* class_linker);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Converts a jobject into an Object*.
  Object* DecodeJObject(jobject obj)
      LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                     JavaVMExt::weak_globals_lock)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() {
    MutexLock mu(*wait_mutex_);
    bool interrupted = interrupted_;
    interrupted_ = false;
    return interrupted;
  }

  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() {
    MutexLock mu(*wait_mutex_);
    return interrupted_;
  }

  void Interrupt() {
    MutexLock mu(*wait_mutex_);
    if (interrupted_) {
      return;
    }
    interrupted_ = true;
    NotifyLocked();
  }

  void Notify() {
    MutexLock mu(*wait_mutex_);
    NotifyLocked();
  }
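
  // An illustrative contrast between the two queries above (Interrupted()
  // clears the flag as a side effect; IsInterrupted() does not):
  //
  //   self->Interrupt();       // raises the interrupted flag
  //   self->IsInterrupted();   // true; flag still raised
  //   self->Interrupted();     // true; flag now cleared
  //   self->IsInterrupted();   // false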

  ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return class_loader_override_;
  }

  void SetClassLoaderOverride(ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

  // Creates the internal representation of a stack trace, which is more time- and space-efficient
  // to compute than a StackTraceElement[].
  jobject CreateInternalStackTrace(const ScopedObjectAccess& soa) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Converts an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created; otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
      jobjectArray output_array = NULL, int* stack_depth = NULL);

  void VisitRoots(Heap::RootVisitor* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

#if VERIFY_OBJECT_ENABLED
  void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
#else
  void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {}
#endif

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  static ThreadOffset SelfOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, self_));
  }

  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  static ThreadOffset ThinLockIdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
  }

  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  static ThreadOffset ThreadFlagsOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, state_and_flags_));
  }

  // Size of the stack, less any space reserved for stack overflow handling.
  size_t GetStackSize() {
    return stack_size_ - (stack_end_ - stack_begin_);
  }

  // Sets the stack end to the value to be used while handling a stack overflow.
  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets the stack end to the value to be used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    stack_end_ = stack_begin_ + kStackOverflowReservedBytes;
  }
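
  // An illustrative map of the stack fields above, for our downward-growing
  // stacks (addresses increase to the right):
  //
  //   stack_begin_        stack_end_                 stack_begin_ + stack_size_
  //        |<-- overflow gap -->|<------- usable stack ------->|
  //
  // During regular execution the gap is kStackOverflowReservedBytes wide, so
  // GetStackSize() returns stack_size_ - kStackOverflowReservedBytes.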

  static ThreadOffset StackEndOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, stack_end_));
  }

  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFrameOffset());
  }

  static ThreadOffset TopOfManagedStackPcOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFramePcOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &managed_stack_;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    managed_stack_.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    managed_stack_.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return managed_stack_.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return managed_stack_.PopShadowFrame();
  }
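
  // A sketch of the push/pop discipline the shadow frame API implies
  // (illustrative only; 'frame' is a hypothetical frame being entered):
  //
  //   self->PushShadowFrame(frame);
  //   // ... execute the method that owns 'frame' ...
  //   self->PopShadowFrame();  // 'frame' comes off; the previous top is restored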

  static ThreadOffset TopShadowFrameOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in ShadowFrames on this thread.
  size_t NumShadowFrameReferences() const {
    return managed_stack_.NumShadowFrameReferences();
  }

  // Number of references in SIRTs on this thread.
  size_t NumSirtReferences();

  // Number of references allocated in SIRTs and shadow frames on this thread.
  size_t NumStackReferences() {
    return NumSirtReferences() + NumShadowFrameReferences();
  }

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj);

  void SirtVisitRoots(Heap::RootVisitor* visitor, void* arg);

  void PushSirt(StackIndirectReferenceTable* sirt);
  StackIndirectReferenceTable* PopSirt();

  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }

  DebugInvokeReq* GetInvokeReq() {
    return debug_invoke_req_;
  }

  void SetDebuggerUpdatesEnabled(bool enabled);

  const std::vector<TraceStackFrame>* GetTraceStack() const {
    return trace_stack_;
  }

  bool IsTraceStackEmpty() const {
    return trace_stack_->empty();
  }

  void PushTraceStackFrame(const TraceStackFrame& frame) {
    trace_stack_->push_back(frame);
  }

  TraceStackFrame PopTraceStackFrame() {
    TraceStackFrame frame = trace_stack_->back();
    trace_stack_->pop_back();
    return frame;
  }

  BaseMutex* GetHeldMutex(MutexLevel level) const {
    return held_mutexes_[level];
  }

  void SetHeldMutex(MutexLevel level, BaseMutex* mutex) {
    held_mutexes_[level] = mutex;
  }

 private:
  // We have no control over the size of 'bool', but want our boolean fields
  // to be 4-byte quantities.
  typedef uint32_t bool32_t;

  explicit Thread(bool daemon);
  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
                           Locks::thread_suspend_count_lock_);
  void Destroy();
  friend class ThreadList;  // For ~Thread and Destroy.

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
  friend class Runtime;  // For CreatePeer.

  // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit and ~Thread.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    state_and_flags_.state = new_state;
    return old_state;
  }
  friend class SignalCatcher;  // For SetStateUnsafe.

  void DumpState(std::ostream& os) const;
  void DumpStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(const ScopedObjectAccess& soa)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RemoveFromThreadGroup(const ScopedObjectAccess& soa)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Init();
  void InitCardTable();
  void InitCpu();
  void InitFunctionPointers();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void NotifyLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    if (wait_monitor_ != NULL) {
      wait_cond_->Signal();
    }
  }

  bool ReadFlag(ThreadFlag flag) const {
    return (state_and_flags_.flags & flag) != 0;
  }

  void AtomicSetFlag(ThreadFlag flag);

  void AtomicClearFlag(ThreadFlag flag);

  static void ThreadExitCallback(void* arg);

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_
      GUARDED_BY(Locks::thread_suspend_count_lock_);

  // --- Frequently accessed fields first for short offsets ---

  // 32 bits of atomically changed state and flags. Keeping it to 32 bits allows an atomic CAS to
  // change from being Suspended to Runnable without a suspend request occurring.
  struct PACKED StateAndFlags {
    // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
    // ThreadFlags for bit field meanings.
    volatile uint16_t flags;
    // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
    // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
    // operation. If a thread is suspended and a suspend_request is present, a thread may not
    // change to Runnable as a GC or other operation is in progress.
    uint16_t state;
  };
  struct StateAndFlags state_and_flags_;
  COMPILE_ASSERT(sizeof(struct StateAndFlags) == sizeof(int32_t),
                 sizeof_state_and_flags_and_int32_are_different);
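
  // A sketch of why this packing matters (pseudocode; CompareAndSwap32 and
  // AsInt32 are hypothetical helpers): the whole state+flags word can be
  // inspected and replaced in one atomic step, so a thread moves to kRunnable
  // only if no suspend request is set at that same instant.
  //
  //   StateAndFlags old_value = state_and_flags_;
  //   if ((old_value.flags & kSuspendRequest) == 0) {
  //     StateAndFlags new_value = old_value;
  //     new_value.state = kRunnable;
  //     CompareAndSwap32(reinterpret_cast<int32_t*>(&state_and_flags_),
  //                      AsInt32(old_value), AsInt32(new_value));
  //   }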

  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // The biased card table; see CardTable for details.
  byte* card_table_;

  // The pending exception or NULL.
  Throwable* exception_;

  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
  // We leave extra space so there's room for the code that throws StackOverflowError.
  byte* stack_end_;

  // The top of the managed stack, often manipulated directly by compiler-generated code.
  ManagedStack managed_stack_;

  // Every thread may have an associated JNI environment.
  JNIEnvExt* jni_env_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  // Our managed peer (an instance of java.lang.Thread).
  Object* peer_;

  // The "lowest addressable byte" of the stack.
  byte* stack_begin_;

  // Size of the stack.
  size_t stack_size_;

  // Thin lock thread id. This is a small integer used by the thin lock implementation.
  // This is not to be confused with the native thread's tid, nor is it the value returned
  // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
  // important difference between this id and the ids visible to managed code is that these
  // ones get reused (to ensure that they fit in the number of bits available).
  uint32_t thin_lock_id_;

  // System thread id.
  pid_t tid_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  mutable Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on (or NULL).
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
  // Thread "interrupted" status; stays raised until queried or thrown.
  bool32_t interrupted_ GUARDED_BY(wait_mutex_);
  // The next thread in the wait set this thread is part of.
  Thread* wait_next_;
  // If we're blocked in MonitorEnter, this is the object we're trying to lock.
  Object* monitor_enter_object_;

  friend class Monitor;

  // Top of the linked list of stack indirect reference tables, or NULL for none.
  StackIndirectReferenceTable* top_sirt_;

  Runtime* runtime_;

  RuntimeStats stats_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  ClassLoader* class_loader_override_;

  // Thread-local, lazily allocated, long jump context. Used to deliver exceptions.
  Context* long_jump_context_;

  // A boolean telling us whether we're recursively throwing OOME.
  bool32_t throwing_OutOfMemoryError_;

  // How much of 'suspend_count_' is by request of the debugger, used to set things right
  // when the debugger detaches. Must be <= suspend_count_.
  int debug_suspend_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // JDWP invoke-during-breakpoint support.
  DebugInvokeReq* debug_invoke_req_;

  // Additional stack used by the method tracer to store method and return pc values.
  // Stored as a pointer since std::vector is not PACKED.
  std::vector<TraceStackFrame>* trace_stack_;

  // A cached copy of the java.lang.Thread's name.
  std::string* name_;

  // Is the thread a daemon?
  const bool32_t daemon_;

  // A cached pthread_t for the pthread underlying this Thread*.
  pthread_t pthread_self_;

  // Support for Mutex lock hierarchy bug detection.
  BaseMutex* held_mutexes_[kMaxMutexLevel + 1];

  // A positive value implies we're in a region where thread suspension isn't expected.
  uint32_t no_thread_suspension_;

  // Cause recorded by the innermost StartAssertNoThreadSuspension region, if any.
  const char* last_no_thread_suspension_cause_;

 public:
  // Runtime support function pointers.
  // TODO: move this near the top, since changing its offset requires all oats to be recompiled!
  EntryPoints entrypoints_;
#ifdef ART_USE_GREENLAND_COMPILER
  RuntimeEntryPoints runtime_entry_points_;
#endif

 private:
  // How many times has our pthread key's destructor been called?
  uint32_t thread_exit_check_count_;

  friend class ScopedThreadStateChange;

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);

}  // namespace art

#endif  // ART_SRC_THREAD_H_