thread.h revision 6f1c94968ada57da433debf8e2d1b38a80ceb510
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_THREAD_H_
#define ART_SRC_THREAD_H_

#include <pthread.h>

#include <bitset>
#include <iosfwd>
#include <list>
#include <string>
#include <vector>

#include "globals.h"
#include "macros.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "locks.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "stack_indirect_reference_table.h"
#include "trace.h"
#include "UniquePtr.h"
#ifdef ART_USE_GREENLAND_COMPILER
#include "greenland/runtime_entry_points.h"
#endif

namespace art {

class AbstractMethod;
class Array;
class BaseMutex;
class Class;
class ClassLinker;
class ClassLoader;
class Context;
struct DebugInvokeReq;
class DexFile;
struct JavaVMExt;
struct JNIEnvExt;
class Monitor;
class Object;
class Runtime;
class ScopedObjectAccess;
class ScopedObjectAccessUnchecked;
class ShadowFrame;
class StackIndirectReferenceTable;
class StackTraceElement;
class StaticStorageBase;
class Thread;
class ThreadList;
class Throwable;

template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int32_t> IntArray;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadState {
  kTerminated                     = 0,   // Thread.TERMINATED     JDWP TS_ZOMBIE
  kRunnable                       = 1,   // Thread.RUNNABLE       JDWP TS_RUNNING
  kTimedWaiting                   = 2,   // Thread.TIMED_WAITING  JDWP TS_WAIT    - in Object.wait() with a timeout
  kBlocked                        = 3,   // Thread.BLOCKED        JDWP TS_MONITOR - blocked on a monitor
  kWaiting                        = 4,   // Thread.WAITING        JDWP TS_WAIT    - in Object.wait()
  kWaitingForGcToComplete         = 5,   // Thread.WAITING        JDWP TS_WAIT    - blocked waiting for GC
  kWaitingPerformingGc            = 6,   // Thread.WAITING        JDWP TS_WAIT    - performing GC
  kWaitingForDebuggerSend         = 7,   // Thread.WAITING        JDWP TS_WAIT    - blocked waiting for events to be sent
  kWaitingForDebuggerToAttach     = 8,   // Thread.WAITING        JDWP TS_WAIT    - blocked waiting for debugger to attach
  kWaitingInMainDebuggerLoop      = 9,   // Thread.WAITING        JDWP TS_WAIT    - blocking/reading/processing debugger events
  kWaitingForDebuggerSuspension   = 10,  // Thread.WAITING        JDWP TS_WAIT    - waiting for debugger suspend all
  kWaitingForJniOnLoad            = 11,  // Thread.WAITING        JDWP TS_WAIT    - waiting for execution of dlopen and JNI_OnLoad code
  kWaitingForSignalCatcherOutput  = 12,  // Thread.WAITING        JDWP TS_WAIT    - waiting for signal catcher IO to complete
  kWaitingInMainSignalCatcherLoop = 13,  // Thread.WAITING        JDWP TS_WAIT    - blocking/reading/processing signals
  kStarting                       = 14,  // Thread.NEW            JDWP TS_WAIT    - native thread started, not yet ready to run managed code
  kNative                         = 15,  // Thread.RUNNABLE       JDWP TS_RUNNING - running in a JNI native method
  kSuspended                      = 16,  // Thread.RUNNABLE       JDWP TS_RUNNING - suspended by GC or debugger
};

enum ThreadFlag {
  kSuspendRequest   = 1,  // If set, implies that suspend_count_ > 0.
  kExceptionPending = 2,  // If set, implies that exception_ != NULL.
  kEnterInterpreter = 4,  // Instructs managed code that it should enter the interpreter.
};

class PACKED Thread {
 public:
  // Space to throw a StackOverflowError in.
#if !defined(ART_USE_LLVM_COMPILER)
  static const size_t kStackOverflowReservedBytes = 4 * KB;
#else  // LLVM_x86 requires more memory to throw a stack overflow exception.
  static const size_t kStackOverflowReservedBytes = 8 * KB;
#endif

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group);

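  // Illustrative sketch (not part of this class): a JNI AttachCurrentThread implementation would
  // attach the caller roughly like this; the thread group may be NULL for the default group:
  //
  //   Thread* self = Thread::Attach("JNI attached thread", false, NULL);
  //   JNIEnv* env = (self != NULL) ? self->GetJniEnv() : NULL;
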
  // Resets the internal state of a child thread after a fork.
  void InitAfterFork();

  static Thread* Current() __attribute__ ((pure)) {
    // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
    // that we can replace this with a direct %fs access on x86.
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }

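  // Illustrative usage (sketch): callers that may run on never-attached or already-detached
  // threads must tolerate a NULL result:
  //
  //   Thread* self = Thread::Current();
  //   if (self == NULL) {
  //     // The calling thread is not attached to the runtime.
  //   }
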
  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, Object* thread_peer)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, jobject thread)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Translates 172 to pAllocArrayFromCode and so on.
  static void DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_);

  ThreadState GetState() const {
    return static_cast<ThreadState>(state_and_flags_.as_struct.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return suspend_count_;
  }

  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return debug_suspend_count_;
  }

  bool IsSuspended() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return GetState() != kRunnable && ReadFlag(kSuspendRequest);
  }

  void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  // Called when this thread detects that thread_suspend_count_ is non-zero. Gives up its share
  // of the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Transitions from a non-runnable to the runnable state, acquiring a share of the mutator_lock_.
  ThreadState TransitionFromSuspendedToRunnable()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_);

  // Transitions from runnable into a state where mutator privileges are denied. Releases the
  // share of the mutator lock.
  void TransitionFromRunnableToSuspended(ThreadState new_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_);

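  // Illustrative sketch (hypothetical caller): a thread about to block in native code gives up
  // its mutator privileges so the GC need not wait for it, then reacquires them afterwards:
  //
  //   Thread* self = Thread::Current();
  //   self->TransitionFromRunnableToSuspended(kNative);
  //   DoBlockingSystemCall();                     // runs without a share of mutator_lock_
  //   self->TransitionFromSuspendedToRunnable();  // may block here while a GC is in progress
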
  // Waits for a debugger suspension on the thread associated with the given peer. Returns the
  // thread on success, else NULL. If the thread should be suspended then request_suspension
  // should be true on entry. If the suspension times out then *timeout is set to true.
  static Thread* SuspendForDebugger(jobject peer, bool request_suspension, bool* timeout)
      LOCKS_EXCLUDED(Locks::mutator_lock_,
                     Locks::thread_list_lock_,
                     Locks::thread_suspend_count_lock_);

  // Once called, thread suspension will cause an assertion failure.
#ifndef NDEBUG
  const char* StartAssertNoThreadSuspension(const char* cause) {
    CHECK(cause != NULL);
    const char* previous_cause = last_no_thread_suspension_cause_;
    no_thread_suspension_++;
    last_no_thread_suspension_cause_ = cause;
    return previous_cause;
  }
#else
  const char* StartAssertNoThreadSuspension(const char* cause) {
    CHECK(cause != NULL);
    return NULL;
  }
#endif

  // Ends a region where no thread suspension is expected.
#ifndef NDEBUG
  void EndAssertNoThreadSuspension(const char* old_cause) {
    CHECK(old_cause != NULL || no_thread_suspension_ == 1);
    CHECK_GT(no_thread_suspension_, 0U);
    no_thread_suspension_--;
    last_no_thread_suspension_cause_ = old_cause;
  }
#else
  void EndAssertNoThreadSuspension(const char*) {
  }
#endif

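  // Illustrative pairing (sketch): the two calls nest, with the previous cause restored on exit:
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting roots");
  //   // ... code that must not suspend; suspension here is an assertion failure ...
  //   self->EndAssertNoThreadSuspension(old_cause);
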
#ifndef NDEBUG
  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
#else
  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const {
    UNUSED(check_locks);  // Keep GCC happy about unused parameters.
  }
#endif

  bool IsDaemon() const {
    return daemon_;
  }

  bool HoldsLock(Object*);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  uint32_t GetThinLockId() const {
    return thin_lock_id_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
  String* GetThreadName(const ScopedObjectAccessUnchecked& ts) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  jobject GetPeer() const {
    return peer_;
  }

  bool HasPeer() const {
    return peer_ != NULL;
  }

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    bool result = ReadFlag(kExceptionPending);
    DCHECK_EQ(result, exception_ != NULL);
    return result;
  }

  Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return exception_;
  }

  void AssertNoPendingException() const;

  void SetException(Throwable* new_exception) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(new_exception != NULL);
    // TODO: DCHECK(!IsExceptionPending());
    exception_ = new_exception;
    AtomicSetFlag(kExceptionPending);
    DCHECK(IsExceptionPending());
  }

  void ClearException() {
    exception_ = NULL;
    AtomicClearFlag(kExceptionPending);
    DCHECK(!IsExceptionPending());
  }

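  // Illustrative sketch (hypothetical caller): the usual check-consume pattern for a pending
  // exception:
  //
  //   if (self->IsExceptionPending()) {
  //     Throwable* exception = self->GetException();
  //     self->ClearException();
  //     // ... handle or rethrow 'exception' ...
  //   }
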
  // Finds the catch block and performs a long jump to the appropriate exception handler.
  void DeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    DCHECK(long_jump_context_ == NULL);
    long_jump_context_ = context;
  }

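  // Illustrative pairing (sketch): the context is lazily allocated and cached on the thread, so
  // callers return it when done rather than deleting it:
  //
  //   Context* context = self->GetLongJumpContext();
  //   // ... use 'context' to unwind to the handler ...
  //   self->ReleaseLongJumpContext(context);  // caches it for the next caller
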
  AbstractMethod* GetCurrentMethod(uint32_t* dex_pc = NULL, size_t* frame_id = NULL) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetTopOfStack(void* stack, uintptr_t pc) {
    AbstractMethod** top_method = reinterpret_cast<AbstractMethod**>(stack);
    managed_stack_.SetTopQuickFrame(top_method);
    managed_stack_.SetTopQuickFramePc(pc);
  }

  bool HasManagedStack() const {
    return managed_stack_.GetTopQuickFrame() != NULL || managed_stack_.GetTopShadowFrame() != NULL;
  }

  // If 'msg' is NULL, no detail message is set.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)))
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

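  // Illustrative usage (sketch; the descriptor and format arguments are made up): exceptions are
  // named by class descriptor, and the detail message is printf-formatted:
  //
  //   self->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
  //                            "length=%d; index=%d", length, index);
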
  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // QuickFrameIterator FindExceptionHandler(void* throw_pc, void** handler_pc);

  void* FindExceptionHandlerInMethod(const AbstractMethod* method,
                                     void* throw_pc,
                                     const DexFile& dex_file,
                                     ClassLinker* class_linker);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Converts a jobject into an Object*.
  Object* DecodeJObject(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Implements java.lang.Thread.interrupted.
  bool Interrupted();
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted();
  void Interrupt();
  void Notify();

  ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return class_loader_override_;
  }

  void SetClassLoaderOverride(ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

  // Creates the internal representation of a stack trace, which is more time- and space-efficient
  // to compute than the StackTraceElement[].
  jobject CreateInternalStackTrace(const ScopedObjectAccess& soa) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Converts an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
      jobjectArray output_array = NULL, int* stack_depth = NULL);

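  // Illustrative two-step usage (sketch): capture the trace now, convert it lazily when the
  // elements are actually needed:
  //
  //   jobject internal = soa.Self()->CreateInternalStackTrace(soa);
  //   jobjectArray trace =
  //       Thread::InternalStackTraceToStackTraceElementArray(soa.Env(), internal);
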
  void VisitRoots(Heap::RootVisitor* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

#if VERIFY_OBJECT_ENABLED
  void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
#else
  void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {}
#endif

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  static ThreadOffset SelfOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, self_));
  }

  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  static ThreadOffset ThinLockIdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
  }

  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  static ThreadOffset ThreadFlagsOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, state_and_flags_));
  }

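  // Illustrative sketch (assumed register assignment, not taken from this file): a compiler
  // backend uses these offsets to address thread-local state directly, e.g. with the Thread*
  // held in a dedicated register:
  //
  //   ldr r0, [r9, #<Thread::ExceptionOffset().Int32Value()>]  // ARM: r0 = self->exception_
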
  // Size of the stack, less any space reserved for stack overflow.
  size_t GetStackSize() {
    return stack_size_ - (stack_end_ - stack_begin_);
  }

  // Sets the stack end to that to be used during a stack overflow.
  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets the stack end to that to be used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    stack_end_ = stack_begin_ + kStackOverflowReservedBytes;
  }

  bool IsHandlingStackOverflow() const {
    return stack_end_ == stack_begin_;
  }

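  // Illustrative layout (sketch): the stack grows down from stack_begin_ + stack_size_ towards
  // stack_begin_, and stack_end_ normally sits kStackOverflowReservedBytes above the bottom:
  //
  //   stack_begin_            stack_end_                    stack_begin_ + stack_size_
  //        |  reserved for     |                                     |
  //        |  overflow throw   |  usable stack (= GetStackSize())    |
  //     lowest address                                         highest address
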
  static ThreadOffset StackEndOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, stack_end_));
  }

  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFrameOffset());
  }

  static ThreadOffset TopOfManagedStackPcOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFramePcOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &managed_stack_;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    managed_stack_.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    managed_stack_.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return managed_stack_.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return managed_stack_.PopShadowFrame();
  }

  static ThreadOffset TopShadowFrameOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in ShadowFrames on this thread.
  size_t NumShadowFrameReferences() const {
    return managed_stack_.NumShadowFrameReferences();
  }

  // Number of references in SIRTs on this thread.
  size_t NumSirtReferences();

  // Number of references allocated in SIRTs & shadow frames on this thread.
  size_t NumStackReferences() {
    return NumSirtReferences() + NumShadowFrameReferences();
  }

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj);

  void SirtVisitRoots(Heap::RootVisitor* visitor, void* arg);

  void PushSirt(StackIndirectReferenceTable* sirt) {
    sirt->SetLink(top_sirt_);
    top_sirt_ = sirt;
  }

  StackIndirectReferenceTable* PopSirt() {
    StackIndirectReferenceTable* sirt = top_sirt_;
    DCHECK(sirt != NULL);
    top_sirt_ = top_sirt_->GetLink();
    return sirt;
  }

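  // Illustrative pairing (sketch): pushes and pops are strictly LIFO, so each PopSirt() must
  // return the most recently pushed table:
  //
  //   self->PushSirt(&sirt);
  //   // ... references registered in 'sirt' are now visible to SirtVisitRoots ...
  //   CHECK(self->PopSirt() == &sirt);
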
  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }

  DebugInvokeReq* GetInvokeReq() {
    return debug_invoke_req_;
  }

  void SetDebuggerUpdatesEnabled(bool enabled);

  const std::vector<TraceStackFrame>* GetTraceStack() const {
    return trace_stack_;
  }

  bool IsTraceStackEmpty() const {
    return trace_stack_->empty();
  }

  void PushTraceStackFrame(const TraceStackFrame& frame) {
    trace_stack_->push_back(frame);
  }

  TraceStackFrame PopTraceStackFrame() {
    TraceStackFrame frame = trace_stack_->back();
    trace_stack_->pop_back();
    return frame;
  }

  BaseMutex* GetHeldMutex(LockLevel level) const {
    return held_mutexes_[level];
  }

  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
    held_mutexes_[level] = mutex;
  }

 private:
  // We have no control over the size of 'bool', but want our boolean fields
  // to be 4-byte quantities.
  typedef uint32_t bool32_t;

  explicit Thread(bool daemon);
  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
                           Locks::thread_suspend_count_lock_);
  void Destroy();
  friend class ThreadList;  // For ~Thread and Destroy.

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
  friend class Runtime;  // For CreatePeer.

  // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit and ~Thread.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    state_and_flags_.as_struct.state = new_state;
    return old_state;
  }
  friend class SignalCatcher;  // For SetStateUnsafe.

  void DumpState(std::ostream& os) const;
  void DumpStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions();
  void RemoveFromThreadGroup();

  void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
  void InitCardTable();
  void InitCpu();
  void InitFunctionPointers();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void NotifyLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);

  bool ReadFlag(ThreadFlag flag) const {
    return (state_and_flags_.as_struct.flags & flag) != 0;
  }

  void AtomicSetFlag(ThreadFlag flag);

  void AtomicClearFlag(ThreadFlag flag);

  static void ThreadExitCallback(void* arg);

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_
      GUARDED_BY(Locks::thread_suspend_count_lock_);

  // --- Frequently accessed fields first for short offsets ---

  // 32 bits of atomically changed state and flags. Keeping it to 32 bits allows an atomic CAS
  // to change from being Suspended to Runnable without a suspend request occurring.
  union StateAndFlags {
    struct PACKED {
      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
      // ThreadFlags for bit field meanings.
      volatile uint16_t flags;
      // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
      // operation. If a thread is suspended and a suspend_request is present, a thread may not
      // change to Runnable as a GC or other operation is in progress.
      volatile uint16_t state;
    } as_struct;
    volatile int32_t as_int;
  };
  union StateAndFlags state_and_flags_;
  COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
                 sizeof_state_and_flags_and_int32_are_different);

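  // Illustrative sketch of the Suspended -> Runnable transition described above (the real code
  // lives in thread.cc; android_atomic_cmpxchg is the platform compare-and-swap, which returns 0
  // on success):
  //
  //   union StateAndFlags old_state_and_flags = state_and_flags_;
  //   if ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
  //     // A suspend request is pending: wait on resume_cond_ instead of becoming Runnable.
  //   } else {
  //     union StateAndFlags new_state_and_flags = old_state_and_flags;
  //     new_state_and_flags.as_struct.state = kRunnable;
  //     bool done = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
  //                                        &state_and_flags_.as_int) == 0;
  //     // On failure (a racing suspend request), re-read state_and_flags_ and retry.
  //   }
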
  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // The biased card table, see CardTable for details.
  byte* card_table_;

  // The pending exception or NULL.
  Throwable* exception_;

  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
  // We leave extra space so there's room for the code that throws StackOverflowError.
  byte* stack_end_;

  // The top of the managed stack, often manipulated directly by compiler-generated code.
  ManagedStack managed_stack_;

  // Every thread may have an associated JNI environment.
  JNIEnvExt* jni_env_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  // Our managed peer (an instance of java.lang.Thread).
  jobject peer_;

  // The "lowest addressable byte" of the stack.
  byte* stack_begin_;

  // Size of the stack.
  size_t stack_size_;

  // Thin lock thread id. This is a small integer used by the thin lock implementation.
  // This is not to be confused with the native thread's tid, nor is it the value returned
  // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
  // important difference between this id and the ids visible to managed code is that these
  // ones get reused (to ensure that they fit in the number of bits available).
  uint32_t thin_lock_id_;

  // System thread id.
  pid_t tid_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  mutable Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on (or NULL).
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
  // Thread "interrupted" status; stays raised until queried or thrown.
  bool32_t interrupted_ GUARDED_BY(wait_mutex_);
  // The next thread in the wait set this thread is part of.
  Thread* wait_next_;
  // If we're blocked in MonitorEnter, this is the object we're trying to lock.
  Object* monitor_enter_object_;

  friend class Monitor;

  // Top of the linked list of stack indirect reference tables, or NULL for none.
  StackIndirectReferenceTable* top_sirt_;

  Runtime* runtime_;

  RuntimeStats stats_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  ClassLoader* class_loader_override_;

  // Thread-local, lazily allocated, long jump context. Used to deliver exceptions.
  Context* long_jump_context_;

  // A boolean telling us whether we're recursively throwing OOME.
  bool32_t throwing_OutOfMemoryError_;

  // How much of 'suspend_count_' is by request of the debugger, used to set things right
  // when the debugger detaches. Must be <= suspend_count_.
  int debug_suspend_count_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  // JDWP invoke-during-breakpoint support.
  DebugInvokeReq* debug_invoke_req_;

  // Additional stack used by the method tracer to store method and return pc values.
  // Stored as a pointer since std::vector is not PACKED.
  std::vector<TraceStackFrame>* trace_stack_;

  // A cached copy of the java.lang.Thread's name.
  std::string* name_;

  // Is the thread a daemon?
  const bool32_t daemon_;

  // A cached pthread_t for the pthread underlying this Thread*.
  pthread_t pthread_self_;

  // Support for Mutex lock hierarchy bug detection.
  BaseMutex* held_mutexes_[kMaxMutexLevel + 1];

  // A positive value implies we're in a region where thread suspension isn't expected.
  uint32_t no_thread_suspension_;

  // The cause given for the current no-thread-suspension region, or NULL.
  const char* last_no_thread_suspension_cause_;

 public:
  // Runtime support function pointers.
  // TODO: move this near the top, since changing its offset requires all oats to be recompiled!
  EntryPoints entrypoints_;
#ifdef ART_USE_GREENLAND_COMPILER
  RuntimeEntryPoints runtime_entry_points_;
#endif

 private:
  // How many times has our pthread key's destructor been called?
  uint32_t thread_exit_check_count_;

  friend class ScopedThreadStateChange;

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);

}  // namespace art

#endif  // ART_SRC_THREAD_H_