thread.h revision 9da7f59c9059397182b9a97e898a42ec06d4d646
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_THREAD_H_
#define ART_SRC_THREAD_H_

#include <pthread.h>

#include <bitset>
#include <iosfwd>
#include <list>
#include <string>
#include <vector>

#include "dex_file.h"
#include "globals.h"
#include "jni_internal.h"
#include "logging.h"
#include "macros.h"
#include "mutex.h"
#include "mem_map.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "trace.h"
#include "UniquePtr.h"

namespace art {

class Array;
class Class;
class ClassLinker;
class ClassLoader;
class Context;
struct DebugInvokeReq;
class Method;
class Monitor;
class Object;
class Runtime;
class ScopedObjectAccess;
class ScopedObjectAccessUnchecked;
class ShadowFrame;
class StackIndirectReferenceTable;
class StackTraceElement;
class StaticStorageBase;
class Thread;
class ThreadList;
class Throwable;

template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int32_t> IntArray;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadState {
  kTerminated                     = 0,   // Thread.TERMINATED     JDWP TS_ZOMBIE
  kRunnable                       = 1,   // Thread.RUNNABLE       JDWP TS_RUNNING
  kTimedWaiting                   = 2,   // Thread.TIMED_WAITING  JDWP TS_WAIT    - in Object.wait() with a timeout
  kBlocked                        = 3,   // Thread.BLOCKED        JDWP TS_MONITOR - blocked on a monitor
  kWaiting                        = 4,   // Thread.WAITING        JDWP TS_WAIT    - in Object.wait()
  kWaitingForGcToComplete         = 5,   // Thread.WAITING        JDWP TS_WAIT    - blocked waiting for GC
  kWaitingPerformingGc            = 6,   // Thread.WAITING        JDWP TS_WAIT    - performing GC
  kWaitingForDebuggerSend         = 7,   // Thread.WAITING        JDWP TS_WAIT    - blocked waiting for events to be sent
  kWaitingForDebuggerToAttach     = 8,   // Thread.WAITING        JDWP TS_WAIT    - blocked waiting for debugger to attach
  kWaitingInMainDebuggerLoop      = 9,   // Thread.WAITING        JDWP TS_WAIT    - blocking/reading/processing debugger events
  kWaitingForDebuggerSuspension   = 10,  // Thread.WAITING        JDWP TS_WAIT    - waiting for debugger suspend all
  kWaitingForJniOnLoad            = 11,  // Thread.WAITING        JDWP TS_WAIT    - waiting for execution of dlopen and JNI on load code
  kWaitingForSignalCatcherOutput  = 12,  // Thread.WAITING        JDWP TS_WAIT    - waiting for signal catcher IO to complete
  kWaitingInMainSignalCatcherLoop = 13,  // Thread.WAITING        JDWP TS_WAIT    - blocking/reading/processing signals
  kStarting                       = 14,  // Thread.NEW            JDWP TS_WAIT    - native thread started, not yet ready to run managed code
  kNative                         = 15,  // Thread.RUNNABLE       JDWP TS_RUNNING - running in a JNI native method
  kSuspended                      = 16,  // Thread.RUNNABLE       JDWP TS_RUNNING - suspended by GC or debugger
};

class PACKED Thread {
 public:
  // Space to throw a StackOverflowError in.
#if !defined(ART_USE_LLVM_COMPILER)
  static const size_t kStackOverflowReservedBytes = 4 * KB;
#else  // LLVM_x86 requires more memory to throw stack overflow exception.
  static const size_t kStackOverflowReservedBytes = 8 * KB;
#endif

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group);

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  static Thread* Current() __attribute__ ((pure)) {
    // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
    // that we can replace this with a direct %fs access on x86.
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }
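
  // Illustrative use (a sketch, not part of this header's contract): native callers must
  // tolerate NULL for threads that were never attached or have already detached, e.g.
  //
  //   Thread* self = Thread::Current();
  //   if (self == NULL) {
  //     self = Thread::Attach("example thread", false, NULL);  // Hypothetical name/arguments.
  //   }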

  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, Object* thread_peer)
      LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessUnchecked& ts, jobject thread)
      LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  // Translates 172 to pAllocArrayFromCode and so on.
  static void DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os) const
      LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_);

  ThreadState GetState() const
      EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) {
    GlobalSynchronization::thread_suspend_count_lock_->AssertHeld();
    return state_;
  }

  ThreadState SetState(ThreadState new_state)
      EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) {
    GlobalSynchronization::thread_suspend_count_lock_->AssertHeld();
    ThreadState old_state = state_;
    if (new_state == kRunnable) {
      // Sanity check: we should never become runnable with a pending suspension, and we should
      // always hold a share of mutator_lock_.
      CHECK_EQ(GetSuspendCount(), 0);
      GlobalSynchronization::mutator_lock_->AssertSharedHeld();
    }
    state_ = new_state;
    return old_state;
  }
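
  // Illustrative use (a sketch): callers take the guarding lock around state reads and writes,
  // satisfying the EXCLUSIVE_LOCKS_REQUIRED annotations above, e.g.
  //
  //   MutexLock mu(*GlobalSynchronization::thread_suspend_count_lock_);
  //   ThreadState state = thread->GetState();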

  int GetSuspendCount() const
      EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) {
    GlobalSynchronization::thread_suspend_count_lock_->AssertHeld();
    return suspend_count_;
  }

  int GetDebugSuspendCount() const
      EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) {
    GlobalSynchronization::thread_suspend_count_lock_->AssertHeld();
    return debug_suspend_count_;
  }

  bool IsSuspended() const
      EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_) {
    int suspend_count = GetSuspendCount();
    return suspend_count != 0 && GetState() != kRunnable;
  }

  void ModifySuspendCount(int delta, bool for_debugger)
      EXCLUSIVE_LOCKS_REQUIRED(GlobalSynchronization::thread_suspend_count_lock_);

  // Called when this thread detects that thread_suspend_count_ is non-zero. Gives up its share
  // of the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  // Transition from non-runnable to runnable state, acquiring a share of the mutator_lock_.
  ThreadState TransitionFromSuspendedToRunnable()
      LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(GlobalSynchronization::mutator_lock_);

  // Transition from runnable into a state where mutator privileges are denied. Releases its
  // share of the mutator_lock_.
  void TransitionFromRunnableToSuspended(ThreadState new_state)
      LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(GlobalSynchronization::mutator_lock_);
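
  // Illustrative pairing (a sketch, not a prescription): code that is about to block brackets
  // the blocking region with these calls so it does not hold a share of the mutator_lock_ while
  // suspended, e.g.
  //
  //   self->TransitionFromRunnableToSuspended(kWaiting);
  //   ...  // Block without mutator privileges.
  //   self->TransitionFromSuspendedToRunnable();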

  // Wait for a debugger suspension on the thread associated with the given peer. Returns the
  // thread on success, else NULL. If the thread should be suspended then request_suspension should
  // be true on entry. If the suspension times out then *timeout is set to true.
  static Thread* SuspendForDebugger(jobject peer, bool request_suspension, bool* timeout)
      LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_,
                     GlobalSynchronization::thread_list_lock_,
                     GlobalSynchronization::thread_suspend_count_lock_);

  // Once called, thread suspension will cause an assertion failure.
#ifndef NDEBUG
  const char* StartAssertNoThreadSuspension(const char* cause) {
    CHECK(cause != NULL);
    const char* previous_cause = last_no_thread_suspension_cause_;
    no_thread_suspension_++;
    last_no_thread_suspension_cause_ = cause;
    return previous_cause;
  }
#else
  const char* StartAssertNoThreadSuspension(const char* cause) {
    CHECK(cause != NULL);
    return NULL;
  }
#endif

  // End region where no thread suspension is expected.
#ifndef NDEBUG
  void EndAssertNoThreadSuspension(const char* old_cause) {
    CHECK(old_cause != NULL || no_thread_suspension_ == 1);
    CHECK_GT(no_thread_suspension_, 0U);
    no_thread_suspension_--;
    last_no_thread_suspension_cause_ = old_cause;
  }
#else
  void EndAssertNoThreadSuspension(const char*) {
  }
#endif
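
  // Typical pairing (illustrative; the cause string is arbitrary): saving the previous cause
  // lets regions nest.
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("example cause");
  //   ...  // Code during which suspension would indicate a bug.
  //   self->EndAssertNoThreadSuspension(old_cause);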

#ifndef NDEBUG
  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
#else
  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const {
    check_locks = !check_locks;  // Keep GCC happy about unused parameters.
  }
#endif

  bool CanAccessDirectReferences() const {
#ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: when we have a moving collector, we'll need: return state_ == kRunnable;
#endif
    return true;
  }

  bool IsDaemon() const {
    return daemon_;
  }

  bool HoldsLock(Object*);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  uint32_t GetThinLockId() const {
    return thin_lock_id_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
  String* GetThreadName(const ScopedObjectAccessUnchecked& ts) const
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  Object* GetPeer() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
    return peer_;
  }

  bool HasPeer() const {
    return peer_ != NULL;
  }

  Object* GetThreadGroup(const ScopedObjectAccessUnchecked& ts) const
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  RuntimeStats* GetStats() {
    return &stats_;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return exception_ != NULL;
  }

  Throwable* GetException() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
    DCHECK(CanAccessDirectReferences());
    return exception_;
  }

  void AssertNoPendingException() const;

  void SetException(Throwable* new_exception)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
    DCHECK(CanAccessDirectReferences());
    CHECK(new_exception != NULL);
    // TODO: CHECK(exception_ == NULL);
    exception_ = new_exception;  // TODO
  }

  void ClearException() {
    exception_ = NULL;
  }

  // Finds the catch block and performs a long jump to the appropriate exception handler.
  void DeliverException() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    DCHECK(long_jump_context_ == NULL);
    long_jump_context_ = context;
  }
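
  // Illustrative pairing (a sketch): exception delivery borrows the cached context and hands it
  // back once the long jump has been prepared, e.g.
  //
  //   Context* context = self->GetLongJumpContext();
  //   ...  // Use the context to record the frame to jump to.
  //   self->ReleaseLongJumpContext(context);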

  Method* GetCurrentMethod(uint32_t* dex_pc = NULL, size_t* frame_id = NULL) const
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  void SetTopOfStack(void* stack, uintptr_t pc) {
    Method** top_method = reinterpret_cast<Method**>(stack);
    managed_stack_.SetTopQuickFrame(top_method);
    managed_stack_.SetTopQuickFramePc(pc);
  }

  bool HasManagedStack() const {
    return managed_stack_.GetTopQuickFrame() != NULL || managed_stack_.GetTopShadowFrame() != NULL;
  }

  // If 'msg' is NULL, no detail message is set.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)))
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
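
  // For example (illustrative; the first argument is a JNI-style class descriptor):
  //
  //   self->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
  //                            "length=%d; index=%d", length, index);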

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  //QuickFrameIterator FindExceptionHandler(void* throw_pc, void** handler_pc);

  void* FindExceptionHandlerInMethod(const Method* method,
                                     void* throw_pc,
                                     const DexFile& dex_file,
                                     ClassLinker* class_linker);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Converts a jobject into an Object*.
  Object* DecodeJObject(jobject obj)
      LOCKS_EXCLUDED(JavaVMExt::globals_lock,
                     JavaVMExt::weak_globals_lock)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() {
    MutexLock mu(*wait_mutex_);
    bool interrupted = interrupted_;
    interrupted_ = false;
    return interrupted;
  }

  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() {
    MutexLock mu(*wait_mutex_);
    return interrupted_;
  }

  void Interrupt() {
    MutexLock mu(*wait_mutex_);
    if (interrupted_) {
      return;
    }
    interrupted_ = true;
    NotifyLocked();
  }

  void Notify() {
    MutexLock mu(*wait_mutex_);
    NotifyLocked();
  }
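
  // Note the distinction, mirroring java.lang.Thread: Interrupted() reads and clears the flag,
  // while IsInterrupted() only reads it. After a single Interrupt(), two successive Interrupted()
  // calls return true and then false.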

  ClassLoader* GetClassLoaderOverride()
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {
    DCHECK(CanAccessDirectReferences());
    return class_loader_override_;
  }

  void SetClassLoaderOverride(ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

  // Creates the internal representation of a stack trace, which is more time- and space-efficient
  // to compute than the StackTraceElement[].
  jobject CreateInternalStackTrace(const ScopedObjectAccess& soa) const
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
      jobjectArray output_array = NULL, int* stack_depth = NULL);
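
  // Illustrative two-step use (a sketch, assuming a ScopedObjectAccess 'soa' and a JNIEnv* 'env'):
  //
  //   jobject internal = Thread::Current()->CreateInternalStackTrace(soa);
  //   jobjectArray trace = InternalStackTraceToStackTraceElementArray(env, internal);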

  void VisitRoots(Heap::RootVisitor* visitor, void* arg)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

#if VERIFY_OBJECT_ENABLED
  void VerifyStack() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
#else
  void VerifyStack() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_) {}
#endif

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  static ThreadOffset SelfOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, self_));
  }

  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  static ThreadOffset ThinLockIdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
  }

  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  static ThreadOffset SuspendCountOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_));
  }

  static ThreadOffset StateOffset() {
    return ThreadOffset(OFFSETOF_VOLATILE_MEMBER(Thread, state_));
  }

  // Size of the stack, less any space reserved for stack overflow.
  size_t GetStackSize() {
    return stack_size_ - (stack_end_ - stack_begin_);
  }

  // Sets the stack end to the value used while a stack overflow is being handled.
  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  // Sets the stack end to the value used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    stack_end_ = stack_begin_ + kStackOverflowReservedBytes;
  }
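
  // Stack layout sketch (higher addresses at the top; the stack grows down):
  //
  //   stack_begin_ + stack_size_   <- highest address, frames pushed downward from here
  //   ...
  //   stack_end_                   <- lowest address normally usable by managed code
  //   stack_begin_                 <- lowest addressable byte; the kStackOverflowReservedBytes
  //                                   between here and stack_end_ are reserved for throwing
  //                                   StackOverflowError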

  static ThreadOffset StackEndOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, stack_end_));
  }

  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFrameOffset());
  }

  static ThreadOffset TopOfManagedStackPcOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFramePcOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &managed_stack_;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    managed_stack_.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    managed_stack_.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return managed_stack_.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return managed_stack_.PopShadowFrame();
  }

  static ThreadOffset TopShadowFrameOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in ShadowFrames on this thread
  size_t NumShadowFrameReferences() const {
    return managed_stack_.NumShadowFrameReferences();
  }

  // Number of references in SIRTs on this thread
  size_t NumSirtReferences();

  // Number of references allocated in SIRTs & shadow frames on this thread
  size_t NumStackReferences() {
    return NumSirtReferences() + NumShadowFrameReferences();
  }

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj);

  void SirtVisitRoots(Heap::RootVisitor* visitor, void* arg);

  void PushSirt(StackIndirectReferenceTable* sirt);
  StackIndirectReferenceTable* PopSirt();

  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }

  DebugInvokeReq* GetInvokeReq() {
    return debug_invoke_req_;
  }

  void SetDebuggerUpdatesEnabled(bool enabled);

  const std::vector<TraceStackFrame>* GetTraceStack() const {
    return trace_stack_;
  }

  bool IsTraceStackEmpty() const {
    return trace_stack_->empty();
  }

  void PushTraceStackFrame(const TraceStackFrame& frame) {
    trace_stack_->push_back(frame);
  }

  TraceStackFrame PopTraceStackFrame() {
    TraceStackFrame frame = trace_stack_->back();
    trace_stack_->pop_back();
    return frame;
  }

  BaseMutex* GetHeldMutex(MutexLevel level) const {
    return held_mutexes_[level];
  }

  void SetHeldMutex(MutexLevel level, BaseMutex* mutex) {
    held_mutexes_[level] = mutex;
  }
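
  // Illustrative use (a sketch): a mutex can record itself here as it is acquired and clear the
  // entry as it is released, so lock-ordering checks can ask what is held at a given level, e.g.
  //
  //   bool held_at_level = Thread::Current()->GetHeldMutex(level) != NULL;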

 private:
  // We have no control over the size of 'bool', but want our boolean fields
  // to be 4-byte quantities.
  typedef uint32_t bool32_t;

  explicit Thread(bool daemon);
  ~Thread() LOCKS_EXCLUDED(GlobalSynchronization::mutator_lock_,
                           GlobalSynchronization::thread_suspend_count_lock_);
  void Destroy();
  friend class ThreadList;  // For ~Thread and Destroy.

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
  friend class Runtime;  // For CreatePeer.

  // TODO: remove, callers should use GetState and hold the appropriate locks. Used only by
  //       ShortDump.
  ThreadState GetStateUnsafe() const NO_THREAD_SAFETY_ANALYSIS {
    return state_;
  }

  void DumpState(std::ostream& os) const;
  void DumpStack(std::ostream& os) const
      LOCKS_EXCLUDED(GlobalSynchronization::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(const ScopedObjectAccess& soa)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);
  void RemoveFromThreadGroup(const ScopedObjectAccess& soa)
      SHARED_LOCKS_REQUIRED(GlobalSynchronization::mutator_lock_);

  void Init();
  void InitCardTable();
  void InitCpu();
  void InitFunctionPointers();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void NotifyLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    if (wait_monitor_ != NULL) {
      wait_cond_->Signal();
    }
  }

  static void ThreadExitCallback(void* arg);

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_
      GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_);

  // --- Frequently accessed fields first for short offsets ---

  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_);

  // The biased card table; see CardTable for details.
  byte* card_table_;

  // The pending exception or NULL.
  Throwable* exception_;

  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
  // We leave extra space so there's room for the code that throws StackOverflowError.
  byte* stack_end_;

  // The top of the managed stack often manipulated directly by compiler generated code.
  ManagedStack managed_stack_;

  // Every thread may have an associated JNI environment
  JNIEnvExt* jni_env_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  volatile ThreadState state_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_);

  // Our managed peer (an instance of java.lang.Thread).
  Object* peer_;

  // The "lowest addressable byte" of the stack
  byte* stack_begin_;

  // Size of the stack
  size_t stack_size_;

  // Thin lock thread id. This is a small integer used by the thin lock implementation.
  // This is not to be confused with the native thread's tid, nor is it the value returned
  // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
  // important difference between this id and the ids visible to managed code is that these
  // ones get reused (to ensure that they fit in the number of bits available).
  uint32_t thin_lock_id_;

  // System thread id.
  pid_t tid_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  mutable Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on (or NULL).
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
  // Thread "interrupted" status; stays raised until queried or thrown.
  bool32_t interrupted_ GUARDED_BY(wait_mutex_);
  // The next thread in the wait set this thread is part of.
  Thread* wait_next_;
  // If we're blocked in MonitorEnter, this is the object we're trying to lock.
  Object* monitor_enter_object_;

  friend class Monitor;

  // Top of linked list of stack indirect reference tables or NULL for none
  StackIndirectReferenceTable* top_sirt_;

  Runtime* runtime_;

  RuntimeStats stats_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  ClassLoader* class_loader_override_;

  // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
  Context* long_jump_context_;

  // A boolean telling us whether we're recursively throwing OOME.
  bool32_t throwing_OutOfMemoryError_;

  // How much of 'suspend_count_' is by request of the debugger, used to set things right
  // when the debugger detaches. Must be <= suspend_count_.
  int debug_suspend_count_ GUARDED_BY(GlobalSynchronization::thread_suspend_count_lock_);

  // JDWP invoke-during-breakpoint support.
  DebugInvokeReq* debug_invoke_req_;

  // Additional stack used by method tracer to store method and return pc values.
  // Stored as a pointer since std::vector is not PACKED.
  std::vector<TraceStackFrame>* trace_stack_;

  // A cached copy of the java.lang.Thread's name.
  std::string* name_;

  // Is the thread a daemon?
  const bool32_t daemon_;

  // A cached pthread_t for the pthread underlying this Thread*.
  pthread_t pthread_self_;

  // Support for Mutex lock hierarchy bug detection.
  BaseMutex* held_mutexes_[kMaxMutexLevel + 1];

  // A positive value implies we're in a region where thread suspension isn't expected.
  uint32_t no_thread_suspension_;

  // Cause recorded by the most recent StartAssertNoThreadSuspension call.
  const char* last_no_thread_suspension_cause_;

 public:
  // Runtime support function pointers
  // TODO: move this near the top, since changing its offset requires all oats to be recompiled!
  EntryPoints entrypoints_;

 private:
  // How many times has our pthread key's destructor been called?
  uint32_t thread_exit_check_count_;

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);

}  // namespace art

#endif  // ART_SRC_THREAD_H_