// thread.h revision 0399dde18753aa9bd2bd0d7cf60beef154d164a4
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_THREAD_H_
#define ART_SRC_THREAD_H_

#include <pthread.h>

#include <bitset>
#include <iosfwd>
#include <list>
#include <string>
#include <vector>

#include "dex_file.h"
#include "globals.h"
#include "jni_internal.h"
#include "logging.h"
#include "macros.h"
#include "mutex.h"
#include "mem_map.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "trace.h"
#include "UniquePtr.h"

namespace art {

class Array;
class Class;
class ClassLinker;
class ClassLoader;
class Context;
class DebugInvokeReq;
class Method;
class Monitor;
class Object;
class Runtime;
class ShadowFrame;
class StackIndirectReferenceTable;
class StackTraceElement;
class StaticStorageBase;
class Thread;
class ThreadList;
class Throwable;

template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int32_t> IntArray;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadState {
  kTerminated   = 0, // Thread.TERMINATED     JDWP TS_ZOMBIE
  kRunnable     = 1, // Thread.RUNNABLE       JDWP TS_RUNNING
  kTimedWaiting = 2, // Thread.TIMED_WAITING  JDWP TS_WAIT    - in Object.wait() with a timeout
  kBlocked      = 3, // Thread.BLOCKED        JDWP TS_MONITOR - blocked on a monitor
  kWaiting      = 4, // Thread.WAITING        JDWP TS_WAIT    - in Object.wait()
  kStarting     = 5, // Thread.NEW                            - native thread started, not yet ready to run managed code
  kNative       = 6, //                                       - running in a JNI native method
  kVmWait       = 7, //                                       - waiting on an internal runtime resource
  kSuspended    = 8, //                                       - suspended by GC or debugger
};

class PACKED Thread {
 public:
  // Space to throw a StackOverflowError in.
#if !defined(ART_USE_LLVM_COMPILER)
  static const size_t kStackOverflowReservedBytes = 4 * KB;
#else  // LLVM_x86 requires more memory to throw a stack overflow exception.
  static const size_t kStackOverflowReservedBytes = 8 * KB;
#endif

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(Object* peer, size_t stack_size);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, Object* thread_group);

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  static Thread* Current() __attribute__ ((pure)) {
    // We rely on Thread::Current returning NULL for a detached thread, so it's not obvious
    // that we can replace this with a direct %fs access on x86.
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }

  static Thread* FromManagedThread(Object* thread_peer);
  static Thread* FromManagedThread(JNIEnv* env, jobject thread);
  static uint32_t LockOwnerFromThreadLock(Object* thread_lock);

  // Translates 172 to pAllocArrayFromCode and so on.
  static void DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers);

  // When full == true, dumps the detailed thread state and the thread stack (used for SIGQUIT).
  // When full == false, dumps a one-line summary of thread state (used for operator<<).
  void Dump(std::ostream& os, bool full = true) const;

  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid);

  ThreadState GetState() const {
    return state_;
  }

  ThreadState SetState(ThreadState new_state);
  void SetStateWithoutSuspendCheck(ThreadState new_state);

  bool IsDaemon();
  bool IsSuspended();

  void WaitUntilSuspended();

141  void StartAssertNoThreadSuspension() {
142#ifndef NDEBUG
143    no_thread_suspension_++;
144#endif
145  }
146  // End region where no thread suspension is expected.
147  void EndAssertNoThreadSuspension() {
148#ifndef NDEBUG
149    DCHECK_GT(no_thread_suspension_, 0U);
150    no_thread_suspension_--;
151#endif
152  }
153
154  void AssertThreadSuspensionIsAllowable() const {
155    DCHECK_EQ(0u, no_thread_suspension_);
156  }
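
  // A minimal usage sketch (illustrative only; the lookup helper is hypothetical): the
  // Start/End pair brackets a region that holds raw Object*s, where a suspension-triggered
  // GC would be unsafe.
  //
  //   Thread* self = Thread::Current();
  //   self->StartAssertNoThreadSuspension();
  //   Object* raw = LookupThatMustNotSuspend();  // hypothetical helper returning a raw pointer
  //   self->EndAssertNoThreadSuspension();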

  bool CanAccessDirectReferences() const {
#ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: when we have a moving collector, we'll need: return state_ == kRunnable;
#endif
    return true;
  }

  bool HoldsLock(Object*);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  // Returns the "main" ThreadGroup, used when attaching user threads.
  static Object* GetMainThreadGroup();
  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  static Object* GetSystemThreadGroup();

  uint32_t GetThinLockId() const {
    return thin_lock_id_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
  String* GetThreadName() const;

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name);

  Object* GetPeer() const {
    return peer_;
  }

  Object* GetThreadGroup() const;

  RuntimeStats* GetStats() {
    return &stats_;
  }

  int GetSuspendCount() const {
    return suspend_count_;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return exception_ != NULL;
  }

  Throwable* GetException() const {
    DCHECK(CanAccessDirectReferences());
    return exception_;
  }

  void SetException(Throwable* new_exception) {
    DCHECK(CanAccessDirectReferences());
    CHECK(new_exception != NULL);
    // TODO: CHECK(exception_ == NULL);
    exception_ = new_exception;  // TODO
  }

  void ClearException() {
    exception_ = NULL;
  }

  // Finds the catch block and performs a long jump to the appropriate exception handler.
  void DeliverException();

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    DCHECK(long_jump_context_ == NULL);
    long_jump_context_ = context;
  }
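
  // A usage sketch (illustrative only): exception delivery borrows the lazily-created context,
  // fills it in while walking the stack, and returns it so the next throw reuses the allocation.
  //
  //   Context* context = self->GetLongJumpContext();
  //   // ... locate the catch handler and populate 'context' ...
  //   self->ReleaseLongJumpContext(context);  // only legal while long_jump_context_ is NULL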

  Method* GetCurrentMethod(uint32_t* dex_pc = NULL, size_t* frame_id = NULL) const;

  void SetTopOfStack(void* stack, uintptr_t pc) {
    Method** top_method = reinterpret_cast<Method**>(stack);
    managed_stack_.SetTopQuickFrame(top_method);
    managed_stack_.SetTopQuickFramePc(pc);
  }

  bool HasManagedStack() const {
    return managed_stack_.GetTopQuickFrame() != NULL || managed_stack_.GetTopShadowFrame() != NULL;
  }

  // If 'msg' is NULL, no detail message is set.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg);

  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)));

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg);

  //QuickFrameIterator FindExceptionHandler(void* throw_pc, void** handler_pc);

  void* FindExceptionHandlerInMethod(const Method* method,
                                     void* throw_pc,
                                     const DexFile& dex_file,
                                     ClassLinker* class_linker);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Converts a jobject into an Object*.
  Object* DecodeJObject(jobject obj);

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() {
    MutexLock mu(*wait_mutex_);
    bool interrupted = interrupted_;
    interrupted_ = false;
    return interrupted;
  }

  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() {
    MutexLock mu(*wait_mutex_);
    return interrupted_;
  }

  void Interrupt() {
    MutexLock mu(*wait_mutex_);
    if (interrupted_) {
      return;
    }
    interrupted_ = true;
    NotifyLocked();
  }

  void Notify() {
    MutexLock mu(*wait_mutex_);
    NotifyLocked();
  }
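
  // The java.lang.Thread semantics in a sketch (illustrative only): IsInterrupted is a pure
  // query, while Interrupted clears the flag as a side effect, mirroring the Java API.
  //
  //   thread->Interrupt();               // raises the flag and wakes any Object.wait() waiter
  //   bool a = thread->IsInterrupted();  // true; flag left raised
  //   bool b = thread->Interrupted();    // true; flag now cleared
  //   bool c = thread->IsInterrupted();  // false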

  const ClassLoader* GetClassLoaderOverride() {
    // TODO: need to place the class_loader_override_ in a handle
    // DCHECK(CanAccessDirectReferences());
    return class_loader_override_;
  }

  void SetClassLoaderOverride(const ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

  // Creates the internal representation of a stack trace, which is more time- and
  // space-efficient to compute than the StackTraceElement[].
  jobject CreateInternalStackTrace(JNIEnv* env) const;

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
      jobjectArray output_array = NULL, int* stack_depth = NULL);
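
  // A two-step usage sketch (illustrative only; 'env' is assumed to be a valid JNIEnv*):
  // capture cheaply first, then decode to StackTraceElement[] only if the trace is needed.
  //
  //   jobject internal = Thread::Current()->CreateInternalStackTrace(env);
  //   jobjectArray trace =
  //       Thread::InternalStackTraceToStackTraceElementArray(env, internal);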

  void VisitRoots(Heap::RootVisitor* visitor, void* arg);

#if VERIFY_OBJECT_ENABLED
  void VerifyStack();
#else
  void VerifyStack() {}
#endif

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  static ThreadOffset SelfOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, self_));
  }

  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  static ThreadOffset ThinLockIdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
  }

  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  static ThreadOffset SuspendCountOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_));
  }

  static ThreadOffset StateOffset() {
    return ThreadOffset(OFFSETOF_VOLATILE_MEMBER(Thread, state_));
  }

  // Size of stack less any space reserved for stack overflow.
  size_t GetStackSize() {
    return stack_size_ - (stack_end_ - stack_begin_);
  }

  // Sets the stack end to the value to be used while handling a stack overflow.
  void SetStackEndForStackOverflow() {
    // During stack overflow we allow use of the full stack.
    if (stack_end_ == stack_begin_) {
      DumpStack(std::cerr);
      LOG(FATAL) << "Need to increase kStackOverflowReservedBytes (currently "
                 << kStackOverflowReservedBytes << ")";
    }

    stack_end_ = stack_begin_;
  }

  // Sets the stack end to the value to be used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    stack_end_ = stack_begin_ + kStackOverflowReservedBytes;
  }
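
  // A recovery sketch (illustrative only; the throw helper is hypothetical): the overflow path
  // widens the usable stack so the StackOverflowError can be constructed and thrown, then
  // restores the reserved gap once the stack has unwound.
  //
  //   self->SetStackEndForStackOverflow();   // use the reserved bytes to build the error
  //   ThrowStackOverflowErrorSomehow(self);  // hypothetical throw helper
  //   self->ResetDefaultStackEnd();          // re-arm the overflow check after unwinding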

  static ThreadOffset StackEndOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, stack_end_));
  }

  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFrameOffset());
  }

  static ThreadOffset TopOfManagedStackPcOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopQuickFramePcOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &managed_stack_;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    managed_stack_.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    managed_stack_.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return managed_stack_.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return managed_stack_.PopShadowFrame();
  }
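
  // An interpreter-style sketch (illustrative only; 'callee_frame' is a hypothetical local):
  // each interpreted invoke pushes its shadow frame so the GC can find the frame's references,
  // and pops it again on the way out.
  //
  //   self->PushShadowFrame(callee_frame);
  //   // ... interpret the callee ...
  //   self->PopShadowFrame();  // the caller's frame is the top again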

  static ThreadOffset TopShadowFrameOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, managed_stack_) +
                        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in ShadowFrames on this thread
  size_t NumShadowFrameReferences() const {
    return managed_stack_.NumShadowFrameReferences();
  }

  // Number of references in SIRTs on this thread
  size_t NumSirtReferences();

  // Number of references allocated in SIRTs & shadow frames on this thread
  size_t NumStackReferences() {
    return NumSirtReferences() + NumShadowFrameReferences();
  }

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj);

  void SirtVisitRoots(Heap::RootVisitor* visitor, void* arg);

  void PushSirt(StackIndirectReferenceTable* sirt);
  StackIndirectReferenceTable* PopSirt();

  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }

  DebugInvokeReq* GetInvokeReq() {
    return debug_invoke_req_;
  }

  void SetDebuggerUpdatesEnabled(bool enabled);

  const std::vector<TraceStackFrame>* GetTraceStack() const {
    return trace_stack_;
  }

  bool IsTraceStackEmpty() const {
    return trace_stack_->empty();
  }

  void PushTraceStackFrame(const TraceStackFrame& frame) {
    trace_stack_->push_back(frame);
  }

  TraceStackFrame PopTraceStackFrame() {
    TraceStackFrame frame = trace_stack_->back();
    trace_stack_->pop_back();
    return frame;
  }
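
  // A tracing sketch (illustrative only; 'method' and 'return_pc' are hypothetical locals and
  // the two-argument TraceStackFrame constructor is assumed): the method tracer pushes a frame
  // on entry so the saved return pc can be restored when the traced method exits.
  //
  //   self->PushTraceStackFrame(TraceStackFrame(method, return_pc));
  //   // ... traced method runs ...
  //   TraceStackFrame frame = self->PopTraceStackFrame();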

  void CheckSafeToLockOrUnlock(MutexRank rank, bool is_locking);
  void CheckSafeToWait(MutexRank rank);

 private:
  Thread();
  ~Thread();
  void Destroy();
  friend class ThreadList;  // For ~Thread and Destroy.

  void CreatePeer(const char* name, bool as_daemon, Object* thread_group);
  friend class Runtime; // For CreatePeer.

  void DumpState(std::ostream& os) const;
  void DumpStack(std::ostream& os) const;

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb(); // Like Thread::Current.
  void DumpFromGdb() const; // Like Thread::Dump(std::cerr).

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions();
  void RemoveFromThreadGroup();

  void Init();
  void InitCardTable();
  void InitCpu();
  void InitFunctionPointers();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void NotifyLocked() {
    if (wait_monitor_ != NULL) {
      wait_cond_->Signal();
    }
  }

  static void ThreadExitCallback(void* arg);

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // --- Frequently accessed fields first for short offsets ---

  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_;

  // The biased card table, see CardTable for details.
  byte* card_table_;

  // The pending exception or NULL.
  Throwable* exception_;

  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
  // We leave extra space so there's room for the code that throws StackOverflowError.
  byte* stack_end_;

  // The top of the managed stack, often manipulated directly by compiler-generated code.
  ManagedStack managed_stack_;

  // Every thread may have an associated JNI environment
  JNIEnvExt* jni_env_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  volatile ThreadState state_;

  // Our managed peer (an instance of java.lang.Thread).
  Object* peer_;

  // The "lowest addressable byte" of the stack
  byte* stack_begin_;

  // Size of the stack
  size_t stack_size_;

  // Thin lock thread id. This is a small integer used by the thin lock implementation.
  // This is not to be confused with the native thread's tid, nor is it the value returned
  // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
  // important difference between this id and the ids visible to managed code is that these
  // ones get reused (to ensure that they fit in the number of bits available).
  uint32_t thin_lock_id_;

  // System thread id.
  pid_t tid_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  mutable Mutex* wait_mutex_;
  ConditionVariable* wait_cond_;
  // Pointer to the monitor lock we're currently waiting on (or NULL), guarded by wait_mutex_.
  Monitor* wait_monitor_;
  // Thread "interrupted" status; stays raised until queried or thrown, guarded by wait_mutex_.
  uint32_t interrupted_;
  // The next thread in the wait set this thread is part of.
  Thread* wait_next_;
  // If we're blocked in MonitorEnter, this is the object we're trying to lock.
  Object* monitor_enter_object_;

  friend class Monitor;

  // Top of linked list of stack indirect reference tables or NULL for none
  StackIndirectReferenceTable* top_sirt_;

  Runtime* runtime_;

  RuntimeStats stats_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  const ClassLoader* class_loader_override_;

  // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
  Context* long_jump_context_;

  // A boolean telling us whether we're recursively throwing OOME.
  uint32_t throwing_OutOfMemoryError_;

  // How much of 'suspend_count_' is by request of the debugger, used to set things right
  // when the debugger detaches. Must be <= suspend_count_.
  int debug_suspend_count_;

  // JDWP invoke-during-breakpoint support.
  DebugInvokeReq* debug_invoke_req_;

  // Additional stack used by method tracer to store method and return pc values.
  // Stored as a pointer since std::vector is not PACKED.
  std::vector<TraceStackFrame>* trace_stack_;

  // A cached copy of the java.lang.Thread's name.
  std::string* name_;

  // A cached pthread_t for the pthread underlying this Thread*.
  pthread_t pthread_self_;

  // Mutexes held by this thread, see CheckSafeToLockOrUnlock.
  uint32_t held_mutexes_[kMaxMutexRank + 1];

  // A positive value implies we're in a region where thread suspension isn't expected.
  uint32_t no_thread_suspension_;
 public:
  // Runtime support function pointers
  EntryPoints entrypoints_;

 private:
  friend class ScopedThreadListLockReleaser;
  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);

class ScopedThreadStateChange {
 public:
  ScopedThreadStateChange(Thread* thread, ThreadState new_state) : thread_(thread) {
    old_thread_state_ = thread_->SetState(new_state);
  }

  ~ScopedThreadStateChange() {
    thread_->SetState(old_thread_state_);
  }

 private:
  Thread* thread_;
  ThreadState old_thread_state_;
  DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange);
};
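
// A usage sketch (illustrative only): RAII makes the state change early-return-safe; the
// destructor restores whatever state the thread held before the change.
//
//   {
//     ScopedThreadStateChange tsc(Thread::Current(), kVmWait);
//     // ... block on an internal runtime resource ...
//   }  // previous state (typically kRunnable) restored here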

}  // namespace art

#endif  // ART_SRC_THREAD_H_