/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_THREAD_H_
#define ART_SRC_THREAD_H_

#include <pthread.h>

#include <bitset>
#include <iosfwd>
#include <list>
#include <string>
#include <vector>

#include "dex_file.h"
#include "globals.h"
#include "jni_internal.h"
#include "logging.h"
#include "macros.h"
#include "mutex.h"
#include "mem_map.h"
#include "oat/runtime/oat_support_entrypoints.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "shadow_frame.h"
#include "stack.h"
#include "trace.h"
#include "UniquePtr.h"

namespace art {

class Array;
class Class;
class ClassLinker;
class ClassLoader;
class Context;
class DebugInvokeReq;
class Method;
class Monitor;
class Object;
class Runtime;
class ShadowFrame;
class StackIndirectReferenceTable;
class StackTraceElement;
class StaticStorageBase;
class Thread;
class ThreadList;
class Throwable;

template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int32_t> IntArray;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadState {
  kTerminated   = 0, // Thread.TERMINATED     JDWP TS_ZOMBIE
  kRunnable     = 1, // Thread.RUNNABLE       JDWP TS_RUNNING
  kTimedWaiting = 2, // Thread.TIMED_WAITING  JDWP TS_WAIT    - in Object.wait() with a timeout
  kBlocked      = 3, // Thread.BLOCKED        JDWP TS_MONITOR - blocked on a monitor
  kWaiting      = 4, // Thread.WAITING        JDWP TS_WAIT    - in Object.wait()
  kStarting     = 5, // Thread.NEW                            - native thread started, not yet ready to run managed code
  kNative       = 6, //                                       - running in a JNI native method
  kVmWait       = 7, //                                       - waiting on an internal runtime resource
  kSuspended    = 8, //                                       - suspended by GC or debugger
};
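
// A typical state round trip, as sketched by the comments above: a thread
// executes managed code in kRunnable, switches to kNative for the duration of
// a JNI native method, and returns to kRunnable afterwards; contending for a
// monitor moves it to kBlocked, and Object.wait() to kWaiting or kTimedWaiting.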

class PACKED Thread {
 public:
  // Space to throw a StackOverflowError in.
#if !defined(ART_USE_LLVM_COMPILER)
  static const size_t kStackOverflowReservedBytes = 4 * KB;
#else  // LLVM_x86 requires more memory to throw a stack overflow exception.
  static const size_t kStackOverflowReservedBytes = 8 * KB;
#endif

  static const size_t kDefaultStackSize = 16 * KB;

  class StackVisitor {
   public:
    virtual ~StackVisitor() {}
    // Return 'true' if we should continue to visit more frames, 'false' to stop.
    virtual bool VisitFrame(const Frame& frame, uintptr_t pc) = 0;
  };

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void Create(Object* peer, size_t stack_size);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, Object* thread_group);

  // Reset internal state of child thread after fork.
  void InitAfterFork();

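  // Returns the Thread* for the calling thread, read from the pthread_key_self_
  // thread-local slot (see InitPthreadKeySelf); NULL for a thread that was
  // never attached.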
  static Thread* Current() {
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }

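  // Translate a managed java.lang.Thread peer (or a JNI reference to one) into
  // the corresponding native Thread*.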
  static Thread* FromManagedThread(Object* thread_peer);
  static Thread* FromManagedThread(JNIEnv* env, jobject thread);
  static uint32_t LockOwnerFromThreadLock(Object* thread_lock);

  // Translates 172 to pAllocArrayFromCode and so on.
  static void DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers);

  // When full == true, dumps the detailed thread state and the thread stack (used for SIGQUIT).
  // When full == false, dumps a one-line summary of thread state (used for operator<<).
  void Dump(std::ostream& os, bool full = true) const;

  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid);

  ThreadState GetState() const {
    return state_;
  }

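  // Sets the thread state, returning the previous state (see
  // ScopedThreadStateChange below). As its name suggests, the
  // 'WithoutSuspendCheck' variant skips the usual suspension check.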
  ThreadState SetState(ThreadState new_state);
  void SetStateWithoutSuspendCheck(ThreadState new_state);

  bool IsDaemon();
  bool IsSuspended();

  void WaitUntilSuspended();

  bool HoldsLock(Object*);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();

  // Returns the "main" ThreadGroup, used when attaching user threads.
  static Object* GetMainThreadGroup();
  // Returns the "system" ThreadGroup, used when attaching our internal threads.
  static Object* GetSystemThreadGroup();

  bool CanAccessDirectReferences() const {
#ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: when we have a moving collector, we'll need: return state_ == kRunnable;
#endif
    return true;
  }

  uint32_t GetThinLockId() const {
    return thin_lock_id_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
  String* GetThreadName() const;

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name);

  Object* GetPeer() const {
    return peer_;
  }

  Object* GetThreadGroup() const;

  RuntimeStats* GetStats() {
    return &stats_;
  }

  int GetSuspendCount() const {
    return suspend_count_;
  }

  bool IsStillStarting() const;

  // Returns the current Method* and native PC (not dex PC) for this thread.
  Method* GetCurrentMethod(uintptr_t* pc = NULL, Method*** sp = NULL) const;

  bool IsExceptionPending() const {
    return exception_ != NULL;
  }

  Throwable* GetException() const {
    DCHECK(CanAccessDirectReferences());
    return exception_;
  }

  void SetException(Throwable* new_exception) {
    DCHECK(CanAccessDirectReferences());
    CHECK(new_exception != NULL);
    // TODO: CHECK(exception_ == NULL);
    exception_ = new_exception;  // TODO
  }

  void ClearException() {
    exception_ = NULL;
  }

  // Find catch block and perform long jump to appropriate exception handler
  void DeliverException();

  Context* GetLongJumpContext();

  Frame GetTopOfStack() const {
    return top_of_managed_stack_;
  }

  // TODO: this is here for testing, remove when we have exception unit tests
  // that use the real stack
  void SetTopOfStack(void* stack, uintptr_t pc) {
    top_of_managed_stack_.SetSP(reinterpret_cast<Method**>(stack));
    top_of_managed_stack_pc_ = pc;
  }

  void SetTopOfStackPC(uintptr_t pc) {
    top_of_managed_stack_pc_ = pc;
  }

  // If 'msg' is NULL, no detail message is set.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg);

  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)));

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg);

  Frame FindExceptionHandler(void* throw_pc, void** handler_pc);

  void* FindExceptionHandlerInMethod(const Method* method,
                                     void* throw_pc,
                                     const DexFile& dex_file,
                                     ClassLinker* class_linker);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Number of references in SIRTs on this thread
  size_t NumSirtReferences();

  // Number of references allocated in ShadowFrames on this thread
  size_t NumShadowFrameReferences();

  // Number of references allocated in SIRTs & shadow frames on this thread
  size_t NumStackReferences() {
    return NumSirtReferences() + NumShadowFrameReferences();
  }

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj);

  // Is the given obj in this thread's ShadowFrame?
  bool ShadowFrameContains(jobject obj);

  // Is the given obj in this thread's SIRTs & ShadowFrames?
  bool StackReferencesContain(jobject obj);

  void SirtVisitRoots(Heap::RootVisitor* visitor, void* arg);

  void ShadowFrameVisitRoots(Heap::RootVisitor* visitor, void* arg);

  // Convert a jobject into an Object*
  Object* DecodeJObject(jobject obj);

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() {
    MutexLock mu(*wait_mutex_);
    bool interrupted = interrupted_;
    interrupted_ = false;
    return interrupted;
  }

  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() {
    MutexLock mu(*wait_mutex_);
    return interrupted_;
  }

  void Interrupt() {
    MutexLock mu(*wait_mutex_);
    if (interrupted_) {
      return;
    }
    interrupted_ = true;
    NotifyLocked();
  }

  void Notify() {
    MutexLock mu(*wait_mutex_);
    NotifyLocked();
  }
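  // Note the check-and-clear distinction, mirroring java.lang.Thread:
  // Interrupted() returns and clears the flag (Thread.interrupted), while
  // IsInterrupted() only reads it (Thread.isInterrupted). Interrupt() also
  // wakes the thread via NotifyLocked() if it is waiting on a monitor.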

  // Linked list recording transitions from native to managed code
  void PushNativeToManagedRecord(NativeToManagedRecord* record);
  void PopNativeToManagedRecord(const NativeToManagedRecord& record);

  const ClassLoader* GetClassLoaderOverride() {
    // TODO: need to place the class_loader_override_ in a handle
    // DCHECK(CanAccessDirectReferences());
    return class_loader_override_;
  }

  void SetClassLoaderOverride(const ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

  // Creates the internal representation of a stack trace, which is more time-
  // and space-efficient to compute than a StackTraceElement[].
  jobject CreateInternalStackTrace(JNIEnv* env) const;

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
      jobjectArray output_array = NULL, int* stack_depth = NULL);

  void VisitRoots(Heap::RootVisitor* visitor, void* arg);

#if VERIFY_OBJECT_ENABLED
  void VerifyStack();
#else
  void VerifyStack() {}
#endif

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  static ThreadOffset SelfOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, self_));
  }

  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  static ThreadOffset ThinLockIdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
  }

  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  static ThreadOffset SuspendCountOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_));
  }

  static ThreadOffset StateOffset() {
    return ThreadOffset(OFFSETOF_VOLATILE_MEMBER(Thread, state_));
  }

  // Size of stack less any space reserved for stack overflow
  size_t GetStackSize() {
    return stack_size_ - (stack_end_ - stack_begin_);
  }
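  // Illustrative arithmetic: with kDefaultStackSize (16 KB) and stack_end_ at
  // its regular position, stack_end_ - stack_begin_ equals
  // kStackOverflowReservedBytes (4 KB for non-LLVM builds), so this returns 12 KB.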

  // Sets the stack end to the value to be used while handling a stack overflow.
  void SetStackEndForStackOverflow() {
    // During stack overflow we allow use of the full stack
    if (stack_end_ == stack_begin_) {
      DumpStack(std::cerr);
      LOG(FATAL) << "Need to increase kStackOverflowReservedBytes (currently "
                 << kStackOverflowReservedBytes << ")";
    }

    stack_end_ = stack_begin_;
  }

  // Sets the stack end to the value to be used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    stack_end_ = stack_begin_ + kStackOverflowReservedBytes;
  }

  static ThreadOffset StackEndOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, stack_end_));
  }

  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_) +
        OFFSETOF_MEMBER(Frame, sp_));
  }

  static ThreadOffset TopOfManagedStackPcOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_pc_));
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* frame) {
    ShadowFrame* old_frame = top_shadow_frame_;
    top_shadow_frame_ = frame;
    frame->SetLink(old_frame);
    return old_frame;
  }

  ShadowFrame* PopShadowFrame() {
    CHECK(top_shadow_frame_ != NULL);
    ShadowFrame* frame = top_shadow_frame_;
    top_shadow_frame_ = frame->GetLink();
    return frame;
  }
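  // Push/PopShadowFrame form a strict LIFO pair; a typical (illustrative) use:
  //   thread->PushShadowFrame(&frame);
  //   ... execute code whose references the GC must see ...
  //   thread->PopShadowFrame();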

  static ThreadOffset TopShadowFrameOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_shadow_frame_));
  }

  void PushSirt(StackIndirectReferenceTable* sirt);
  StackIndirectReferenceTable* PopSirt();

  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }

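  // Walks the managed stack, invoking visitor->VisitFrame on each frame until
  // it returns false or the stack is exhausted (see StackVisitor above).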
  void WalkStack(StackVisitor* visitor, bool include_upcalls = false) const;

  DebugInvokeReq* GetInvokeReq() {
    return debug_invoke_req_;
  }

  void SetDebuggerUpdatesEnabled(bool enabled);

  bool IsTraceStackEmpty() const {
    return trace_stack_->empty();
  }

  TraceStackFrame GetTraceStackFrame(uint32_t depth) const {
    return trace_stack_->at(trace_stack_->size() - depth - 1);
  }

  void PushTraceStackFrame(const TraceStackFrame& frame) {
    trace_stack_->push_back(frame);
  }

  TraceStackFrame PopTraceStackFrame() {
    TraceStackFrame frame = trace_stack_->back();
    trace_stack_->pop_back();
    return frame;
  }

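  // Lock-hierarchy sanity checks: verify, against the held_mutexes_ table
  // below, that it is safe to take, release, or wait on a mutex of the given
  // rank.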
  void CheckSafeToLockOrUnlock(MutexRank rank, bool is_locking);
  void CheckSafeToWait(MutexRank rank);

 private:
  Thread();
  ~Thread();
  void Destroy();
  friend class ThreadList;  // For ~Thread and Destroy.

  void CreatePeer(const char* name, bool as_daemon, Object* thread_group);
  friend class Runtime; // For CreatePeer.

  void DumpState(std::ostream& os) const;
  void DumpStack(std::ostream& os) const;

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb(); // Like Thread::Current.
  void DumpFromGdb() const; // Like Thread::Dump(std::cerr).

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions();
  void RemoveFromThreadGroup();

  void Init();
  void InitCardTable();
  void InitCpu();
  void InitFunctionPointers();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void NotifyLocked() {
    if (wait_monitor_ != NULL) {
      wait_cond_->Signal();
    }
  }

  static void ThreadExitCallback(void* arg);

  // Thin lock thread id. This is a small integer used by the thin lock implementation.
  // This is not to be confused with the native thread's tid, nor is it the value returned
  // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
  // important difference between this id and the ids visible to managed code is that these
  // ones get reused (to ensure that they fit in the number of bits available).
  uint32_t thin_lock_id_;

  // System thread id.
  pid_t tid_;

  // Our managed peer (an instance of java.lang.Thread).
  Object* peer_;

  // The top_of_managed_stack_ and top_of_managed_stack_pc_ fields are accessed from
  // compiled code, so we keep them early in the structure to (a) avoid having to keep
  // fixing the assembler offsets and (b) improve the chances that these will still be aligned.

  // Top of the managed stack, written out prior to the state transition from
  // kRunnable to kNative. Uses include giving the starting point for scanning
  // a managed stack when a thread is in native code.
  Frame top_of_managed_stack_;
  // PC corresponding to the call out of the top_of_managed_stack_ frame
  uintptr_t top_of_managed_stack_pc_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  mutable Mutex* wait_mutex_;
  ConditionVariable* wait_cond_;
  // Pointer to the monitor lock we're currently waiting on (or NULL), guarded by wait_mutex_.
  Monitor* wait_monitor_;
  // Thread "interrupted" status; stays raised until queried or thrown, guarded by wait_mutex_.
  uint32_t interrupted_;
  // The next thread in the wait set this thread is part of.
  Thread* wait_next_;
  // If we're blocked in MonitorEnter, this is the object we're trying to lock.
  Object* monitor_enter_object_;

  friend class Monitor;

  RuntimeStats stats_;

  // The biased card table; see CardTable for details.
  byte* card_table_;

  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
  // We leave extra space so there's room for the code that throws StackOverflowError.
  byte* stack_end_;

  // Size of the stack
  size_t stack_size_;

  // The "lowest addressable byte" of the stack
  byte* stack_begin_;

  // A linked list (of stack allocated records) recording transitions from
  // native to managed code.
  NativeToManagedRecord* native_to_managed_record_;

  // Top of linked list of stack indirect reference tables or NULL for none
  StackIndirectReferenceTable* top_sirt_;

  // Top of the linked list of shadow frames, or NULL for none.
  // Some backends may require shadow frames to ease the GC's work.
  ShadowFrame* top_shadow_frame_;

  // Every thread may have an associated JNI environment
  JNIEnvExt* jni_env_;

  volatile ThreadState state_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  Runtime* runtime_;

  // The pending exception or NULL.
  Throwable* exception_;

  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_;
  // How much of 'suspend_count_' is by request of the debugger, used to set things right
  // when the debugger detaches. Must be <= suspend_count_.
  int debug_suspend_count_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  const ClassLoader* class_loader_override_;

  // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
  Context* long_jump_context_;

  // A boolean telling us whether we're recursively throwing OOME.
  uint32_t throwing_OutOfMemoryError_;

  Throwable* pre_allocated_OutOfMemoryError_;

  // JDWP invoke-during-breakpoint support.
  DebugInvokeReq* debug_invoke_req_;

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Additional stack used by method tracer to store method and return pc values.
  // Stored as a pointer since std::vector is not PACKED.
  std::vector<TraceStackFrame>* trace_stack_;

  // A cached copy of the java.lang.Thread's name.
  std::string* name_;

  uint32_t held_mutexes_[kMaxMutexRank + 1];

 public:
  // Runtime support function pointers
  EntryPoints entrypoints_;

 private:
  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);

class ScopedThreadStateChange {
 public:
  ScopedThreadStateChange(Thread* thread, ThreadState new_state) : thread_(thread) {
    old_thread_state_ = thread_->SetState(new_state);
  }

  ~ScopedThreadStateChange() {
    thread_->SetState(old_thread_state_);
  }

 private:
  Thread* thread_;
  ThreadState old_thread_state_;
  DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange);
};
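
// Illustrative usage: bracket a blocking operation so the runtime observes
// this thread as waiting, with the previous state restored on scope exit:
//   {
//     ScopedThreadStateChange tsc(Thread::Current(), kVmWait);
//     // ... block on an internal runtime resource ...
//   }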

}  // namespace art

#endif  // ART_SRC_THREAD_H_