thread.h revision 40381fb9dc4b4cf274f1e58b2cdf4396202c6189
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_THREAD_H_
#define ART_SRC_THREAD_H_

#include <pthread.h>

#include <bitset>
#include <iosfwd>
#include <list>
#include <string>

#include "dex_file.h"
#include "globals.h"
#include "jni_internal.h"
#include "logging.h"
#include "macros.h"
#include "mutex.h"
#include "mem_map.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "UniquePtr.h"

namespace art {

class Array;
class Class;
class ClassLinker;
class ClassLoader;
class Context;
class Method;
class Monitor;
class Object;
class Runtime;
class StackIndirectReferenceTable;
class StackTraceElement;
class StaticStorageBase;
class Thread;
class ThreadList;
class Throwable;

template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int32_t> IntArray;

class PACKED Thread {
 public:
  /* thread priorities, from java.lang.Thread */
  enum Priority {
    kMinPriority = 1,
    kNormPriority = 5,
    kMaxPriority = 10,
  };
  enum State {
    // These match up with JDWP values.
    kTerminated   = 0,        // TERMINATED
    kRunnable     = 1,        // RUNNABLE or running now
    kTimedWaiting = 2,        // TIMED_WAITING in Object.wait()
    kBlocked      = 3,        // BLOCKED on a monitor
    kWaiting      = 4,        // WAITING in Object.wait()
    // Non-JDWP states.
    kInitializing = 5,        // allocated, not yet running --- TODO: unnecessary?
    kStarting     = 6,        // native thread started, not yet ready to run managed code
    kNative       = 7,        // off in a JNI native method
    kVmWait       = 8,        // waiting on a VM resource
    kSuspended    = 9,        // suspended, usually by GC or debugger
  };

  // Space to throw a StackOverflowError in.
  static const size_t kStackOverflowReservedBytes = 4 * KB;

  static const size_t kDefaultStackSize = 64 * KB;

  // Runtime support function pointers
  void (*pDebugMe)(Method*, uint32_t);
  void* (*pMemcpy)(void*, const void*, size_t);
  uint64_t (*pShlLong)(uint64_t, uint32_t);
  uint64_t (*pShrLong)(uint64_t, uint32_t);
  uint64_t (*pUshrLong)(uint64_t, uint32_t);
  float (*pI2f)(int);
  int (*pF2iz)(float);
  float (*pD2f)(double);
  double (*pF2d)(float);
  double (*pI2d)(int);
  int (*pD2iz)(double);
  float (*pL2f)(long);
  double (*pL2d)(long);
  long long (*pF2l)(float);
  long long (*pD2l)(double);
  float (*pFadd)(float, float);
  float (*pFsub)(float, float);
  float (*pFdiv)(float, float);
  float (*pFmul)(float, float);
  float (*pFmodf)(float, float);
  double (*pDadd)(double, double);
  double (*pDsub)(double, double);
  double (*pDdiv)(double, double);
  double (*pDmul)(double, double);
  double (*pFmod)(double, double);
  int (*pIdivmod)(int, int);
  int (*pIdiv)(int, int);
  long long (*pLmul)(long long, long long);
  long long (*pLdivmod)(long long, long long);
  void (*pCheckSuspendFromCode)(Thread*);  // Stub that is called when the suspend count is non-zero
  void (*pTestSuspendFromCode)();  // Stub that is periodically called to test the suspend count
  void* (*pAllocObjectFromCode)(uint32_t, void*);
  void* (*pAllocArrayFromCode)(uint32_t, void*, int32_t);
  void (*pCanPutArrayElementFromCode)(void*, void*);
  void* (*pCheckAndAllocArrayFromCode)(uint32_t, void*, int32_t);
  void (*pCheckCastFromCode)(void*, void*);
  Object* (*pDecodeJObjectInThread)(Thread* thread, jobject obj);
  void (*pDeliverException)(void*);
  void* (*pFindInstanceFieldFromCode)(uint32_t, void*);
  Method* (*pFindInterfaceMethodInCache)(Class*, uint32_t, const Method*, struct DvmDex*);
  void* (*pFindNativeMethod)(Thread* thread);
  int32_t (*pGet32Static)(uint32_t, void*);
  int64_t (*pGet64Static)(uint32_t, void*);
  void* (*pGetObjStatic)(uint32_t, void*);
  void (*pHandleFillArrayDataFromCode)(void*, void*);
  void* (*pInitializeStaticStorage)(uint32_t, void*);
  uint32_t (*pInstanceofNonTrivialFromCode)(const Class*, const Class*);
  void (*pInvokeInterfaceTrampoline)(uint32_t, void*);
  Class* (*pInitializeTypeFromCode)(uint32_t, Method*);
  void (*pLockObjectFromCode)(void*);
  void (*pObjectInit)(void*);
  void (*pResolveMethodFromCode)(Method*, uint32_t);
  void* (*pResolveStringFromCode)(void*, uint32_t);
  int (*pSet32Static)(uint32_t, void*, int32_t);
  int (*pSet64Static)(uint32_t, void*, int64_t);
  int (*pSetObjStatic)(uint32_t, void*, void*);
  void (*pThrowStackOverflowFromCode)(void*);
  void (*pThrowNullPointerFromCode)();
  void (*pThrowArrayBoundsFromCode)(int32_t, int32_t);
  void (*pThrowDivZeroFromCode)();
  void (*pThrowVerificationErrorFromCode)(int32_t, int32_t);
  void (*pThrowNegArraySizeFromCode)(int32_t);
  void (*pThrowNoSuchMethodFromCode)(int32_t);
  void (*pThrowAbstractMethodErrorFromCode)(Method* method, Thread* thread, Method** sp);
  void (*pUnlockObjectFromCode)(void*);
  void* (*pUnresolvedDirectMethodTrampolineFromCode)(int32_t, void*, Thread*,
                                                     Runtime::TrampolineType);
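
  // A minimal sketch of how generated code might reach one of the helpers above
  // (illustrative only; the pointers are presumably installed by InitFunctionPointers(),
  // declared below, and the actual calling convention is architecture-specific):
  //
  //   size_t offset = OFFSETOF_MEMBER(Thread, pAllocObjectFromCode);
  //   // emit: load [current_thread_register + offset], then call through the loaded address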

  class StackVisitor {
   public:
    virtual ~StackVisitor() {}
    virtual void VisitFrame(const Frame& frame, uintptr_t pc) = 0;
  };

  // Creates a new thread.
  static void Create(Object* peer, size_t stack_size);

  // Attaches the calling native thread to the runtime.
  static Thread* Attach(const Runtime* runtime, const char* name, bool as_daemon);

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  static Thread* Current() {
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }

  static Thread* FromManagedThread(JNIEnv* env, jobject thread);
  static uint32_t LockOwnerFromThreadLock(Object* thread_lock);

  void Dump(std::ostream& os) const;

  State GetState() const {
    return state_;
  }

  State SetState(State new_state);

  bool IsDaemon();

  void WaitUntilSuspended();

  bool HoldsLock(Object*);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();
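
  // A minimal sketch of one possible priority mapping, using setpriority()/getpriority()
  // from <sys/resource.h> (lower nice values mean higher priority). The kNiceValues table
  // here is an assumed example, not necessarily the mapping the runtime uses:
  //
  //   static const int kNiceValues[10] = { 19, 16, 13, 10, 0, -2, -4, -5, -6, -8 };
  //   setpriority(PRIO_PROCESS, tid, kNiceValues[newPriority - 1]);  // SetNativePriority
  //   int nice = getpriority(PRIO_PROCESS, tid);                     // GetNativePriority...
  //   // ...then return the 1-based index of the kNiceValues entry closest to 'nice'.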

  bool CanAccessDirectReferences() const {
#ifdef MOVING_GARBAGE_COLLECTOR
    // TODO: when we have a moving collector, we'll need: return state_ == kRunnable;
#endif
    return true;
  }

  uint32_t GetThinLockId() const {
    return thin_lock_id_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  // Returns the java.lang.Thread's name, or NULL.
  String* GetName() const;

  // Returns the current method's declaring class' source file and the current line number.
  void GetCurrentLocation(const char*& source_file, uint32_t& line_number) const;

  Object* GetPeer() const {
    return peer_;
  }

  RuntimeStats* GetStats() {
    return &stats_;
  }

  // Returns the Method* for the current method.
  // This is used by the JNI implementation for logging and diagnostic purposes.
  const Method* GetCurrentMethod() const;

  bool IsExceptionPending() const {
    return exception_ != NULL;
  }

  Throwable* GetException() const {
    DCHECK(CanAccessDirectReferences());
    return exception_;
  }

  void SetException(Throwable* new_exception) {
    DCHECK(CanAccessDirectReferences());
    CHECK(new_exception != NULL);
    // TODO: CHECK(exception_ == NULL);
    exception_ = new_exception;  // TODO
  }

  void ClearException() {
    exception_ = NULL;
  }
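
  // A minimal usage sketch of the pending-exception protocol (the descriptor string and the
  // 'length'/'index' values are illustrative; ThrowNewExceptionF is declared further below):
  //
  //   Thread* self = Thread::Current();
  //   self->ThrowNewExceptionF("Ljava/lang/ArrayIndexOutOfBoundsException;",
  //                            "length=%d; index=%d", length, index);
  //   if (self->IsExceptionPending()) {
  //     Throwable* t = self->GetException();
  //     // ...report or rethrow t, then...
  //     self->ClearException();
  //   }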

  // Find the catch block and perform a long jump to the appropriate exception handler.
  void DeliverException();

  Context* GetLongJumpContext();

  Frame GetTopOfStack() const {
    return top_of_managed_stack_;
  }

  // TODO: this is here for testing, remove when we have exception unit tests
  // that use the real stack
  void SetTopOfStack(void* stack, uintptr_t pc) {
    top_of_managed_stack_.SetSP(reinterpret_cast<Method**>(stack));
    top_of_managed_stack_pc_ = pc;
  }

  void SetTopOfStackPC(uintptr_t pc) {
    top_of_managed_stack_pc_ = pc;
  }

  // 'msg' may be NULL.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)));

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  void ThrowOutOfMemoryError(const char* msg);
  void ThrowOutOfMemoryError(Class* c, size_t byte_count);

  Frame FindExceptionHandler(void* throw_pc, void** handler_pc);

  void* FindExceptionHandlerInMethod(const Method* method,
                                     void* throw_pc,
                                     const DexFile& dex_file,
                                     ClassLinker* class_linker);

  void SetName(const char* name);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Number of references allocated in SIRTs on this thread
  size_t NumSirtReferences();

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj);

  void SirtVisitRoots(Heap::RootVisitor* visitor, void* arg);

  // Convert a jobject into an Object*.
  Object* DecodeJObject(jobject obj);

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() {
    MutexLock mu(*wait_mutex_);
    bool interrupted = interrupted_;
    interrupted_ = false;
    return interrupted;
  }

  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() {
    MutexLock mu(*wait_mutex_);
    return interrupted_;
  }

  void Interrupt() {
    MutexLock mu(*wait_mutex_);
    if (interrupted_) {
      return;
    }
    interrupted_ = true;
    NotifyLocked();
  }

  void Notify() {
    MutexLock mu(*wait_mutex_);
    NotifyLocked();
  }
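
  // A minimal sketch of the difference between the two queries above: Interrupted()
  // clears the flag, IsInterrupted() does not.
  //
  //   Thread* self = Thread::Current();
  //   self->Interrupt();
  //   bool a = self->IsInterrupted();  // true; the flag is still set
  //   bool b = self->Interrupted();    // true, and the flag is cleared
  //   bool c = self->IsInterrupted();  // false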

  // Linked list recording transitions from native to managed code
  void PushNativeToManagedRecord(NativeToManagedRecord* record);
  void PopNativeToManagedRecord(const NativeToManagedRecord& record);

  const ClassLoader* GetClassLoaderOverride() {
    // TODO: need to place the class_loader_override_ in a handle
    // DCHECK(CanAccessDirectReferences());
    return class_loader_override_;
  }

  void SetClassLoaderOverride(const ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

  // Create the internal representation of a stack trace, which is more time- and
  // space-efficient to compute than a StackTraceElement[].
  jobject CreateInternalStackTrace(JNIEnv* env) const;

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
      jobjectArray output_array = NULL, int* stack_depth = NULL);
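
  // A minimal usage sketch: build the compact internal form first, and expand it to a
  // StackTraceElement[] only if it is actually needed:
  //
  //   jobject internal = Thread::Current()->CreateInternalStackTrace(env);
  //   int depth;
  //   jobjectArray elements =
  //       Thread::InternalStackTraceToStackTraceElementArray(env, internal, NULL, &depth);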

  void VisitRoots(Heap::RootVisitor* visitor, void* arg);

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  static ThreadOffset SelfOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, self_));
  }

  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  static ThreadOffset ThinLockIdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
  }

  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  static ThreadOffset SuspendCountOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_));
  }

  static ThreadOffset StateOffset() {
    return ThreadOffset(OFFSETOF_VOLATILE_MEMBER(Thread, state_));
  }
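
  // These offsets let compiled code address Thread fields relative to the register holding
  // the current thread. A hedged sketch of a pending-exception check (Int32Value() is
  // assumed to be provided by ThreadOffset, from offsets.h):
  //
  //   // emit: load the word at [current_thread_register + ExceptionOffset().Int32Value()]
  //   //       and branch to the exception path if it is non-NULL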

  // Size of stack less any space reserved for stack overflow
  size_t GetStackSize() {
    return stack_size_ - (stack_end_ - stack_base_);
  }

  // Set the stack end to the value used while a stack overflow is being handled.
  void SetStackEndForStackOverflow() {
    // During stack overflow we allow use of the full stack
    if (stack_end_ == stack_base_) {
      DumpStack(std::cerr);
      LOG(FATAL) << "Need to increase kStackOverflowReservedBytes (currently "
                 << kStackOverflowReservedBytes << ")";
    }

    stack_end_ = stack_base_;
  }

  // Set the stack end to the value used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    stack_end_ = stack_base_ + kStackOverflowReservedBytes;
  }
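
  // A sketch of how the reserved region is intended to be used (the explicit 'if' is only
  // illustrative; the real check is emitted by the compiler or performed in runtime stubs):
  //
  //   if (sp < self->stack_end_) {              // stacks grow down, so this means overflow
  //     self->SetStackEndForStackOverflow();    // temporarily allow use of the reserved space
  //     // ...construct and deliver the StackOverflowError (see pThrowStackOverflowFromCode)...
  //     self->ResetDefaultStackEnd();           // restore the reservation
  //   }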

  static ThreadOffset StackEndOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, stack_end_));
  }

  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_) +
        OFFSETOF_MEMBER(Frame, sp_));
  }

  static ThreadOffset TopOfManagedStackPcOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_pc_));
  }

  void PushSirt(StackIndirectReferenceTable* sirt);
  StackIndirectReferenceTable* PopSirt();

  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }

  void WalkStack(StackVisitor* visitor) const;
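
  // A minimal sketch of a visitor: WalkStack() drives the visitor over the managed frames.
  // CountingVisitor here is illustrative, not part of the runtime:
  //
  //   struct CountingVisitor : public Thread::StackVisitor {
  //     CountingVisitor() : count(0) {}
  //     virtual void VisitFrame(const Frame& frame, uintptr_t pc) { ++count; }
  //     size_t count;
  //   };
  //   CountingVisitor visitor;
  //   Thread::Current()->WalkStack(&visitor);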

 private:
  Thread();
  ~Thread();
  friend class ThreadList;  // For ~Thread.

  void CreatePeer(const char* name, bool as_daemon);
  friend class Runtime;  // For CreatePeer.

  void DumpState(std::ostream& os) const;
  void DumpStack(std::ostream& os) const;

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  void DumpFromGdb() const;  // Like Thread::Dump(std::cerr).

  void Attach(const Runtime* runtime);
  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions();

  void InitCpu();
  void InitFunctionPointers();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void NotifyLocked() {
    if (wait_monitor_ != NULL) {
      wait_cond_->Signal();
    }
  }

  static void ThreadExitCallback(void* arg);

  void WalkStackUntilUpCall(StackVisitor* visitor, bool include_upcall) const;

  // Thin lock thread id. This is a small integer used by the thin lock implementation.
  // This is not to be confused with the native thread's tid, nor is it the value returned
  // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
  // important difference between this id and the ids visible to managed code is that these
  // ones get reused (to ensure that they fit in the number of bits available).
  uint32_t thin_lock_id_;

  // System thread id.
  pid_t tid_;

  // Our managed peer (an instance of java.lang.Thread).
  Object* peer_;

  // The top_of_managed_stack_ and top_of_managed_stack_pc_ fields are accessed from
  // compiled code, so we keep them early in the structure to (a) avoid having to keep
  // fixing the assembler offsets and (b) improve the chances that these will still be aligned.

  // Top of the managed stack, written out prior to the state transition from
  // kRunnable to kNative. Uses include giving the starting point for scanning
  // a managed stack when a thread is in native code.
  Frame top_of_managed_stack_;
  // PC corresponding to the call out of the top_of_managed_stack_ frame
  uintptr_t top_of_managed_stack_pc_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  mutable Mutex* wait_mutex_;
  ConditionVariable* wait_cond_;
  // Pointer to the monitor lock we're currently waiting on (or NULL), guarded by wait_mutex_.
  Monitor* wait_monitor_;
  // Thread "interrupted" status; stays raised until queried or thrown, guarded by wait_mutex_.
  uint32_t interrupted_;
  // The next thread in the wait set this thread is part of.
  Thread* wait_next_;
  // If we're blocked in MonitorEnter, this is the object we're trying to lock.
  Object* monitor_enter_object_;

  friend class Monitor;

  RuntimeStats stats_;

  // FIXME: placeholder for the gc cardTable
  uint32_t card_table_;

  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
  // We leave extra space so there's room for the code that throws StackOverflowError.
  byte* stack_end_;

  // Size of the stack
  size_t stack_size_;

  // The "lowest addressable byte" of the stack
  byte* stack_base_;

  // A linked list (of stack allocated records) recording transitions from
  // native to managed code.
  NativeToManagedRecord* native_to_managed_record_;

  // Top of linked list of stack indirect reference tables or NULL for none
  StackIndirectReferenceTable* top_sirt_;

  // Every thread may have an associated JNI environment
  JNIEnvExt* jni_env_;

  volatile State state_;

  // Initialized to "this". On certain architectures (such as x86) reading
  // off of Thread::Current is easy but getting the address of Thread::Current
  // is hard. This field can be read off of Thread::Current to give the address.
  Thread* self_;

  Runtime* runtime_;

  // The pending exception or NULL.
  Throwable* exception_;

  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  const ClassLoader* class_loader_override_;

  // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
  Context* long_jump_context_;

  // A boolean telling us whether we're recursively throwing OOME.
  uint32_t throwing_OutOfMemoryError_;

  Throwable* pre_allocated_OutOfMemoryError_;

  // TLS key used to retrieve the VM thread object.
  static pthread_key_t pthread_key_self_;

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const Thread::State& state);

class ScopedThreadStateChange {
 public:
  ScopedThreadStateChange(Thread* thread, Thread::State new_state) : thread_(thread) {
    old_thread_state_ = thread_->SetState(new_state);
  }

  ~ScopedThreadStateChange() {
    thread_->SetState(old_thread_state_);
  }

 private:
  Thread* thread_;
  Thread::State old_thread_state_;
  DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange);
};
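
// A minimal usage sketch: the scoped change is typically wrapped around code that may
// block, so the thread is not left looking kRunnable while it waits; the destructor
// restores the previous state:
//
//   {
//     ScopedThreadStateChange tsc(Thread::Current(), Thread::kVmWait);
//     // ...block on a VM resource...
//   }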

}  // namespace art

#endif  // ART_SRC_THREAD_H_