thread.h revision bdb0391258abc54bf77c676e36847d28a783bfe5
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_SRC_THREAD_H_
#define ART_SRC_THREAD_H_

#include <pthread.h>

#include <bitset>
#include <iosfwd>
#include <list>
#include <string>

#include "dex_file.h"
#include "globals.h"
#include "jni_internal.h"
#include "logging.h"
#include "macros.h"
#include "mutex.h"
#include "mem_map.h"
#include "offsets.h"
#include "UniquePtr.h"

namespace art {

class Array;
class Class;
class ClassLinker;
class ClassLoader;
class Context;
class Field;
class Method;
class Monitor;
class Object;
class Runtime;
class Thread;
class ThreadList;
class Throwable;
class StackTraceElement;
class StaticStorageBase;

template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<int32_t> IntArray;

// Stack allocated indirect reference table, allocated within the bridge frame
// between managed and native code.
class StackIndirectReferenceTable {
 public:
  // Number of references contained within this SIRT
  size_t NumberOfReferences() {
    return number_of_references_;
  }

  // Link to previous SIRT or NULL
  StackIndirectReferenceTable* Link() {
    return link_;
  }

  Object** References() {
    return references_;
  }

  // Offset of length within SIRT, used by generated code
  static size_t NumberOfReferencesOffset() {
    return OFFSETOF_MEMBER(StackIndirectReferenceTable, number_of_references_);
  }

  // Offset of link within SIRT, used by generated code
  static size_t LinkOffset() {
    return OFFSETOF_MEMBER(StackIndirectReferenceTable, link_);
  }

 private:
  StackIndirectReferenceTable() {}

  size_t number_of_references_;
  StackIndirectReferenceTable* link_;

  // Fake array, really allocated and filled in by jni_compiler.
  Object* references_[0];

  DISALLOW_COPY_AND_ASSIGN(StackIndirectReferenceTable);
};
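
// A minimal illustrative sketch (not part of this header's API): given the
// top SIRT of a thread, runtime code can walk the chain of stack-allocated
// tables via Link() and visit every reference, which is how helpers such as
// Thread::NumSirtReferences() and Thread::SirtContains() are expected to
// operate. CountSirtReferences is a hypothetical helper, shown only to
// illustrate the data structure.
//
//   static size_t CountSirtReferences(StackIndirectReferenceTable* sirt) {
//     size_t count = 0;
//     for (; sirt != NULL; sirt = sirt->Link()) {
//       count += sirt->NumberOfReferences();
//     }
//     return count;
//   }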

struct NativeToManagedRecord {
  NativeToManagedRecord* link_;
  void* last_top_of_managed_stack_;
  uintptr_t last_top_of_managed_stack_pc_;
};

// Iterator over managed frames up to the first native-to-managed transition
class Frame {
 public:
  Frame() : sp_(NULL) {}

  Method* GetMethod() const {
    return (sp_ != NULL) ? *sp_ : NULL;
  }

  bool HasNext() const {
    return NextMethod() != NULL;
  }

  void Next();

  uintptr_t GetReturnPC() const;

  uintptr_t LoadCalleeSave(int num) const;

  Method** GetSP() const {
    return sp_;
  }

  // TODO: this is here for testing, remove when we have exception unit tests
  // that use the real stack
  void SetSP(Method** sp) {
    sp_ = sp;
  }

 private:
  Method* NextMethod() const;

  friend class Thread;

  Method** sp_;
};
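
// A minimal usage sketch (assumption, not code from this header): runtime
// code that wants to inspect the managed frames below a transition can copy
// a thread's top-of-stack Frame by value and advance it with Next() until
// HasNext() reports no caller. DescribeStack is a hypothetical helper.
//
//   static void DescribeStack(Frame frame, std::ostream& os) {
//     while (frame.GetMethod() != NULL) {
//       os << "frame at sp=" << frame.GetSP() << "\n";
//       if (!frame.HasNext()) {
//         break;
//       }
//       frame.Next();
//     }
//   }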

class Thread {
 public:
  /* thread priorities, from java.lang.Thread */
  enum Priority {
    kMinPriority = 1,
    kNormPriority = 5,
    kMaxPriority = 10,
  };
  enum State {
    kUnknown = -1,

    // These match up with JDWP values.
    kTerminated   = 0,        // TERMINATED
    kRunnable     = 1,        // RUNNABLE or running now
    kTimedWaiting = 2,        // TIMED_WAITING in Object.wait()
    kBlocked      = 3,        // BLOCKED on a monitor
    kWaiting      = 4,        // WAITING in Object.wait()
    // Non-JDWP states.
    kInitializing = 5,        // allocated, not yet running --- TODO: unnecessary?
    kStarting     = 6,        // native thread started, not yet ready to run managed code
    kNative       = 7,        // off in a JNI native method
    kVmWait       = 8,        // waiting on a VM resource
    kSuspended    = 9,        // suspended, usually by GC or debugger
  };

  static const size_t kStackOverflowReservedBytes = 1024; // Space to throw a StackOverflowError in.

  static const size_t kDefaultStackSize = 64 * KB;

  // Runtime support function pointers
  void (*pDebugMe)(Method*, uint32_t);
  void* (*pMemcpy)(void*, const void*, size_t);
  uint64_t (*pShlLong)(uint64_t, uint32_t);
  uint64_t (*pShrLong)(uint64_t, uint32_t);
  uint64_t (*pUshrLong)(uint64_t, uint32_t);
  float (*pI2f)(int);
  int (*pF2iz)(float);
  float (*pD2f)(double);
  double (*pF2d)(float);
  double (*pI2d)(int);
  int (*pD2iz)(double);
  float (*pL2f)(long);
  double (*pL2d)(long);
  long long (*pF2l)(float);
  long long (*pD2l)(double);
  float (*pFadd)(float, float);
  float (*pFsub)(float, float);
  float (*pFdiv)(float, float);
  float (*pFmul)(float, float);
  float (*pFmodf)(float, float);
  double (*pDadd)(double, double);
  double (*pDsub)(double, double);
  double (*pDdiv)(double, double);
  double (*pDmul)(double, double);
  double (*pFmod)(double, double);
  int (*pIdivmod)(int, int);
  int (*pIdiv)(int, int);
  long long (*pLmul)(long long, long long);
  long long (*pLdivmod)(long long, long long);
  Array* (*pAllocFromCode)(uint32_t, Method*, int32_t);
  Array* (*pCheckAndAllocFromCode)(uint32_t, Method*, int32_t);
  Object* (*pAllocObjectFromCode)(uint32_t, Method*);
  uint32_t (*pGet32Static)(uint32_t, const Method*);
  void (*pSet32Static)(uint32_t, const Method*, uint32_t);
  uint64_t (*pGet64Static)(uint32_t, const Method*);
  void (*pSet64Static)(uint32_t, const Method*, uint64_t);
  Object* (*pGetObjStatic)(uint32_t, const Method*);
  void (*pSetObjStatic)(uint32_t, const Method*, Object*);
  void (*pCanPutArrayElementFromCode)(const Class*, const Class*);
  bool (*pInstanceofNonTrivialFromCode)(const Object*, const Class*);
  void (*pCheckCastFromCode)(const Class*, const Class*);
  Method* (*pFindInterfaceMethodInCache)(Class*, uint32_t, const Method*, struct DvmDex*);
  void (*pUnlockObjectFromCode)(Thread*, Object*);
  void (*pLockObjectFromCode)(Thread*, Object*);
  void (*pThrowException)(void*);
  void (*pHandleFillArrayDataFromCode)(Array*, const uint16_t*);
  Class* (*pInitializeTypeFromCode)(uint32_t, Method*);
  void (*pResolveMethodFromCode)(Method*, uint32_t);
  void (*pInvokeInterfaceTrampoline)(void*, void*, void*, void*);
  StaticStorageBase* (*pInitializeStaticStorage)(uint32_t, const Method*);
  Field* (*pFindFieldFromCode)(uint32_t, const Method*);
  void (*pCheckSuspendFromCode)(Thread*);
  void (*pStackOverflowFromCode)(Method*);
  void (*pThrowNullPointerFromCode)();
  void (*pThrowArrayBoundsFromCode)(int32_t, int32_t);
  void (*pThrowDivZeroFromCode)();
  void (*pThrowVerificationErrorFromCode)(int32_t, int32_t);
  void (*pThrowNegArraySizeFromCode)(int32_t);
  void (*pThrowRuntimeExceptionFromCode)(int32_t);
  void (*pThrowInternalErrorFromCode)(int32_t);
  void (*pThrowNoSuchMethodFromCode)(int32_t);
  void (*pThrowAbstractMethodErrorFromCode)(Method* method, Thread* thread);
  void* (*pFindNativeMethod)(Thread* thread);
  Object* (*pDecodeJObjectInThread)(Thread* thread, jobject obj);

  class StackVisitor {
   public:
    virtual ~StackVisitor() {}
    virtual void VisitFrame(const Frame& frame, uintptr_t pc) = 0;
  };
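
  // A minimal sketch (not part of this header) of how a StackVisitor might be
  // used: subclass it, override VisitFrame(), and let the thread's stack
  // walking machinery invoke it once per managed frame. FrameCounter is a
  // hypothetical visitor, shown only for illustration.
  //
  //   class FrameCounter : public Thread::StackVisitor {
  //    public:
  //     FrameCounter() : count_(0) {}
  //     virtual ~FrameCounter() {}
  //     virtual void VisitFrame(const Frame& frame, uintptr_t pc) {
  //       ++count_;  // count every managed frame the walker reports
  //     }
  //     size_t count_;
  //   };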

  // Creates a new thread.
  static void Create(Object* peer, size_t stack_size);

  // Creates a Thread instance for the calling native thread and attaches it to the runtime.
  static Thread* Attach(const Runtime* runtime, const char* name, bool as_daemon);

  static Thread* Current() {
    void* thread = pthread_getspecific(Thread::pthread_key_self_);
    return reinterpret_cast<Thread*>(thread);
  }

  static Thread* FromManagedThread(JNIEnv* env, jobject thread) {
    // TODO: make these more generally available, and cached.
    jclass java_lang_Thread = env->FindClass("java/lang/Thread");
    jfieldID fid = env->GetFieldID(java_lang_Thread, "vmData", "I");
    return reinterpret_cast<Thread*>(static_cast<uintptr_t>(env->GetIntField(thread, fid)));
  }

  void Dump(std::ostream& os) const;

  State GetState() const {
    return state_;
  }

  State SetState(State new_state);

  void WaitUntilSuspended();

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();
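
  // A hedged illustration (not this class's actual mapping table): one way
  // SetNativePriority() could translate the 1-10 Java priority into a nice
  // value and apply it with setpriority(2) from <sys/resource.h>. The mapping
  // values and the helper name JavaPriorityToNice are assumptions made only
  // for illustration.
  //
  //   static int JavaPriorityToNice(int java_priority) {
  //     // kNormPriority (5) maps to nice 0; higher Java priority -> lower nice.
  //     return (kNormPriority - java_priority) * 2;
  //   }
  //   ...
  //   setpriority(PRIO_PROCESS, GetTid(), JavaPriorityToNice(newPriority));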

  bool CanAccessDirectReferences() const {
    // TODO: when we have a moving collector, we'll need: return state_ == kRunnable;
    return true;
  }

  uint32_t GetThinLockId() const {
    return thin_lock_id_;
  }

  pid_t GetTid() const {
    return tid_;
  }

  pthread_t GetImpl() const {
    return pthread_;
  }

  Object* GetPeer() const {
    return peer_;
  }

  // Returns the Method* for the current method.
  // This is used by the JNI implementation for logging and diagnostic purposes.
  const Method* GetCurrentMethod() const {
    return top_of_managed_stack_.GetMethod();
  }

  bool IsExceptionPending() const {
    return exception_ != NULL;
  }

  Throwable* GetException() const {
    DCHECK(CanAccessDirectReferences());
    return exception_;
  }

  void SetException(Throwable* new_exception) {
    DCHECK(CanAccessDirectReferences());
    CHECK(new_exception != NULL);
    // TODO: CHECK(exception_ == NULL);
    exception_ = new_exception;  // TODO
  }

  void ClearException() {
    exception_ = NULL;
  }

  // Find catch block and perform long jump to appropriate exception handler
  void DeliverException(Throwable* exception);

  Context* GetLongJumpContext();

  Frame GetTopOfStack() const {
    return top_of_managed_stack_;
  }

  // TODO: this is here for testing, remove when we have exception unit tests
  // that use the real stack
  void SetTopOfStack(void* stack, uintptr_t pc) {
    top_of_managed_stack_.SetSP(reinterpret_cast<Method**>(stack));
    top_of_managed_stack_pc_ = pc;
  }

  void SetTopOfStackPC(uintptr_t pc) {
    top_of_managed_stack_pc_ = pc;
  }

  // Returns a special method that describes all callee saves being spilled to
  // the stack.
  Method* CalleeSaveMethod() const;

  void ThrowNewException(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__ ((format(printf, 3, 4)));

  // This exception is special, because we need to pre-allocate an instance.
  void ThrowOutOfMemoryError();

  Frame FindExceptionHandler(void* throw_pc, void** handler_pc);

  void* FindExceptionHandlerInMethod(const Method* method,
                                     void* throw_pc,
                                     const DexFile& dex_file,
                                     ClassLinker* class_linker);

  void SetName(const char* name);

  static void Startup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return jni_env_;
  }

  // Number of references allocated in SIRTs on this thread
  size_t NumSirtReferences();

  // Is the given obj in this thread's stack indirect reference table?
  bool SirtContains(jobject obj);

  // Convert a jobject into an Object*
  Object* DecodeJObject(jobject obj);

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() {
    MutexLock mu(wait_mutex_);
    bool interrupted = interrupted_;
    interrupted_ = false;
    return interrupted;
  }

  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() {
    MutexLock mu(wait_mutex_);
    return interrupted_;
  }

  void RegisterExceptionEntryPoint(void (*handler)(Method**)) {
    exception_entry_point_ = handler;
  }

  void RegisterSuspendCountEntryPoint(void (*handler)(Method**)) {
    suspend_count_entry_point_ = handler;
  }

  // Linked list recording transitions from native to managed code
  void PushNativeToManagedRecord(NativeToManagedRecord* record) {
    record->last_top_of_managed_stack_ = reinterpret_cast<void*>(top_of_managed_stack_.GetSP());
    record->last_top_of_managed_stack_pc_ = top_of_managed_stack_pc_;
    record->link_ = native_to_managed_record_;
    native_to_managed_record_ = record;
    top_of_managed_stack_.SetSP(NULL);
  }
  void PopNativeToManagedRecord(const NativeToManagedRecord& record) {
    native_to_managed_record_ = record.link_;
    top_of_managed_stack_.SetSP(reinterpret_cast<Method**>(record.last_top_of_managed_stack_));
    top_of_managed_stack_pc_ = record.last_top_of_managed_stack_pc_;
  }
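
  // A minimal usage sketch (assumption, not code from this header): a stub
  // that re-enters managed code from native code would bracket the call with
  // a stack-allocated record so the previous managed stack top can be
  // restored afterwards. CallManagedCode is a hypothetical function.
  //
  //   void CallManagedCode(Thread* self) {
  //     NativeToManagedRecord record;
  //     self->PushNativeToManagedRecord(&record);  // saves the current top-of-stack
  //     // ... invoke managed code, which establishes a new managed stack ...
  //     self->PopNativeToManagedRecord(record);    // restores the saved top-of-stack
  //   }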

  const ClassLoader* GetClassLoaderOverride() {
    // TODO: need to place the class_loader_override_ in a handle
    // DCHECK(CanAccessDirectReferences());
    return class_loader_override_;
  }

  void SetClassLoaderOverride(const ClassLoader* class_loader_override) {
    class_loader_override_ = class_loader_override;
  }

  // Create the internal representation of a stack trace, which is more time-
  // and space-efficient to compute than a StackTraceElement[]
  jobject CreateInternalStackTrace() const;

  // Convert an internal stack trace representation to a StackTraceElement[]
  static jobjectArray
      InternalStackTraceToStackTraceElementArray(jobject internal, JNIEnv* env);

  void VisitRoots(Heap::RootVisitor* visitor, void* arg) const;

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  static ThreadOffset SelfOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, self_));
  }

  static ThreadOffset ExceptionOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_));
  }

  static ThreadOffset IdOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, thin_lock_id_));
  }

  static ThreadOffset CardTableOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, card_table_));
  }

  static ThreadOffset SuspendCountOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_));
  }

  static ThreadOffset StateOffset() {
    return ThreadOffset(OFFSETOF_VOLATILE_MEMBER(Thread, state_));
  }

  static ThreadOffset StackEndOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, stack_end_));
  }

  static ThreadOffset JniEnvOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, jni_env_));
  }

  static ThreadOffset TopOfManagedStackOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_) +
        OFFSETOF_MEMBER(Frame, sp_));
  }

  static ThreadOffset TopOfManagedStackPcOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_of_managed_stack_pc_));
  }

  static ThreadOffset TopSirtOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, top_sirt_));
  }

  static ThreadOffset ExceptionEntryPointOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, exception_entry_point_));
  }

  static ThreadOffset SuspendCountEntryPointOffset() {
    return ThreadOffset(OFFSETOF_MEMBER(Thread, suspend_count_entry_point_));
  }

 private:
  Thread();
  ~Thread();
  friend class ThreadList;  // For ~Thread.

  void CreatePeer(const char* name, bool as_daemon);
  friend class Runtime; // For CreatePeer.

  void DumpState(std::ostream& os) const;
  void DumpStack(std::ostream& os) const;

  void Attach(const Runtime* runtime);
  static void* CreateCallback(void* arg);

  void InitCpu();
  void InitFunctionPointers();
  void InitStackHwm();

  static void ThreadExitCallback(void* arg);

  void WalkStack(StackVisitor* visitor) const;

  void WalkStackUntilUpCall(StackVisitor* visitor) const;

  // Thin lock thread id. This is a small integer used by the thin lock implementation.
  // This is not to be confused with the native thread's tid, nor is it the value returned
  // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
  // important difference between this id and the ids visible to managed code is that these
  // ones get reused (to ensure that they fit in the number of bits available).
  uint32_t thin_lock_id_;

  // System thread id.
  pid_t tid_;

  // Native thread handle.
  pthread_t pthread_;

  // Our managed peer (an instance of java.lang.Thread).
  Object* peer_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  mutable Mutex wait_mutex_;
  // Pointer to the monitor lock we're currently waiting on (or NULL), guarded by wait_mutex_.
  Monitor* wait_monitor_;
  // Thread "interrupted" status; stays raised until queried or thrown, guarded by wait_mutex_.
  bool interrupted_;

  // FIXME: placeholder for the gc cardTable
  uint32_t card_table_;

  // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
  // We leave extra space so there's room for the code that throws StackOverflowError.
  byte* stack_end_;

  // Top of the managed stack, written out prior to the state transition from
  // kRunnable to kNative. Uses include giving the starting point for scanning
  // a managed stack when a thread is in native code.
  Frame top_of_managed_stack_;

  // PC corresponding to the call out of the top_of_managed_stack_ frame
  uintptr_t top_of_managed_stack_pc_;

  // A linked list (of stack allocated records) recording transitions from
  // native to managed code.
  NativeToManagedRecord* native_to_managed_record_;

  // Top of linked list of stack indirect reference tables or NULL for none
  StackIndirectReferenceTable* top_sirt_;

  // Every thread may have an associated JNI environment
  JNIEnvExt* jni_env_;

  volatile State state_;

  // Initialized to "this". On certain architectures (such as x86), reading a
  // field off of Thread::Current() is easy, but materializing the address of
  // the current Thread itself is hard; reading this field gives that address.
  Thread* self_;

  Runtime* runtime_;

  // The pending exception or NULL.
  Throwable* exception_;

  // A non-zero value is used to tell the current thread to enter a safe point
  // at the next poll.
  int suspend_count_;

  // Needed to get the right ClassLoader in JNI_OnLoad, but also
  // useful for testing.
  const ClassLoader* class_loader_override_;

  // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
  UniquePtr<Context> long_jump_context_;

  // TLS key used to retrieve the VM thread object.
  static pthread_key_t pthread_key_self_;

  // Entry point called when exception_ is set
  void (*exception_entry_point_)(Method** frame);

  // Entry point called when suspend_count_ is non-zero
  void (*suspend_count_entry_point_)(Method** frame);

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const Thread::State& state);

class ScopedThreadStateChange {
 public:
  ScopedThreadStateChange(Thread* thread, Thread::State new_state) : thread_(thread) {
    old_thread_state_ = thread_->SetState(new_state);
  }

  ~ScopedThreadStateChange() {
    thread_->SetState(old_thread_state_);
  }

 private:
  Thread* thread_;
  Thread::State old_thread_state_;
  DISALLOW_COPY_AND_ASSIGN(ScopedThreadStateChange);
};
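
// A minimal usage sketch (illustrative only, not code from this header): the
// RAII wrapper above is intended to switch a thread's state for the duration
// of a scope and restore the previous state on exit, e.g. while blocking on a
// VM resource. WaitForSomethingSlow is a hypothetical caller.
//
//   void WaitForSomethingSlow(Thread* self) {
//     ScopedThreadStateChange tsc(self, Thread::kVmWait);
//     // ... block; the old state is restored when tsc goes out of scope ...
//   }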

}  // namespace art

#endif  // ART_SRC_THREAD_H_