/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_THREAD_H_
#define ART_RUNTIME_THREAD_H_

#include <bitset>
#include <deque>
#include <iosfwd>
#include <list>
#include <memory>
#include <setjmp.h>
#include <string>

#include "arch/context.h"
#include "arch/instruction_set.h"
#include "atomic.h"
#include "base/macros.h"
#include "base/mutex.h"
#include "entrypoints/interpreter/interpreter_entrypoints.h"
#include "entrypoints/jni/jni_entrypoints.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "globals.h"
#include "handle_scope.h"
#include "instrumentation.h"
#include "jvalue.h"
#include "object_callbacks.h"
#include "offsets.h"
#include "runtime_stats.h"
#include "stack.h"
#include "thread_state.h"

namespace art {

namespace gc {
namespace collector {
  class SemiSpace;
}  // namespace collector
}  // namespace gc

namespace mirror {
  class Array;
  class Class;
  class ClassLoader;
  class Object;
  template<class T> class ObjectArray;
  template<class T> class PrimitiveArray;
  typedef PrimitiveArray<int32_t> IntArray;
  class StackTraceElement;
  class String;
  class Throwable;
}  // namespace mirror

namespace verifier {
class MethodVerifier;
}  // namespace verifier

class ArtMethod;
class BaseMutex;
class ClassLinker;
class Closure;
class Context;
struct DebugInvokeReq;
class DeoptimizationReturnValueRecord;
class DexFile;
class JavaVMExt;
struct JNIEnvExt;
class Monitor;
class Runtime;
class ScopedObjectAccessAlreadyRunnable;
class ShadowFrame;
class SingleStepControl;
class StackedShadowFrameRecord;
class Thread;
class ThreadList;

// Thread priorities. These must match the Thread.MIN_PRIORITY,
// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
enum ThreadPriority {
  kMinThreadPriority = 1,
  kNormThreadPriority = 5,
  kMaxThreadPriority = 10,
};

enum ThreadFlag {
  kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
                          // safepoint handler.
  kCheckpointRequest = 2  // Request that the thread do some checkpoint work and then continue.
};

enum class StackedShadowFrameType {
  kShadowFrameUnderConstruction,
  kDeoptimizationShadowFrame
};

static constexpr size_t kNumRosAllocThreadLocalSizeBrackets = 34;

// Thread's stack layout for implicit stack overflow checks:
//
//   +---------------------+  <- highest address of stack memory
//   |                     |
//   .                     .  <- SP
//   |                     |
//   |                     |
//   +---------------------+  <- stack_end
//   |                     |
//   |  Gap                |
//   |                     |
//   +---------------------+  <- stack_begin
//   |                     |
//   | Protected region    |
//   |                     |
//   +---------------------+  <- lowest address of stack memory
//
// The stack always grows down in memory.  At the lowest address is a region of memory
// that is protected with mprotect(PROT_NONE).  Any attempt to read from or write to this
// region will result in a segmentation fault signal.  At any point, the thread's SP will
// be somewhere between stack_end and the highest address in stack memory.  An implicit
// stack overflow check is a read of memory at a certain offset below the current SP
// (typically 4K).  If the thread's SP is below the stack_end address, this will be a read
// into the protected region.  If the SP is above the stack_end address, the thread is
// guaranteed to have at least 4K of space.  Because stack overflow checks are only
// performed in generated code, if the thread makes a call out to a native function
// (through JNI), that native function might only have 4K of memory left to work with
// (if the SP is adjacent to stack_end).
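//
// A minimal sketch of the probe behind an implicit check (illustrative only; the real
// instruction sequence is target-specific and emitted by the compiler back end):
//
//   // Touch an address a fixed distance below the current SP. If SP has already dropped
//   // below stack_end, this lands in the mprotect(PROT_NONE) region and raises SIGSEGV,
//   // which the fault handler converts into a StackOverflowError.
//   inline void ImplicitStackOverflowProbe(const uint8_t* sp) {  // hypothetical helper
//     volatile uint8_t probe = *(sp - 4 * KB);
//     (void) probe;
//   }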

class Thread {
 public:
  // For implicit overflow checks we reserve an extra piece of memory at the bottom
  // of the stack (lowest addresses).  The lowest portion of that memory is protected
  // against reads and writes, and the gap above it is available for use while
  // throwing the StackOverflowError exception.
  static constexpr size_t kStackOverflowProtectedSize = 4 * KB;
  static const size_t kStackOverflowImplicitCheckSize;

  // Creates a new native thread corresponding to the given managed peer.
  // Used to implement Thread.start.
  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);

  // Attaches the calling native thread to the runtime, returning the new native peer.
  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                        bool create_peer);

  // Reset internal state of child thread after fork.
  void InitAfterFork();

  // Get the currently executing thread, frequently referred to as 'self'. This call has reasonably
  // high cost and so we favor passing self around when possible.
  // TODO: mark as PURE so the compiler may coalesce and remove?
  static Thread* Current();

  // On a runnable thread, check for pending thread suspension request and handle if pending.
  void AllowThreadSuspension() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Process pending thread suspension request and handle if pending.
  void CheckSuspend() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
                                   mirror::Object* thread_peer)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Translates 172 to pAllocArrayFromCode and so on.
  template<size_t size_of_pointers>
  static void DumpThreadOffset(std::ostream& os, uint32_t offset);

  // Dumps a one-line summary of thread state (used for operator<<).
  void ShortDump(std::ostream& os) const;

  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
  void Dump(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpJavaStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Dumps the SIGQUIT per-thread header. 'thread' can be null for a non-attached thread, in which
  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ThreadState GetState() const {
    DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
    DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
    return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
  }

  ThreadState SetState(ThreadState new_state);

  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return tls32_.suspend_count;
  }

  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
    return tls32_.debug_suspend_count;
  }

  bool IsSuspended() const {
    union StateAndFlags state_and_flags;
    state_and_flags.as_int = tls32_.state_and_flags.as_int;
    return state_and_flags.as_struct.state != kRunnable &&
        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
  }

  void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  bool RequestCheckpoint(Closure* function)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);

  void SetFlipFunction(Closure* function);
  Closure* GetFlipFunction();

  // Called when this thread detects that thread_suspend_count_ is non-zero. Gives up its share
  // of the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
  void FullSuspendCheck()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Transition from non-runnable to runnable state, acquiring a share of the mutator_lock_.
  ThreadState TransitionFromSuspendedToRunnable()
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Transition from runnable into a state where mutator privileges are denied. Releases share of
  // mutator lock.
  void TransitionFromRunnableToSuspended(ThreadState new_state)
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      UNLOCK_FUNCTION(Locks::mutator_lock_)
      ALWAYS_INLINE;

  // Once called, thread suspension will cause an assertion failure.
  const char* StartAssertNoThreadSuspension(const char* cause) {
    if (kIsDebugBuild) {
      CHECK(cause != nullptr);
      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
      tls32_.no_thread_suspension++;
      tlsPtr_.last_no_thread_suspension_cause = cause;
      return previous_cause;
    } else {
      return nullptr;
    }
  }

  // End region where no thread suspension is expected.
  void EndAssertNoThreadSuspension(const char* old_cause) {
    if (kIsDebugBuild) {
      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
      CHECK_GT(tls32_.no_thread_suspension, 0U);
      tls32_.no_thread_suspension--;
      tlsPtr_.last_no_thread_suspension_cause = old_cause;
    }
  }

  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;

  bool IsDaemon() const {
    return tls32_.daemon;
  }

  bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  /*
   * Changes the priority of this thread to match that of the java.lang.Thread object.
   *
   * We map a priority value from 1-10 to Linux "nice" values, where lower
   * numbers indicate higher priority.
   */
  void SetNativePriority(int newPriority);

  /*
   * Returns the thread priority for the current thread by querying the system.
   * This is useful when attaching a thread through JNI.
   *
   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
   */
  static int GetNativePriority();
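
  // A minimal sketch of the kind of mapping SetNativePriority() performs (the runtime uses
  // its own lookup table; this linear formula is purely illustrative):
  //
  //   int JavaPriorityToNice(int java_priority) {  // hypothetical helper
  //     // kNormThreadPriority (5) maps to nice 0; higher Java priority => lower nice value.
  //     return (kNormThreadPriority - java_priority) * 2;
  //   }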

  uint32_t GetThreadId() const {
    return tls32_.thin_lock_thread_id;
  }

  pid_t GetTid() const {
    return tls32_.tid;
  }

  // Returns the java.lang.Thread's name, or null if this Thread* doesn't have a peer.
  mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
  // allocation, or locking.
  void GetThreadName(std::string& name) const;

  // Sets the thread's name.
  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
  uint64_t GetCpuMicroTime() const;

  mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(tlsPtr_.jpeer == nullptr);
    return tlsPtr_.opeer;
  }

  bool HasPeer() const {
    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
  }

  RuntimeStats* GetStats() {
    return &tls64_.stats;
  }

  bool IsStillStarting() const;

  bool IsExceptionPending() const {
    return tlsPtr_.exception != nullptr;
  }

  mirror::Throwable* GetException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.exception;
  }

  void AssertPendingException() const;
  void AssertPendingOOMException() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void AssertNoPendingException() const;
  void AssertNoPendingExceptionForNewException(const char* msg) const;

  void SetException(mirror::Throwable* new_exception)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    CHECK(new_exception != nullptr);
    // TODO: DCHECK(!IsExceptionPending());
    tlsPtr_.exception = new_exception;
  }

  void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    tlsPtr_.exception = nullptr;
  }

  // Find the catch block and perform a long jump to the appropriate exception handler.
  NO_RETURN void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Context* GetLongJumpContext();
  void ReleaseLongJumpContext(Context* context) {
    if (tlsPtr_.long_jump_context != nullptr) {
      // Each QuickExceptionHandler gets a long jump context and uses it for the long jump
      // after finding catch blocks or performing deoptimization. Both finding catch blocks
      // and deoptimization can trigger another exception, for example as a result of class
      // loading, so there can be nested cases of exception handling with multiple contexts
      // in use at once. ReleaseLongJumpContext saves the context in
      // tlsPtr_.long_jump_context for reuse, so a new one does not have to be allocated on
      // every GetLongJumpContext call. Since we only keep one context for reuse, delete the
      // existing one, because the passed-in context has yet to be used for a long jump.
      delete tlsPtr_.long_jump_context;
    }
    tlsPtr_.long_jump_context = context;
  }
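
  // Illustrative pairing of the two calls above (hypothetical caller; the real users are the
  // exception delivery and deoptimization paths):
  //
  //   Context* context = self->GetLongJumpContext();
  //   // ... walk the stack / locate the catch handler using 'context' ...
  //   self->ReleaseLongJumpContext(context);  // cached for the next GetLongJumpContext()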

  // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
  // abort the runtime iff abort_on_error is true.
  ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns whether the given exception was thrown by the current Java method being executed
  // (Note that this includes native Java methods).
  bool IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetTopOfStack(ArtMethod** top_method) {
    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
  }

  void SetTopOfShadowStack(ShadowFrame* top) {
    tlsPtr_.managed_stack.SetTopShadowFrame(top);
  }

  bool HasManagedStack() const {
    return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
        (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
  }

  // If 'msg' is null, no detail message is set.
  void ThrowNewException(const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // If 'msg' is null, no detail message is set. An exception must be pending, and will be
  // used as the new exception's cause.
  void ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...)
      __attribute__((format(printf, 3, 4)))
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // OutOfMemoryError is special, because we need to pre-allocate an instance.
  // Only the GC should call this.
  void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void Startup();
  static void FinishStartup();
  static void Shutdown();

  // JNI methods
  JNIEnvExt* GetJniEnv() const {
    return tlsPtr_.jni_env;
  }

  // Converts a jobject into a mirror::Object*.
  mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::Object* GetMonitorEnterObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.monitor_enter_object;
  }

  void SetMonitorEnterObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    tlsPtr_.monitor_enter_object = obj;
  }

  // Implements java.lang.Thread.interrupted.
  bool Interrupted() LOCKS_EXCLUDED(wait_mutex_);
  // Implements java.lang.Thread.isInterrupted.
  bool IsInterrupted() LOCKS_EXCLUDED(wait_mutex_);
  bool IsInterruptedLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return interrupted_;
  }
  void Interrupt(Thread* self) LOCKS_EXCLUDED(wait_mutex_);
  void SetInterruptedLocked(bool i) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    interrupted_ = i;
  }
  void Notify() LOCKS_EXCLUDED(wait_mutex_);

 private:
  void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);

 public:
  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
    return wait_mutex_;
  }

  ConditionVariable* GetWaitConditionVariable() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return wait_cond_;
  }

  Monitor* GetWaitMonitor() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    return wait_monitor_;
  }

  void SetWaitMonitor(Monitor* mon) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
    wait_monitor_ = mon;
  }


  // Waiter linked-list support.
  Thread* GetWaitNext() const {
    return tlsPtr_.wait_next;
  }

  void SetWaitNext(Thread* next) {
    tlsPtr_.wait_next = next;
  }

  jobject GetClassLoaderOverride() {
    return tlsPtr_.class_loader_override;
  }

  void SetClassLoaderOverride(jobject class_loader_override);

  // Create the internal representation of a stack trace, which is more time- and
  // space-efficient to compute than a StackTraceElement[].
  template<bool kTransactionActive>
  jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
  // StackTraceElement[]. If output_array is null, a new array is created, otherwise as many
  // frames as will fit are written into the given array. If stack_depth is non-null, it's updated
  // with the number of valid frames in the returned array.
  static jobjectArray InternalStackTraceToStackTraceElementArray(
      const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
      jobjectArray output_array = nullptr, int* stack_depth = nullptr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void VisitRoots(RootVisitor* visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  //
  // Offsets of various members of native Thread class, used by compiled code.
  //

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThinLockIdOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
    return ThreadOffset<pointer_size>(
        OFFSETOF_MEMBER(Thread, tls32_) +
        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
  }

 private:
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
    size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
    size_t scale;
    size_t shrink;
    if (pointer_size == sizeof(void*)) {
      scale = 1;
      shrink = 1;
    } else if (pointer_size > sizeof(void*)) {
      scale = pointer_size / sizeof(void*);
      shrink = 1;
    } else {
      DCHECK_GT(sizeof(void*), pointer_size);
      scale = 1;
      shrink = sizeof(void*) / pointer_size;
    }
    return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
  }
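
  // A worked example of how compiled code addresses a Thread field (illustrative only;
  // 'pAllocArray' is assumed here to stand for one of the QuickEntryPoints members):
  //
  //   // When the queried pointer size matches the native one, scale == shrink == 1, so the
  //   // result is simply offsetof(Thread, tlsPtr_)
  //   //   + offsetof(tls_ptr_sized_values, quick_entrypoints)
  //   //   + offsetof(QuickEntryPoints, pAllocArray).
  //   uint32_t off = Thread::QuickEntryPointOffset<8>(
  //       OFFSETOF_MEMBER(QuickEntryPoints, pAllocArray)).Uint32Value();
  //   // DumpThreadOffset<8>(os, off) prints the symbolic name of that entrypoint.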

 public:
  static uint32_t QuickEntryPointOffsetWithSize(size_t quick_entrypoint_offset,
                                                size_t pointer_size) {
    DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
    if (pointer_size == 4) {
      return QuickEntryPointOffset<4>(quick_entrypoint_offset).Uint32Value();
    } else {
      return QuickEntryPointOffset<8>(quick_entrypoint_offset).Uint32Value();
    }
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> InterpreterEntryPointOffset(size_t interp_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, interpreter_entrypoints) + interp_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> SelfOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ExceptionOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> PeerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
  }


  template<size_t pointer_size>
  static ThreadOffset<pointer_size> CardTableOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalPosOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_pos));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_end));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadLocalObjectsOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, thread_local_objects));
  }

  // Size of the stack, less any space reserved for stack overflow.
  size_t GetStackSize() const {
    return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
  }

  uint8_t* GetStackEndForInterpreter(bool implicit_overflow_check) const {
    if (implicit_overflow_check) {
      // The interpreter needs the extra overflow bytes that stack_end does
      // not include.
      return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
    } else {
      return tlsPtr_.stack_end;
    }
  }

  uint8_t* GetStackEnd() const {
    return tlsPtr_.stack_end;
  }

  // Set the stack end to the value used while handling a stack overflow.
  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Set the stack end to the value used during regular execution.
  void ResetDefaultStackEnd() {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
  }
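
  // Numeric sketch with hypothetical values: if stack_begin == 0x10000000, stack_size == 1 MB
  // and GetStackOverflowReservedBytes(kRuntimeISA) == 8 KB, then during regular execution
  // stack_end == 0x10002000 and GetStackSize() reports 1 MB - 8 KB. When an overflow is being
  // handled, stack_end is moved down to stack_begin (see IsHandlingStackOverflow()) so the
  // reserved 8 KB can be used to construct and throw the StackOverflowError.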

  // Install the protected region for implicit stack checks.
  void InstallImplicitProtection();

  bool IsHandlingStackOverflow() const {
    return tlsPtr_.stack_end == tlsPtr_.stack_begin;
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> StackEndOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> JniEnvOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopQuickFrameOffset());
  }

  const ManagedStack* GetManagedStack() const {
    return &tlsPtr_.managed_stack;
  }

  // Linked list recording fragments of managed stack.
  void PushManagedStackFragment(ManagedStack* fragment) {
    tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
  }
  void PopManagedStackFragment(const ManagedStack& fragment) {
    tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
  }

  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
    return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
  }

  ShadowFrame* PopShadowFrame() {
    return tlsPtr_.managed_stack.PopShadowFrame();
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopShadowFrameOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(
        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
        ManagedStack::TopShadowFrameOffset());
  }

  // Number of references allocated in JNI ShadowFrames on this thread.
  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
  }

  // Number of references in handle scope on this thread.
  size_t NumHandleReferences();

  // Number of references allocated in handle scopes & JNI shadow frames on this thread.
  size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return NumHandleReferences() + NumJniShadowFrameReferences();
  }

  // Is the given obj in this thread's stack indirect reference table?
  bool HandleScopeContains(jobject obj) const;

  void HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  HandleScope* GetTopHandleScope() {
    return tlsPtr_.top_handle_scope;
  }

  void PushHandleScope(HandleScope* handle_scope) {
    DCHECK_EQ(handle_scope->GetLink(), tlsPtr_.top_handle_scope);
    tlsPtr_.top_handle_scope = handle_scope;
  }

  HandleScope* PopHandleScope() {
    HandleScope* handle_scope = tlsPtr_.top_handle_scope;
    DCHECK(handle_scope != nullptr);
    tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
    return handle_scope;
  }

  template<size_t pointer_size>
  static ThreadOffset<pointer_size> TopHandleScopeOffset() {
    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
                                                                top_handle_scope));
  }

  DebugInvokeReq* GetInvokeReq() const {
    return tlsPtr_.debug_invoke_req;
  }

  SingleStepControl* GetSingleStepControl() const {
    return tlsPtr_.single_step_control;
  }

  // Indicates whether this thread is ready to invoke a method for debugging. This
  // is only true if the thread has been suspended by a debug event.
  bool IsReadyForDebugInvoke() const {
    return tls32_.ready_for_debug_invoke;
  }

  void SetReadyForDebugInvoke(bool ready) {
    tls32_.ready_for_debug_invoke = ready;
  }

  bool IsDebugMethodEntry() const {
    return tls32_.debug_method_entry_;
  }

  void SetDebugMethodEntry() {
    tls32_.debug_method_entry_ = true;
  }

  void ClearDebugMethodEntry() {
    tls32_.debug_method_entry_ = false;
  }

  // Activates single step control for debugging. The thread takes ownership of the given
  // SingleStepControl*. It is deleted by a call to DeactivateSingleStepControl or upon
  // thread destruction.
  void ActivateSingleStepControl(SingleStepControl* ssc);

  // Deactivates single step control for debugging.
  void DeactivateSingleStepControl();

  // Sets the debug invoke request for debugging. When the thread is resumed, it executes
  // the method described by this request and then sends the reply before suspending itself.
  // The thread takes ownership of the given DebugInvokeReq*. It is deleted by a call to
  // ClearDebugInvokeReq.
  void SetDebugInvokeReq(DebugInvokeReq* req);

  // Clears the debug invoke request for debugging. When the thread completes the method
  // invocation, it deletes its debug invoke request and suspends itself.
  void ClearDebugInvokeReq();

  // Returns the fake exception used to activate deoptimization.
  static mirror::Throwable* GetDeoptimizationException() {
    return reinterpret_cast<mirror::Throwable*>(-1);
  }

  // Currently deoptimization invokes the verifier, which can trigger class loading
  // and execute Java code, so there might be nested deoptimizations happening.
  // We need to save the ongoing deoptimization shadow frames and return
  // values on stacks.
  void SetDeoptimizationReturnValue(const JValue& ret_val, bool is_reference) {
    tls64_.deoptimization_return_value.SetJ(ret_val.GetJ());
    tls32_.deoptimization_return_value_is_reference = is_reference;
  }
  bool IsDeoptimizationReturnValueReference() {
    return tls32_.deoptimization_return_value_is_reference;
  }
  void ClearDeoptimizationReturnValue() {
    tls64_.deoptimization_return_value.SetJ(0);
    tls32_.deoptimization_return_value_is_reference = false;
  }
  void PushAndClearDeoptimizationReturnValue();
  JValue PopDeoptimizationReturnValue();
  void PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type);
  ShadowFrame* PopStackedShadowFrame(StackedShadowFrameType type);

  std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
    return tlsPtr_.instrumentation_stack;
  }

  std::vector<ArtMethod*>* GetStackTraceSample() const {
    return tlsPtr_.stack_trace_sample;
  }

  void SetStackTraceSample(std::vector<ArtMethod*>* sample) {
    tlsPtr_.stack_trace_sample = sample;
  }

  uint64_t GetTraceClockBase() const {
    return tls64_.trace_clock_base;
  }

  void SetTraceClockBase(uint64_t clock_base) {
    tls64_.trace_clock_base = clock_base;
  }

  BaseMutex* GetHeldMutex(LockLevel level) const {
    return tlsPtr_.held_mutexes[level];
  }

  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
    tlsPtr_.held_mutexes[level] = mutex;
  }

  void RunCheckpointFunction();

  bool ReadFlag(ThreadFlag flag) const {
    return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
  }

  bool TestAllFlags() const {
    return (tls32_.state_and_flags.as_struct.flags != 0);
  }

  void AtomicSetFlag(ThreadFlag flag) {
    tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flag);
  }

  void AtomicClearFlag(ThreadFlag flag) {
    tls32_.state_and_flags.as_atomic_int.FetchAndAndSequentiallyConsistent(-1 ^ flag);
  }

  void ResetQuickAllocEntryPointsForThread();

  // Returns the remaining space in the TLAB.
  size_t TlabSize() const;
  // Doesn't check that there is room.
  mirror::Object* AllocTlab(size_t bytes);
  void SetTlab(uint8_t* start, uint8_t* end);
  bool HasTlab() const;
  uint8_t* GetTlabStart() {
    return tlsPtr_.thread_local_start;
  }
  uint8_t* GetTlabPos() {
    return tlsPtr_.thread_local_pos;
  }

  // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
  // equal to a valid pointer.
  // TODO: does this need to be atomic?  I don't think so.
  void RemoveSuspendTrigger() {
    tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
  }

  // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
  // The next time a suspend check is done, it will load from the value at this address
  // and trigger a SIGSEGV.
  void TriggerSuspend() {
    tlsPtr_.suspend_trigger = nullptr;
  }
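
  // Illustrative shape of the poll that compiled code performs (not literal compiler output):
  //
  //   // Load through suspend_trigger. After TriggerSuspend() has stored nullptr here, the
  //   // load faults and the SIGSEGV handler performs the actual suspend check.
  //   volatile uintptr_t poll = *tlsPtr_.suspend_trigger;
  //   (void) poll;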


  // Push an object onto the allocation stack.
  bool PushOnThreadLocalAllocationStack(mirror::Object* obj)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Set the thread local allocation pointers to the given pointers.
  void SetThreadLocalAllocationStack(StackReference<mirror::Object>* start,
                                     StackReference<mirror::Object>* end);

  // Resets the thread local allocation pointers.
  void RevokeThreadLocalAllocationStack();

  size_t GetThreadLocalBytesAllocated() const {
    return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
  }

  size_t GetThreadLocalObjectsAllocated() const {
    return tlsPtr_.thread_local_objects;
  }

  void* GetRosAllocRun(size_t index) const {
    return tlsPtr_.rosalloc_runs[index];
  }

  void SetRosAllocRun(size_t index, void* run) {
    tlsPtr_.rosalloc_runs[index] = run;
  }

  void ProtectStack();
  bool UnprotectStack();

  void NoteSignalBeingHandled() {
    if (tls32_.handling_signal_) {
      LOG(FATAL) << "Detected signal while processing a signal";
    }
    tls32_.handling_signal_ = true;
  }

  void NoteSignalHandlerDone() {
    tls32_.handling_signal_ = false;
  }

  jmp_buf* GetNestedSignalState() {
    return tlsPtr_.nested_signal_state;
  }

  bool IsSuspendedAtSuspendCheck() const {
    return tls32_.suspended_at_suspend_check;
  }

  void PushVerifier(verifier::MethodVerifier* verifier);
  void PopVerifier(verifier::MethodVerifier* verifier);

  void InitStringEntryPoints();

 private:
  explicit Thread(bool daemon);
  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
                           Locks::thread_suspend_count_lock_);
  void Destroy();

  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);

  template<bool kTransactionActive>
  void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
                jobject thread_name, jint thread_priority)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
  // Dbg::Disconnected.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    tls32_.state_and_flags.as_struct.state = new_state;
    return old_state;
  }

  void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static void* CreateCallback(void* arg);

  void HandleUncaughtExceptions(ScopedObjectAccess& soa)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Initialize a thread.
  //
  // The third parameter is not mandatory. If given, the thread will use this JNIEnvExt. In case
  // Init succeeds, this means the thread takes ownership of it. If Init fails, it is the caller's
  // responsibility to destroy the given JNIEnvExt. If the parameter is null, Init will try to
  // create a JNIEnvExt on its own (and potentially fail at that stage, indicated by a return value
  // of false).
  bool Init(ThreadList*, JavaVMExt*, JNIEnvExt* jni_env_ext = nullptr)
      EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
  void InitCardTable();
  void InitCpu();
  void CleanupCpu();
  void InitTlsEntryPoints();
  void InitTid();
  void InitPthreadKeySelf();
  bool InitStackHwm();

  void SetUpAlternateSignalStack();
  void TearDownAlternateSignalStack();

  // 32 bits of atomically changed state and flags. Keeping it 32 bits allows an atomic CAS to
  // change from being Suspended to Runnable without a suspend request occurring.
  union PACKED(4) StateAndFlags {
    StateAndFlags() {}
    struct PACKED(4) {
      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
      // ThreadFlags for bit field meanings.
      volatile uint16_t flags;
      // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
      // operation. If a thread is suspended and a suspend_request is present, a thread may not
      // change to Runnable as a GC or other operation is in progress.
      volatile uint16_t state;
    } as_struct;
    AtomicInteger as_atomic_int;
    volatile int32_t as_int;

   private:
    // gcc does not handle struct with volatile member assignments correctly.
    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
    DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
  };
  static_assert(sizeof(StateAndFlags) == sizeof(int32_t), "Weird state_and_flags size");
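
  // A sketch of the transition this layout makes possible (the real logic lives in
  // Thread::TransitionFromSuspendedToRunnable; this is illustrative only):
  //
  //   union StateAndFlags old_sf, new_sf;
  //   old_sf.as_int = tls32_.state_and_flags.as_int;
  //   if ((old_sf.as_struct.flags & kSuspendRequest) == 0) {
  //     new_sf.as_int = old_sf.as_int;
  //     new_sf.as_struct.state = kRunnable;
  //     // CAS the whole 32-bit word so a racing suspend request cannot be lost.
  //     tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakSequentiallyConsistent(
  //         old_sf.as_int, new_sf.as_int);
  //   }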

  static void ThreadExitCallback(void* arg);

  // Maximum number of checkpoint functions.
  static constexpr uint32_t kMaxCheckpoints = 3;

  // Has Thread::Startup been called?
  static bool is_started_;

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);

  /***********************************************************************************************/
  // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
  // pointer size differences. To encourage shorter encoding, more frequently used values appear
  // first if possible.
  /***********************************************************************************************/

  struct PACKED(4) tls_32bit_sized_values {
    // We have no control over the size of 'bool', but want our boolean fields
    // to be 4-byte quantities.
    typedef uint32_t bool32_t;

    explicit tls_32bit_sized_values(bool is_daemon) :
      suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
      daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
      thread_exit_check_count(0), handling_signal_(false),
      deoptimization_return_value_is_reference(false), suspended_at_suspend_check(false),
      ready_for_debug_invoke(false), debug_method_entry_(false) {
    }

    union StateAndFlags state_and_flags;
    static_assert(sizeof(union StateAndFlags) == sizeof(int32_t),
                  "Size of state_and_flags and int32 are different");

    // A non-zero value is used to tell the current thread to enter a safe point
    // at the next poll.
    int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // How much of 'suspend_count_' is by request of the debugger, used to set things right
    // when the debugger detaches. Must be <= suspend_count_.
    int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Thin lock thread id. This is a small integer used by the thin lock implementation.
    // This is not to be confused with the native thread's tid, nor is it the value returned
    // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
    // important difference between this id and the ids visible to managed code is that these
    // ones get reused (to ensure that they fit in the number of bits available).
    uint32_t thin_lock_thread_id;

    // System thread id.
    uint32_t tid;

    // Is the thread a daemon?
    const bool32_t daemon;

    // A boolean telling us whether we're recursively throwing OOME.
    bool32_t throwing_OutOfMemoryError;

    // A positive value implies we're in a region where thread suspension isn't expected.
    uint32_t no_thread_suspension;

    // How many times has our pthread key's destructor been called?
    uint32_t thread_exit_check_count;

    // True if signal is being handled by this thread.
    bool32_t handling_signal_;

    // True if the return value for the interpreter after deoptimization is a reference.
    // Used by the GC.
    bool32_t deoptimization_return_value_is_reference;

    // True if the thread is suspended in FullSuspendCheck(). This is
    // used to distinguish runnable threads that are suspended due to
    // a normal suspend check from other threads.
    bool32_t suspended_at_suspend_check;

    // True if the thread has been suspended by a debugger event. This is
    // used to invoke a method from the debugger, which is only allowed when
    // the thread is suspended by an event.
    bool32_t ready_for_debug_invoke;

    // True if the thread has just entered a method. This is used to detect
    // method entry events for the debugger.
    bool32_t debug_method_entry_;
  } tls32_;

  struct PACKED(8) tls_64bit_sized_values {
    tls_64bit_sized_values() : trace_clock_base(0), deoptimization_return_value() {
    }

    // The clock base used for tracing.
    uint64_t trace_clock_base;

    // Return value used by deoptimization.
    JValue deoptimization_return_value;

    RuntimeStats stats;
  } tls64_;

  struct PACKED(4) tls_ptr_sized_values {
      tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
      managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), tmp_jni_env(nullptr),
      self(nullptr), opeer(nullptr), jpeer(nullptr), stack_begin(nullptr), stack_size(0),
      stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
      top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
      instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
      stacked_shadow_frame_record(nullptr), deoptimization_return_value_stack(nullptr),
      name(nullptr), pthread_self(0),
      last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
      thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
      thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr),
      nested_signal_state(nullptr), flip_function(nullptr), method_verifier(nullptr) {
      std::fill(held_mutexes, held_mutexes + kLockLevelCount, nullptr);
    }

    // The biased card table, see CardTable for details.
    uint8_t* card_table;

    // The pending exception or null.
    mirror::Throwable* exception;

    // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
    // We leave extra space so there's room for the code that throws StackOverflowError.
    uint8_t* stack_end;

    // The top of the managed stack, often manipulated directly by compiler-generated code.
    ManagedStack managed_stack;

    // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
    // normally set to the address of itself.
    uintptr_t* suspend_trigger;

    // Every thread may have an associated JNI environment
    JNIEnvExt* jni_env;

    // Temporary storage to transfer a pre-allocated JNIEnvExt from the creating thread to the
    // created thread.
    JNIEnvExt* tmp_jni_env;

    // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
    // is easy but getting the address of Thread::Current is hard. This field can be read off of
    // Thread::Current to give the address.
    Thread* self;

    // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
    // start up, until the thread is registered and the local opeer_ is used.
    mirror::Object* opeer;
    jobject jpeer;

    // The "lowest addressable byte" of the stack.
    uint8_t* stack_begin;

    // Size of the stack.
    size_t stack_size;

    // Pointer to previous stack trace captured by sampling profiler.
    std::vector<ArtMethod*>* stack_trace_sample;

    // The next thread in the wait set this thread is part of or null if not waiting.
    Thread* wait_next;

    // If we're blocked in MonitorEnter, this is the object we're trying to lock.
    mirror::Object* monitor_enter_object;

    // Top of linked list of handle scopes or null for none.
    HandleScope* top_handle_scope;

    // Needed to get the right ClassLoader in JNI_OnLoad, but also
    // useful for testing.
    jobject class_loader_override;

    // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
    Context* long_jump_context;

    // Additional stack used by method instrumentation to store method and return pc values.
    // Stored as a pointer since std::deque is not PACKED.
    std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;

    // JDWP invoke-during-breakpoint support.
    DebugInvokeReq* debug_invoke_req;

    // JDWP single-stepping support.
    SingleStepControl* single_step_control;

    // For GC purposes, a shadow frame record stack that keeps track of:
    // 1) shadow frames under construction.
    // 2) deoptimization shadow frames.
    StackedShadowFrameRecord* stacked_shadow_frame_record;

    // Deoptimization return value record stack.
    DeoptimizationReturnValueRecord* deoptimization_return_value_stack;

    // A cached copy of the java.lang.Thread's name.
    std::string* name;

    // A cached pthread_t for the pthread underlying this Thread*.
    pthread_t pthread_self;

    // If no_thread_suspension_ is > 0, what is causing that assertion.
    const char* last_no_thread_suspension_cause;

    // Pending checkpoint function or null if non-pending. Installation guarded by
    // Locks::thread_suspend_count_lock_.
    Closure* checkpoint_functions[kMaxCheckpoints];

    // Entrypoint function pointers.
    // TODO: move this to more of a global offset table model to avoid per-thread duplication.
    InterpreterEntryPoints interpreter_entrypoints;
    JniEntryPoints jni_entrypoints;
    QuickEntryPoints quick_entrypoints;

    // Thread-local allocation pointer.
    uint8_t* thread_local_start;
    uint8_t* thread_local_pos;
    uint8_t* thread_local_end;
    size_t thread_local_objects;

    // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
    void* rosalloc_runs[kNumRosAllocThreadLocalSizeBrackets];

    // Thread-local allocation stack data/routines.
    StackReference<mirror::Object>* thread_local_alloc_stack_top;
    StackReference<mirror::Object>* thread_local_alloc_stack_end;

    // Support for Mutex lock hierarchy bug detection.
    BaseMutex* held_mutexes[kLockLevelCount];

    // Recorded thread state for nested signals.
    jmp_buf* nested_signal_state;

    // The function used for thread flip.
    Closure* flip_function;

    // Current method verifier, used for root marking.
    verifier::MethodVerifier* method_verifier;
  } tlsPtr_;

  // Guards the 'interrupted_' and 'wait_monitor_' members.
  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Condition variable waited upon during a wait.
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on or null if not waiting.
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);

  // Thread "interrupted" status; stays raised until queried or thrown.
  bool interrupted_ GUARDED_BY(wait_mutex_);

  friend class Dbg;  // For SetStateUnsafe.
  friend class gc::collector::SemiSpace;  // For getting stack traces.
  friend class Runtime;  // For CreatePeer.
  friend class QuickExceptionHandler;  // For dumping the stack.
  friend class ScopedThreadStateChange;
  friend class StubTest;  // For accessing entrypoints.
  friend class ThreadList;  // For ~Thread and Destroy.

  friend class EntrypointsOrderTest;  // To test the order of tls entries.

  DISALLOW_COPY_AND_ASSIGN(Thread);
};

class ScopedAssertNoThreadSuspension {
 public:
  ScopedAssertNoThreadSuspension(Thread* self, const char* cause)
      : self_(self), old_cause_(self->StartAssertNoThreadSuspension(cause)) {
  }
  ~ScopedAssertNoThreadSuspension() {
    self_->EndAssertNoThreadSuspension(old_cause_);
  }
  Thread* Self() {
    return self_;
  }

 private:
  Thread* const self_;
  const char* const old_cause_;
};
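
// Typical usage (illustrative):
//
//   {
//     ScopedAssertNoThreadSuspension ants(Thread::Current(), "Visiting thread roots");
//     // ... code that must not suspend; a suspension attempt here aborts in debug builds ...
//   }  // Destructor calls EndAssertNoThreadSuspension and re-allows suspension.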

class ScopedStackedShadowFramePusher {
 public:
  ScopedStackedShadowFramePusher(Thread* self, ShadowFrame* sf, StackedShadowFrameType type)
    : self_(self), type_(type) {
    self_->PushStackedShadowFrame(sf, type);
  }
  ~ScopedStackedShadowFramePusher() {
    self_->PopStackedShadowFrame(type_);
  }

 private:
  Thread* const self_;
  const StackedShadowFrameType type_;

  DISALLOW_COPY_AND_ASSIGN(ScopedStackedShadowFramePusher);
};
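
// Illustrative use (hypothetical caller):
//
//   ShadowFrame* frame = ...;  // frame being built, or preserved for deoptimization
//   {
//     ScopedStackedShadowFramePusher pusher(
//         self, frame, StackedShadowFrameType::kShadowFrameUnderConstruction);
//     // ... work during which the GC can locate 'frame' via the stacked record ...
//   }  // Destructor pops the record again.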

std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const StackedShadowFrameType& type);

}  // namespace art

#endif  // ART_RUNTIME_THREAD_H_