thread.h revision 63c051a540e6dfc806f656b88ac3a63e99395429
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_THREAD_H_
18#define ART_RUNTIME_THREAD_H_
19
20#include <bitset>
21#include <deque>
22#include <iosfwd>
23#include <list>
24#include <memory>
25#include <string>
26
27#include "atomic.h"
28#include "base/macros.h"
29#include "base/mutex.h"
30#include "entrypoints/interpreter/interpreter_entrypoints.h"
31#include "entrypoints/jni/jni_entrypoints.h"
32#include "entrypoints/portable/portable_entrypoints.h"
33#include "entrypoints/quick/quick_entrypoints.h"
34#include "globals.h"
35#include "handle_scope.h"
36#include "instruction_set.h"
37#include "jvalue.h"
38#include "object_callbacks.h"
39#include "offsets.h"
40#include "runtime_stats.h"
41#include "stack.h"
42#include "thread_state.h"
43#include "throw_location.h"
44
45namespace art {
46
47namespace gc {
48namespace collector {
49  class SemiSpace;
50}  // namespace collector
51}  // namespace gc
52
53namespace mirror {
54  class ArtMethod;
55  class Array;
56  class Class;
57  class ClassLoader;
58  class Object;
59  template<class T> class ObjectArray;
60  template<class T> class PrimitiveArray;
61  typedef PrimitiveArray<int32_t> IntArray;
62  class StackTraceElement;
63  class Throwable;
64}  // namespace mirror
65class BaseMutex;
66class ClassLinker;
67class Closure;
68class Context;
69struct DebugInvokeReq;
70class DexFile;
71class JavaVMExt;
72struct JNIEnvExt;
73class Monitor;
74class Runtime;
75class ScopedObjectAccessAlreadyRunnable;
76class ShadowFrame;
77struct SingleStepControl;
78class Thread;
79class ThreadList;
80
81// Thread priorities. These must match the Thread.MIN_PRIORITY,
82// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
83enum ThreadPriority {
84  kMinThreadPriority = 1,
85  kNormThreadPriority = 5,
86  kMaxThreadPriority = 10,
87};
88
89enum ThreadFlag {
90  kSuspendRequest   = 1,  // If set, implies that suspend_count_ > 0 and the Thread should enter the
91                          // safepoint handler.
92  kCheckpointRequest = 2  // Request that the thread do some checkpoint work and then continue.
93};
94
95static constexpr size_t kNumRosAllocThreadLocalSizeBrackets = 34;
96
97// Thread's stack layout for implicit stack overflow checks:
98//
99//   +---------------------+  <- highest address of stack memory
100//   |                     |
101//   .                     .  <- SP
102//   |                     |
103//   |                     |
104//   +---------------------+  <- stack_end
105//   |                     |
106//   |  Gap                |
107//   |                     |
108//   +---------------------+  <- stack_begin
109//   |                     |
110//   | Protected region    |
111//   |                     |
112//   +---------------------+  <- lowest address of stack memory
113//
114// The stack always grows down in memory.  At the lowest address is a region of memory
115// that is protected with mprotect(PROT_NONE).  Any attempt to read or write this region will
116// result in a segmentation fault signal.  At any point, the thread's SP will be somewhere
117// between the stack_end and the highest address in stack memory.  An implicit stack
118// overflow check is a read of memory at a certain offset below the current SP (4K typically).
119// If the thread's SP is below the stack_end address this will be a read into the protected
120// region.  If the SP is above the stack_end address, the thread is guaranteed to have
121// at least 4K of space.  Because stack overflow checks are only performed in generated code,
122// if the thread makes a call out to a native function (through JNI), that native function
123// might only have 4K of memory (if the SP is adjacent to stack_end).
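//
// Conceptually, the implicit check the compilers emit is just a load that is allowed to
// fault. A sketch of the idea (not the exact code ART generates; the probe distance and
// the names below are illustrative):
//
//   // At the entry of a compiled method: probe below SP so that a method about to
//   // overflow faults into the protected region instead of silently corrupting memory.
//   volatile uint8_t* probe = reinterpret_cast<uint8_t*>(sp) - kProbeDistance;  // ~4K.
//   uint8_t unused = *probe;  // SIGSEGV if SP is within kProbeDistance of stack_end.
//
// The runtime's fault handler recognizes a fault in the protected region and raises a
// StackOverflowError rather than crashing the process.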
124
125class Thread {
126 public:
127  // For implicit overflow checks we reserve an extra piece of memory at the bottom
128  // of the stack (lowest memory).  The higher portion of the memory
129  // is protected against reads and the lower is available for use while
130  // throwing the StackOverflow exception.
131  static constexpr size_t kStackOverflowProtectedSize = 4 * KB;
132  static const size_t kStackOverflowImplicitCheckSize;
133
134  // Creates a new native thread corresponding to the given managed peer.
135  // Used to implement Thread.start.
136  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
137
138  // Attaches the calling native thread to the runtime, returning the new native peer.
139  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
140  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
141                        bool create_peer);
142
143  // Reset internal state of child thread after fork.
144  void InitAfterFork();
145
146  static Thread* Current();
147
148  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
149                                   mirror::Object* thread_peer)
150      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
151      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
152      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
153  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
154      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
155      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
156      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
157
158  // Translates 172 to pAllocArrayFromCode and so on.
159  template<size_t size_of_pointers>
160  static void DumpThreadOffset(std::ostream& os, uint32_t offset);
161
162  // Dumps a one-line summary of thread state (used for operator<<).
163  void ShortDump(std::ostream& os) const;
164
165  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
166  void Dump(std::ostream& os) const
167      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
168      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
169
170  void DumpJavaStack(std::ostream& os) const
171      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
172      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
173
174  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
175  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
176  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
177      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
178      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
179
180  ThreadState GetState() const {
181    DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
182    DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
183    return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
184  }
185
186  ThreadState SetState(ThreadState new_state);
187
188  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
189    return tls32_.suspend_count;
190  }
191
192  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
193    return tls32_.debug_suspend_count;
194  }
195
196  bool IsSuspended() const {
197    union StateAndFlags state_and_flags;
198    state_and_flags.as_int = tls32_.state_and_flags.as_int;
199    return state_and_flags.as_struct.state != kRunnable &&
200        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
201  }
202
203  void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
204      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);
205
206  bool RequestCheckpoint(Closure* function)
207      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);
208
209  // Called when the thread detects that thread_suspend_count_ is non-zero. Gives up its share of
210  // the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
211  void FullSuspendCheck()
212      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
213      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
214
215  // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
216  ThreadState TransitionFromSuspendedToRunnable()
217      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
218      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
219      ALWAYS_INLINE;
220
221  // Transition from runnable into a state where mutator privileges are denied. Releases share of
222  // mutator lock.
223  void TransitionFromRunnableToSuspended(ThreadState new_state)
224      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
225      UNLOCK_FUNCTION(Locks::mutator_lock_)
226      ALWAYS_INLINE;
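
  // Typical pairing of the two transitions (an illustrative sketch; in practice the
  // ScopedThreadStateChange / ScopedObjectAccess helpers perform these calls rather than
  // callers writing them by hand):
  //
  //   Thread* self = Thread::Current();
  //   self->TransitionFromRunnableToSuspended(kWaitingForGcToComplete);
  //   //   ... block or perform work that must not hold a share of the mutator lock ...
  //   self->TransitionFromSuspendedToRunnable();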
227
228  // Once called, thread suspension will cause an assertion failure.
229  const char* StartAssertNoThreadSuspension(const char* cause) {
230    if (kIsDebugBuild) {
231      CHECK(cause != NULL);
232      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
233      tls32_.no_thread_suspension++;
234      tlsPtr_.last_no_thread_suspension_cause = cause;
235      return previous_cause;
236    } else {
237      return nullptr;
238    }
239  }
240
241  // End region where no thread suspension is expected.
242  void EndAssertNoThreadSuspension(const char* old_cause) {
243    if (kIsDebugBuild) {
244      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
245      CHECK_GT(tls32_.no_thread_suspension, 0U);
246      tls32_.no_thread_suspension--;
247      tlsPtr_.last_no_thread_suspension_cause = old_cause;
248    }
249  }
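
  // Usage sketch (illustrative): bracket a region in which a suspension would be a bug,
  // for example while raw mirror:: pointers are held across code that must not be a GC
  // point. The cause string is arbitrary.
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting raw objects");
  //   //   ... work that must not suspend ...
  //   self->EndAssertNoThreadSuspension(old_cause);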
250
251  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
252
253  bool IsDaemon() const {
254    return tls32_.daemon;
255  }
256
257  bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
258
259  /*
260   * Changes the priority of this thread to match that of the java.lang.Thread object.
261   *
262   * We map a priority value from 1-10 to Linux "nice" values, where lower
263   * numbers indicate higher priority.
264   */
265  void SetNativePriority(int newPriority);
266
267  /*
268   * Returns the thread priority for the current thread by querying the system.
269   * This is useful when attaching a thread through JNI.
270   *
271   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
272   */
273  static int GetNativePriority();
274
275  uint32_t GetThreadId() const {
276    return tls32_.thin_lock_thread_id;
277  }
278
279  pid_t GetTid() const {
280    return tls32_.tid;
281  }
282
283  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
284  mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
285      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
286
287  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
288  // allocation, or locking.
289  void GetThreadName(std::string& name) const;
290
291  // Sets the thread's name.
292  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
293
294  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
295  uint64_t GetCpuMicroTime() const;
296
297  mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
298    CHECK(tlsPtr_.jpeer == nullptr);
299    return tlsPtr_.opeer;
300  }
301
302  bool HasPeer() const {
303    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
304  }
305
306  RuntimeStats* GetStats() {
307    return &tls64_.stats;
308  }
309
310  bool IsStillStarting() const;
311
312  bool IsExceptionPending() const {
313    return tlsPtr_.exception != nullptr;
314  }
315
316  mirror::Throwable* GetException(ThrowLocation* throw_location) const
317      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
318    if (throw_location != nullptr) {
319      *throw_location = tlsPtr_.throw_location;
320    }
321    return tlsPtr_.exception;
322  }
323
324  void AssertNoPendingException() const;
325  void AssertNoPendingExceptionForNewException(const char* msg) const;
326
327  void SetException(const ThrowLocation& throw_location, mirror::Throwable* new_exception)
328      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
329    CHECK(new_exception != NULL);
330    // TODO: DCHECK(!IsExceptionPending());
331    tlsPtr_.exception = new_exception;
332    tlsPtr_.throw_location = throw_location;
333  }
334
335  void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
336    tlsPtr_.exception = nullptr;
337    tlsPtr_.throw_location.Clear();
338    SetExceptionReportedToInstrumentation(false);
339  }
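
  // Illustrative check-and-clear pattern using the accessors above (variable names are
  // arbitrary):
  //
  //   if (self->IsExceptionPending()) {
  //     ThrowLocation throw_location;
  //     mirror::Throwable* exception = self->GetException(&throw_location);
  //     //   ... log or wrap 'exception', possibly using 'throw_location' ...
  //     self->ClearException();
  //   }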
340
341  // Find the catch block and perform a long jump to the appropriate exception handler.
342  void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
343
344  Context* GetLongJumpContext();
345  void ReleaseLongJumpContext(Context* context) {
346    DCHECK(tlsPtr_.long_jump_context == nullptr);
347    tlsPtr_.long_jump_context = context;
348  }
349
350  // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
351  // abort the runtime iff abort_on_error is true.
352  mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
353      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
354
355  ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
356
357  void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method, uintptr_t pc) {
358    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
359    tlsPtr_.managed_stack.SetTopQuickFramePc(pc);
360  }
361
362  void SetTopOfShadowStack(ShadowFrame* top) {
363    tlsPtr_.managed_stack.SetTopShadowFrame(top);
364  }
365
366  bool HasManagedStack() const {
367    return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
368        (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
369  }
370
371  // If 'msg' is NULL, no detail message is set.
372  void ThrowNewException(const ThrowLocation& throw_location,
373                         const char* exception_class_descriptor, const char* msg)
374      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
375
376  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
377  // used as the new exception's cause.
378  void ThrowNewWrappedException(const ThrowLocation& throw_location,
379                                const char* exception_class_descriptor,
380                                const char* msg)
381      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
382
383  void ThrowNewExceptionF(const ThrowLocation& throw_location,
384                          const char* exception_class_descriptor, const char* fmt, ...)
385      __attribute__((format(printf, 4, 5)))
386      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
387
388  void ThrowNewExceptionV(const ThrowLocation& throw_location,
389                          const char* exception_class_descriptor, const char* fmt, va_list ap)
390      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
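
  // Illustrative use of the printf-style variant (the descriptor, message, and 'index'
  // are arbitrary):
  //
  //   ThrowLocation throw_location = self->GetCurrentLocationForThrow();
  //   self->ThrowNewExceptionF(throw_location, "Ljava/lang/IllegalArgumentException;",
  //                            "invalid index %d", index);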
391
392  // OutOfMemoryError is special, because we need to pre-allocate an instance.
393  // Only the GC should call this.
394  void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
395
396  static void Startup();
397  static void FinishStartup();
398  static void Shutdown();
399
400  // JNI methods
401  JNIEnvExt* GetJniEnv() const {
402    return tlsPtr_.jni_env;
403  }
404
405  // Convert a jobject into an Object*.
406  mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
407
408  mirror::Object* GetMonitorEnterObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
409    return tlsPtr_.monitor_enter_object;
410  }
411
412  void SetMonitorEnterObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
413    tlsPtr_.monitor_enter_object = obj;
414  }
415
416  // Implements java.lang.Thread.interrupted.
417  bool Interrupted() LOCKS_EXCLUDED(wait_mutex_);
418  // Implements java.lang.Thread.isInterrupted.
419  bool IsInterrupted() LOCKS_EXCLUDED(wait_mutex_);
420  bool IsInterruptedLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
421    return interrupted_;
422  }
423  void Interrupt(Thread* self) LOCKS_EXCLUDED(wait_mutex_);
424  void SetInterruptedLocked(bool i) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
425    interrupted_ = i;
426  }
427  void Notify() LOCKS_EXCLUDED(wait_mutex_);
428
429 private:
430  void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);
431
432 public:
433  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
434    return wait_mutex_;
435  }
436
437  ConditionVariable* GetWaitConditionVariable() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
438    return wait_cond_;
439  }
440
441  Monitor* GetWaitMonitor() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
442    return wait_monitor_;
443  }
444
445  void SetWaitMonitor(Monitor* mon) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
446    wait_monitor_ = mon;
447  }
448
449
450  // Waiter linked-list support.
451  Thread* GetWaitNext() const {
452    return tlsPtr_.wait_next;
453  }
454
455  void SetWaitNext(Thread* next) {
456    tlsPtr_.wait_next = next;
457  }
458
459  mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
460    return tlsPtr_.class_loader_override;
461  }
462
463  void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override)
464      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
465
466  // Create the internal representation of a stack trace, which is more time-
467  // and space-efficient to compute than a StackTraceElement[].
468  template<bool kTransactionActive>
469  jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
470      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
471
472  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
473  // StackTraceElement[]. If output_array is NULL, a new array is created; otherwise as many
474  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
475  // with the number of valid frames in the returned array.
476  static jobjectArray InternalStackTraceToStackTraceElementArray(
477      const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
478      jobjectArray output_array = nullptr, int* stack_depth = nullptr)
479      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
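
  // The two-step pattern for building a managed stack trace (illustrative):
  //
  //   jobject internal = soa.Self()->CreateInternalStackTrace<false>(soa);
  //   jobjectArray trace = InternalStackTraceToStackTraceElementArray(soa, internal);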
480
481  void VisitRoots(RootCallback* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
482
483  ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
484
485  //
486  // Offsets of various members of native Thread class, used by compiled code.
487  //
488
489  template<size_t pointer_size>
490  static ThreadOffset<pointer_size> ThinLockIdOffset() {
491    return ThreadOffset<pointer_size>(
492        OFFSETOF_MEMBER(Thread, tls32_) +
493        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
494  }
495
496  template<size_t pointer_size>
497  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
498    return ThreadOffset<pointer_size>(
499        OFFSETOF_MEMBER(Thread, tls32_) +
500        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
501  }
502
503 private:
504  template<size_t pointer_size>
505  static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
506    size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
507    size_t scale;
508    size_t shrink;
509    if (pointer_size == sizeof(void*)) {
510      scale = 1;
511      shrink = 1;
512    } else if (pointer_size > sizeof(void*)) {
513      scale = pointer_size / sizeof(void*);
514      shrink = 1;
515    } else {
516      DCHECK_GT(sizeof(void*), pointer_size);
517      scale = 1;
518      shrink = sizeof(void*) / pointer_size;
519    }
520    return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
521  }
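
  // Worked example of the scaling above (illustrative numbers): when generating code for a
  // 64-bit target on a 32-bit host, pointer_size == 8 and sizeof(void*) == 4, so scale == 2
  // and shrink == 1; the member occupying the third pointer-sized slot of
  // tls_ptr_sized_values (host offset 8) is therefore reported at tlsPtr_ offset 16 on the
  // target. When host and target pointer sizes match, scale == shrink == 1 and the offset
  // passes through unchanged.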
522
523 public:
524  template<size_t pointer_size>
525  static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
526    return ThreadOffsetFromTlsPtr<pointer_size>(
527        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
528  }
529
530  template<size_t pointer_size>
531  static ThreadOffset<pointer_size> InterpreterEntryPointOffset(size_t interp_entrypoint_offset) {
532    return ThreadOffsetFromTlsPtr<pointer_size>(
533        OFFSETOF_MEMBER(tls_ptr_sized_values, interpreter_entrypoints) + interp_entrypoint_offset);
534  }
535
536  template<size_t pointer_size>
537  static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
538    return ThreadOffsetFromTlsPtr<pointer_size>(
539        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
540  }
541
542  template<size_t pointer_size>
543  static ThreadOffset<pointer_size> PortableEntryPointOffset(size_t port_entrypoint_offset) {
544    return ThreadOffsetFromTlsPtr<pointer_size>(
545        OFFSETOF_MEMBER(tls_ptr_sized_values, portable_entrypoints) + port_entrypoint_offset);
546  }
547
548  template<size_t pointer_size>
549  static ThreadOffset<pointer_size> SelfOffset() {
550    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
551  }
552
553  template<size_t pointer_size>
554  static ThreadOffset<pointer_size> ExceptionOffset() {
555    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
556  }
557
558  template<size_t pointer_size>
559  static ThreadOffset<pointer_size> PeerOffset() {
560    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
561  }
562
563
564  template<size_t pointer_size>
565  static ThreadOffset<pointer_size> CardTableOffset() {
566    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
567  }
568
569  template<size_t pointer_size>
570  static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
571    return ThreadOffsetFromTlsPtr<pointer_size>(
572        OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
573  }
574
575  // Size of the stack, less any space reserved for stack overflow handling.
576  size_t GetStackSize() const {
577    return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
578  }
579
580  byte* GetStackEndForInterpreter(bool implicit_overflow_check) const {
581    if (implicit_overflow_check) {
582      // The interpreter needs the extra overflow bytes that stack_end does
583      // not include.
584      return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
585    } else {
586      return tlsPtr_.stack_end;
587    }
588  }
589
590  byte* GetStackEnd() const {
591    return tlsPtr_.stack_end;
592  }
593
594  // Set the stack end to the value to be used while handling a stack overflow.
595  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
596
597  // Set the stack end to the value to be used during regular execution.
598  void ResetDefaultStackEnd(bool implicit_overflow_check) {
599    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
600    // to throw a StackOverflowError.
601    if (implicit_overflow_check) {
602      // For implicit checks we also need to add in the protected region above the
603      // overflow region.
604      tlsPtr_.stack_end = tlsPtr_.stack_begin + kStackOverflowImplicitCheckSize;
605    } else {
606      tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
607    }
608  }
609
610  // Install the protected region for implicit stack checks.
611  void InstallImplicitProtection();
612
613  bool IsHandlingStackOverflow() const {
614    return tlsPtr_.stack_end == tlsPtr_.stack_begin;
615  }
616
617  template<size_t pointer_size>
618  static ThreadOffset<pointer_size> StackEndOffset() {
619    return ThreadOffsetFromTlsPtr<pointer_size>(
620        OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
621  }
622
623  template<size_t pointer_size>
624  static ThreadOffset<pointer_size> JniEnvOffset() {
625    return ThreadOffsetFromTlsPtr<pointer_size>(
626        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
627  }
628
629  template<size_t pointer_size>
630  static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
631    return ThreadOffsetFromTlsPtr<pointer_size>(
632        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
633        ManagedStack::TopQuickFrameOffset());
634  }
635
636  template<size_t pointer_size>
637  static ThreadOffset<pointer_size> TopOfManagedStackPcOffset() {
638    return ThreadOffsetFromTlsPtr<pointer_size>(
639        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
640        ManagedStack::TopQuickFramePcOffset());
641  }
642
643  const ManagedStack* GetManagedStack() const {
644    return &tlsPtr_.managed_stack;
645  }
646
647  // Linked list recording fragments of managed stack.
648  void PushManagedStackFragment(ManagedStack* fragment) {
649    tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
650  }
651  void PopManagedStackFragment(const ManagedStack& fragment) {
652    tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
653  }
654
655  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
656    return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
657  }
658
659  ShadowFrame* PopShadowFrame() {
660    return tlsPtr_.managed_stack.PopShadowFrame();
661  }
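
  // Illustrative pairing (the interpreter pushes a frame for the method it is about to
  // execute and pops it on the way out; names are arbitrary):
  //
  //   self->PushShadowFrame(new_frame);
  //   //   ... interpret the method using 'new_frame' ...
  //   ShadowFrame* popped = self->PopShadowFrame();  // Returns 'new_frame'; the previous
  //                                                  // top shadow frame is restored.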
662
663  template<size_t pointer_size>
664  static ThreadOffset<pointer_size> TopShadowFrameOffset() {
665    return ThreadOffsetFromTlsPtr<pointer_size>(
666        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
667        ManagedStack::TopShadowFrameOffset());
668  }
669
670  // Number of references allocated in JNI ShadowFrames on this thread.
671  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
672    return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
673  }
674
675  // Number of references in handle scope on this thread.
676  size_t NumHandleReferences();
677
678  // Number of references allocated in handle scopes & JNI shadow frames on this thread.
679  size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
680    return NumHandleReferences() + NumJniShadowFrameReferences();
681  }
682
683  // Is the given obj in one of this thread's handle scopes?
684  bool HandleScopeContains(jobject obj) const;
685
686  void HandleScopeVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id)
687      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
688
689  HandleScope* GetTopHandleScope() {
690    return tlsPtr_.top_handle_scope;
691  }
692
693  void PushHandleScope(HandleScope* handle_scope) {
694    handle_scope->SetLink(tlsPtr_.top_handle_scope);
695    tlsPtr_.top_handle_scope = handle_scope;
696  }
697
698  HandleScope* PopHandleScope() {
699    HandleScope* handle_scope = tlsPtr_.top_handle_scope;
700    DCHECK(handle_scope != nullptr);
701    tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
702    return handle_scope;
703  }
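
  // Illustrative pairing (normally done for callers by StackHandleScope, whose constructor
  // pushes itself and whose destructor pops; names are arbitrary):
  //
  //   StackHandleScope<1> hs(self);                          // Pushes onto top_handle_scope.
  //   Handle<mirror::Class> klass(hs.NewHandle(raw_klass));  // 'klass' is now GC-visible.
  //   //   ... safe to suspend; the GC can relocate the object and update the handle ...
  //   // 'hs' pops itself when it goes out of scope.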
704
705  template<size_t pointer_size>
706  static ThreadOffset<pointer_size> TopHandleScopeOffset() {
707    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
708                                                                top_handle_scope));
709  }
710
711  DebugInvokeReq* GetInvokeReq() const {
712    return tlsPtr_.debug_invoke_req;
713  }
714
715  SingleStepControl* GetSingleStepControl() const {
716    return tlsPtr_.single_step_control;
717  }
718
719  // Returns the fake exception used to activate deoptimization.
720  static mirror::Throwable* GetDeoptimizationException() {
721    return reinterpret_cast<mirror::Throwable*>(-1);
722  }
723
724  void SetDeoptimizationShadowFrame(ShadowFrame* sf);
725  void SetDeoptimizationReturnValue(const JValue& ret_val);
726
727  ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);
728
729  bool HasDeoptimizationShadowFrame() const {
730    return tlsPtr_.deoptimization_shadow_frame != nullptr;
731  }
732
733  void SetShadowFrameUnderConstruction(ShadowFrame* sf);
734  void ClearShadowFrameUnderConstruction();
735
736  bool HasShadowFrameUnderConstruction() const {
737    return tlsPtr_.shadow_frame_under_construction != nullptr;
738  }
739
740  std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
741    return tlsPtr_.instrumentation_stack;
742  }
743
744  std::vector<mirror::ArtMethod*>* GetStackTraceSample() const {
745    return tlsPtr_.stack_trace_sample;
746  }
747
748  void SetStackTraceSample(std::vector<mirror::ArtMethod*>* sample) {
749    tlsPtr_.stack_trace_sample = sample;
750  }
751
752  uint64_t GetTraceClockBase() const {
753    return tls64_.trace_clock_base;
754  }
755
756  void SetTraceClockBase(uint64_t clock_base) {
757    tls64_.trace_clock_base = clock_base;
758  }
759
760  BaseMutex* GetHeldMutex(LockLevel level) const {
761    return tlsPtr_.held_mutexes[level];
762  }
763
764  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
765    tlsPtr_.held_mutexes[level] = mutex;
766  }
767
768  void RunCheckpointFunction();
769
770  bool ReadFlag(ThreadFlag flag) const {
771    return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
772  }
773
774  bool TestAllFlags() const {
775    return (tls32_.state_and_flags.as_struct.flags != 0);
776  }
777
778  void AtomicSetFlag(ThreadFlag flag) {
779    tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flag);
780  }
781
782  void AtomicClearFlag(ThreadFlag flag) {
783    tls32_.state_and_flags.as_atomic_int.FetchAndAndSequentiallyConsistent(-1 ^ flag);
784  }
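
  // Illustrative flag-polling shape, as performed at suspend checks (a sketch of the logic,
  // not the exact runtime code):
  //
  //   if (self->ReadFlag(kCheckpointRequest)) {
  //     self->RunCheckpointFunction();  // Runs and clears the pending checkpoint(s).
  //   }
  //   if (self->ReadFlag(kSuspendRequest)) {
  //     self->FullSuspendCheck();       // Blocks until the suspend count drops to zero.
  //   }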
785
786  void ResetQuickAllocEntryPointsForThread();
787
788  // Returns the remaining space in the TLAB.
789  size_t TlabSize() const;
790  // Doesn't check that there is room.
791  mirror::Object* AllocTlab(size_t bytes);
792  void SetTlab(byte* start, byte* end);
793  bool HasTlab() const;
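
  // Illustrative bump-pointer fast path built on the TLAB accessors (the real allocation
  // fast path lives in the heap code and is more involved; 'byte_count' is arbitrary):
  //
  //   mirror::Object* obj = nullptr;
  //   if (self->TlabSize() >= byte_count) {
  //     obj = self->AllocTlab(byte_count);  // Pure pointer bump, no locking.
  //   } else {
  //     // Fall back to a slower path that may refill the TLAB or allocate directly.
  //   }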
794
795  // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
796  // equal to a valid pointer.
797  // TODO: does this need to be atomic?  I don't think so.
798  void RemoveSuspendTrigger() {
799    tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
800  }
801
802  // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
803  // The next time a suspend check is done, it will load from the value at this address
804  // and trigger a SIGSEGV.
805  void TriggerSuspend() {
806    tlsPtr_.suspend_trigger = nullptr;
807  }
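
  // Conceptually, the implicit suspend check emitted in generated code is a load through
  // this slot (illustrative; the actual instruction sequence is architecture-specific):
  //
  //   uintptr_t* trigger = tlsPtr_.suspend_trigger;
  //   uintptr_t unused = *trigger;  // Faults once TriggerSuspend() has nulled the slot.
  //
  // The runtime's fault handler then turns the SIGSEGV into an explicit suspend check.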
808
809
810  // Push an object onto the allocation stack.
811  bool PushOnThreadLocalAllocationStack(mirror::Object* obj);
812
813  // Set the thread local allocation pointers to the given pointers.
814  void SetThreadLocalAllocationStack(mirror::Object** start, mirror::Object** end);
815
816  // Resets the thread local allocation pointers.
817  void RevokeThreadLocalAllocationStack();
818
819  size_t GetThreadLocalBytesAllocated() const {
820    return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
821  }
822
823  size_t GetThreadLocalObjectsAllocated() const {
824    return tlsPtr_.thread_local_objects;
825  }
826
827  void* GetRosAllocRun(size_t index) const {
828    return tlsPtr_.rosalloc_runs[index];
829  }
830
831  void SetRosAllocRun(size_t index, void* run) {
832    tlsPtr_.rosalloc_runs[index] = run;
833  }
834
835  bool IsExceptionReportedToInstrumentation() const {
836    return tls32_.is_exception_reported_to_instrumentation_;
837  }
838
839  void SetExceptionReportedToInstrumentation(bool reported) {
840    tls32_.is_exception_reported_to_instrumentation_ = reported;
841  }
842
843  void ProtectStack();
844  bool UnprotectStack();
845
846  void NoteSignalBeingHandled() {
847    if (tls32_.handling_signal_) {
848      LOG(FATAL) << "Detected signal while processing a signal";
849    }
850    tls32_.handling_signal_ = true;
851  }
852
853  void NoteSignalHandlerDone() {
854    tls32_.handling_signal_ = false;
855  }
856
857 private:
858  explicit Thread(bool daemon);
859  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
860                           Locks::thread_suspend_count_lock_);
861  void Destroy();
862
863  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
864
865  template<bool kTransactionActive>
866  void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
867                jobject thread_name, jint thread_priority)
868      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
869
870  // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
871  // Dbg::Disconnected.
872  ThreadState SetStateUnsafe(ThreadState new_state) {
873    ThreadState old_state = GetState();
874    tls32_.state_and_flags.as_struct.state = new_state;
875    return old_state;
876  }
877
878  void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
879
880  void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
881  void DumpStack(std::ostream& os) const
882      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
883      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
884
885  // Out-of-line conveniences for debugging in gdb.
886  static Thread* CurrentFromGdb();  // Like Thread::Current.
887  // Like Thread::Dump(std::cerr).
888  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
889
890  static void* CreateCallback(void* arg);
891
892  void HandleUncaughtExceptions(ScopedObjectAccess& soa)
893      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
894  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
895
896  void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
897  void InitCardTable();
898  void InitCpu();
899  void CleanupCpu();
900  void InitTlsEntryPoints();
901  void InitTid();
902  void InitPthreadKeySelf();
903  void InitStackHwm();
904
905  void SetUpAlternateSignalStack();
906  void TearDownAlternateSignalStack();
907
908  // 32 bits of atomically changed state and flags. Keeping them as 32 bits allows an atomic CAS
909  // to change the state from Suspended to Runnable without a suspend request occurring.
910  union PACKED(4) StateAndFlags {
911    StateAndFlags() {}
912    struct PACKED(4) {
913      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
914      // ThreadFlags for bit field meanings.
915      volatile uint16_t flags;
916      // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
917      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
918      // operation. If a thread is suspended and a suspend_request is present, the thread may not
919      // change to Runnable, as a GC or other operation is in progress.
920      volatile uint16_t state;
921    } as_struct;
922    AtomicInteger as_atomic_int;
923    volatile int32_t as_int;
924
925   private:
926    // gcc does not handle struct with volatile member assignments correctly.
927    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
928    DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
929  };
930  COMPILE_ASSERT(sizeof(StateAndFlags) == sizeof(int32_t), weird_state_and_flags_size);
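
  // Illustrative sketch of why the packing matters: moving to Runnable can be a single
  // 32-bit CAS that fails if any flag (such as kSuspendRequest) is set concurrently. This
  // is a simplified version of the logic in TransitionFromSuspendedToRunnable, not the
  // exact runtime code:
  //
  //   union StateAndFlags old_sf, new_sf;
  //   old_sf.as_int = tls32_.state_and_flags.as_int;
  //   if (old_sf.as_struct.flags == 0) {  // No suspend or checkpoint request pending.
  //     new_sf.as_int = old_sf.as_int;
  //     new_sf.as_struct.state = kRunnable;
  //     // Succeeds only if neither the state nor the flags changed underneath us.
  //     tls32_.state_and_flags.as_atomic_int.CompareExchangeWeakSequentiallyConsistent(
  //         old_sf.as_int, new_sf.as_int);
  //   }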
931
932  static void ThreadExitCallback(void* arg);
933
934  // Maximum number of checkpoint functions.
935  static constexpr uint32_t kMaxCheckpoints = 3;
936
937  // Has Thread::Startup been called?
938  static bool is_started_;
939
940  // TLS key used to retrieve the Thread*.
941  static pthread_key_t pthread_key_self_;
942
943  // Used to notify threads that they should attempt to resume; they will suspend again if
944  // their suspend count is > 0.
945  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
946
947  /***********************************************************************************************/
948  // Thread local storage. Fields are grouped by size to enable 32-bit <-> 64-bit offset mapping
949  // that accounts for pointer size differences. To encourage shorter encodings, more frequently
950  // used values appear first if possible.
951  /***********************************************************************************************/
952
953  struct PACKED(4) tls_32bit_sized_values {
954    // We have no control over the size of 'bool', but want our boolean fields
955    // to be 4-byte quantities.
956    typedef uint32_t bool32_t;
957
958    explicit tls_32bit_sized_values(bool is_daemon) :
959      suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
960      daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
961      thread_exit_check_count(0), is_exception_reported_to_instrumentation_(false),
962      handling_signal_(false), padding_(0) {
963    }
964
965    union StateAndFlags state_and_flags;
966    COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
967                   sizeof_state_and_flags_and_int32_are_different);
968
969    // A non-zero value is used to tell the current thread to enter a safe point
970    // at the next poll.
971    int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
972
973    // How much of 'suspend_count_' is by request of the debugger, used to set things right
974    // when the debugger detaches. Must be <= suspend_count_.
975    int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
976
977    // Thin lock thread id. This is a small integer used by the thin lock implementation.
978    // This is not to be confused with the native thread's tid, nor is it the value returned
979    // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
980    // important difference between this id and the ids visible to managed code is that these
981    // ones get reused (to ensure that they fit in the number of bits available).
982    uint32_t thin_lock_thread_id;
983
984    // System thread id.
985    uint32_t tid;
986
987    // Is the thread a daemon?
988    const bool32_t daemon;
989
990    // A boolean telling us whether we're recursively throwing OOME.
991    bool32_t throwing_OutOfMemoryError;
992
993    // A positive value implies we're in a region where thread suspension isn't expected.
994    uint32_t no_thread_suspension;
995
996    // How many times has our pthread key's destructor been called?
997    uint32_t thread_exit_check_count;
998
999    // When true this field indicates that the exception associated with this thread has already
1000    // been reported to instrumentation.
1001    bool32_t is_exception_reported_to_instrumentation_;
1002
1003    // True if signal is being handled by this thread.
1004    bool32_t handling_signal_;
1005
1006    // Padding to make the size aligned to 8.  Remove this if we add another 32 bit field.
1007    int32_t padding_;
1008  } tls32_;
1009
1010  struct PACKED(8) tls_64bit_sized_values {
1011    tls_64bit_sized_values() : trace_clock_base(0), deoptimization_return_value() {
1012    }
1013
1014    // The clock base used for tracing.
1015    uint64_t trace_clock_base;
1016
1017    // Return value used by deoptimization.
1018    JValue deoptimization_return_value;
1019
1020    RuntimeStats stats;
1021  } tls64_;
1022
1023  struct PACKED(4) tls_ptr_sized_values {
1024      tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
1025      managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), self(nullptr), opeer(nullptr),
1026      jpeer(nullptr), stack_begin(nullptr), stack_size(0), throw_location(),
1027      stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
1028      top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
1029      instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
1030      deoptimization_shadow_frame(nullptr), shadow_frame_under_construction(nullptr), name(nullptr),
1031      pthread_self(0), last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
1032      thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
1033      thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr) {
1034    }
1035
1036    // The biased card table, see CardTable for details.
1037    byte* card_table;
1038
1039    // The pending exception or NULL.
1040    mirror::Throwable* exception;
1041
1042    // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
1043    // We leave extra space so there's room for the code that throws StackOverflowError.
1044    byte* stack_end;
1045
1046    // The top of the managed stack, often manipulated directly by compiler-generated code.
1047    ManagedStack managed_stack;
1048
1049    // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
1050    // normally set to the address of itself.
1051    uintptr_t* suspend_trigger;
1052
1053    // Every thread may have an associated JNI environment
1054    JNIEnvExt* jni_env;
1055
1056    // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1057    // is easy but getting the address of Thread::Current is hard. This field can be read off of
1058    // Thread::Current to give the address.
1059    Thread* self;
1060
1061    // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
1062    // start up, until the thread is registered and the local opeer_ is used.
1063    mirror::Object* opeer;
1064    jobject jpeer;
1065
1066    // The "lowest addressable byte" of the stack.
1067    byte* stack_begin;
1068
1069    // Size of the stack.
1070    size_t stack_size;
1071
1072    // The location the current exception was thrown from.
1073    ThrowLocation throw_location;
1074
1075    // Pointer to previous stack trace captured by sampling profiler.
1076    std::vector<mirror::ArtMethod*>* stack_trace_sample;
1077
1078    // The next thread in the wait set this thread is part of or NULL if not waiting.
1079    Thread* wait_next;
1080
1081    // If we're blocked in MonitorEnter, this is the object we're trying to lock.
1082    mirror::Object* monitor_enter_object;
1083
1084    // Top of linked list of handle scopes or nullptr for none.
1085    HandleScope* top_handle_scope;
1086
1087    // Needed to get the right ClassLoader in JNI_OnLoad, but also
1088    // useful for testing.
1089    mirror::ClassLoader* class_loader_override;
1090
1091    // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
1092    Context* long_jump_context;
1093
1094    // Additional stack used by method instrumentation to store method and return pc values.
1095    // Stored as a pointer since std::deque is not PACKED.
1096    std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;
1097
1098    // JDWP invoke-during-breakpoint support.
1099    DebugInvokeReq* debug_invoke_req;
1100
1101    // JDWP single-stepping support.
1102    SingleStepControl* single_step_control;
1103
1104    // Shadow frame stack that is used temporarily during the deoptimization of a method.
1105    ShadowFrame* deoptimization_shadow_frame;
1106
1107    // Shadow frame stack that is currently under construction but not yet on the stack.
1108    ShadowFrame* shadow_frame_under_construction;
1109
1110    // A cached copy of the java.lang.Thread's name.
1111    std::string* name;
1112
1113    // A cached pthread_t for the pthread underlying this Thread*.
1114    pthread_t pthread_self;
1115
1116    // If no_thread_suspension_ is > 0, what is causing that assertion.
1117    const char* last_no_thread_suspension_cause;
1118
1119    // Pending checkpoint functions, or NULL entries if none are pending. Installation is guarded
1120    // by Locks::thread_suspend_count_lock_.
1121    Closure* checkpoint_functions[kMaxCheckpoints];
1122
1123    // Entrypoint function pointers.
1124    // TODO: move this to more of a global offset table model to avoid per-thread duplication.
1125    InterpreterEntryPoints interpreter_entrypoints;
1126    JniEntryPoints jni_entrypoints;
1127    PortableEntryPoints portable_entrypoints;
1128    QuickEntryPoints quick_entrypoints;
1129
1130    // Thread-local allocation pointer.
1131    byte* thread_local_start;
1132    byte* thread_local_pos;
1133    byte* thread_local_end;
1134    size_t thread_local_objects;
1135
1136    // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
1137    void* rosalloc_runs[kNumRosAllocThreadLocalSizeBrackets];
1138
1139    // Thread-local allocation stack data/routines.
1140    mirror::Object** thread_local_alloc_stack_top;
1141    mirror::Object** thread_local_alloc_stack_end;
1142
1143    // Support for Mutex lock hierarchy bug detection.
1144    BaseMutex* held_mutexes[kLockLevelCount];
1145  } tlsPtr_;
1146
1147  // Guards the 'interrupted_' and 'wait_monitor_' members.
1148  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1149
1150  // Condition variable waited upon during a wait.
1151  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
1152  // Pointer to the monitor lock we're currently waiting on or NULL if not waiting.
1153  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
1154
1155  // Thread "interrupted" status; stays raised until queried or thrown.
1156  bool interrupted_ GUARDED_BY(wait_mutex_);
1157
1158  friend class Dbg;  // For SetStateUnsafe.
1159  friend class gc::collector::SemiSpace;  // For getting stack traces.
1160  friend class Runtime;  // For CreatePeer.
1161  friend class QuickExceptionHandler;  // For dumping the stack.
1162  friend class ScopedThreadStateChange;
1163  friend class SignalCatcher;  // For SetStateUnsafe.
1164  friend class StubTest;  // For accessing entrypoints.
1165  friend class ThreadList;  // For ~Thread and Destroy.
1166
1167  friend class EntrypointsOrderTest;  // To test the order of tls entries.
1168
1169  DISALLOW_COPY_AND_ASSIGN(Thread);
1170};
1171
1172std::ostream& operator<<(std::ostream& os, const Thread& thread);
1173std::ostream& operator<<(std::ostream& os, const ThreadState& state);
1174
1175}  // namespace art
1176
1177#endif  // ART_RUNTIME_THREAD_H_
1178