thread.h revision 535a3fbc08e1577f43aec7402cab80c14ca64c41
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_THREAD_H_
18#define ART_RUNTIME_THREAD_H_
19
20#include <bitset>
21#include <deque>
22#include <iosfwd>
23#include <list>
24#include <memory>
25#include <string>
26
27#include "atomic.h"
28#include "base/macros.h"
29#include "base/mutex.h"
30#include "entrypoints/interpreter/interpreter_entrypoints.h"
31#include "entrypoints/jni/jni_entrypoints.h"
32#include "entrypoints/portable/portable_entrypoints.h"
33#include "entrypoints/quick/quick_entrypoints.h"
34#include "globals.h"
35#include "handle_scope.h"
36#include "instruction_set.h"
37#include "jvalue.h"
38#include "object_callbacks.h"
39#include "offsets.h"
40#include "runtime_stats.h"
41#include "stack.h"
42#include "thread_state.h"
43#include "throw_location.h"
44
45namespace art {
46
47namespace gc {
48namespace collector {
49  class SemiSpace;
50}  // namespace collector
51}  // namespace gc
52
53namespace mirror {
54  class ArtMethod;
55  class Array;
56  class Class;
57  class ClassLoader;
58  class Object;
59  template<class T> class ObjectArray;
60  template<class T> class PrimitiveArray;
61  typedef PrimitiveArray<int32_t> IntArray;
62  class StackTraceElement;
63  class Throwable;
64}  // namespace mirror
65class BaseMutex;
66class ClassLinker;
67class Closure;
68class Context;
69struct DebugInvokeReq;
70class DexFile;
71class JavaVMExt;
72struct JNIEnvExt;
73class Monitor;
74class Runtime;
75class ScopedObjectAccessAlreadyRunnable;
76class ShadowFrame;
77struct SingleStepControl;
78class Thread;
79class ThreadList;
80
81// Thread priorities. These must match the Thread.MIN_PRIORITY,
82// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
83enum ThreadPriority {
84  kMinThreadPriority = 1,
85  kNormThreadPriority = 5,
86  kMaxThreadPriority = 10,
87};
88
89enum ThreadFlag {
90  kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
91                          // safepoint handler.
92  kCheckpointRequest = 2  // Request that the thread do some checkpoint work and then continue.
93};
94
95static constexpr size_t kNumRosAllocThreadLocalSizeBrackets = 34;
96
97class Thread {
98 public:
99  // How much of the reserved bytes is reserved for incoming signals.
100  static constexpr size_t kStackOverflowSignalReservedBytes = 2 * KB;
101
102  // For implicit overflow checks we reserve an extra piece of memory at the bottom
103  // of the stack (lowest memory).  The higher portion of the memory
104  // is protected against reads and the lower is available for use while
105  // throwing the StackOverflow exception.
106  static constexpr size_t kStackOverflowProtectedSize = 16 * KB;
107  static const size_t kStackOverflowImplicitCheckSize;
108
109  // Creates a new native thread corresponding to the given managed peer.
110  // Used to implement Thread.start.
111  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
112
113  // Attaches the calling native thread to the runtime, returning the new native peer.
114  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
115  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
116                        bool create_peer);
117
118  // Reset internal state of child thread after fork.
119  void InitAfterFork();
120
121  static Thread* Current();
122
123  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
124                                   mirror::Object* thread_peer)
125      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
126      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
127      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
128  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
129      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
130      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
131      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
132
133  // Translates 172 to pAllocArrayFromCode and so on.
134  template<size_t size_of_pointers>
135  static void DumpThreadOffset(std::ostream& os, uint32_t offset);
136
137  // Dumps a one-line summary of thread state (used for operator<<).
138  void ShortDump(std::ostream& os) const;
139
140  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
141  void Dump(std::ostream& os) const
142      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
143      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
144
145  void DumpJavaStack(std::ostream& os) const
146      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
147      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
148
149  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
150  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
151  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
152      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
153      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
154
  // Returns this thread's current ThreadState, read from the packed
  // state_and_flags word. The DCHECKs bound the raw value between
  // kTerminated and kSuspended, the valid ThreadState range.
  ThreadState GetState() const {
    DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
    DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
    return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
  }
160
161  ThreadState SetState(ThreadState new_state);
162
163  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
164    return tls32_.suspend_count;
165  }
166
167  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
168    return tls32_.debug_suspend_count;
169  }
170
  // Returns true if this thread is suspended: not Runnable AND has a pending
  // suspend request.
  bool IsSuspended() const {
    union StateAndFlags state_and_flags;
    // Read the whole 32-bit word once so 'state' and 'flags' come from a
    // single consistent snapshot rather than two separate volatile loads.
    state_and_flags.as_int = tls32_.state_and_flags.as_int;
    return state_and_flags.as_struct.state != kRunnable &&
        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
  }
177
178  void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
179      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);
180
181  bool RequestCheckpoint(Closure* function)
182      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);
183
184  // Called when thread detected that the thread_suspend_count_ was non-zero. Gives up share of
185  // mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
186  void FullSuspendCheck()
187      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
188      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
189
190  // Transition from non-runnable to runnable state acquiring share on mutator_lock_.
191  ThreadState TransitionFromSuspendedToRunnable()
192      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
193      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
194      ALWAYS_INLINE;
195
196  // Transition from runnable into a state where mutator privileges are denied. Releases share of
197  // mutator lock.
198  void TransitionFromRunnableToSuspended(ThreadState new_state)
199      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
200      UNLOCK_FUNCTION(Locks::mutator_lock_)
201      ALWAYS_INLINE;
202
203  // Once called thread suspension will cause an assertion failure.
204  const char* StartAssertNoThreadSuspension(const char* cause) {
205    if (kIsDebugBuild) {
206      CHECK(cause != NULL);
207      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
208      tls32_.no_thread_suspension++;
209      tlsPtr_.last_no_thread_suspension_cause = cause;
210      return previous_cause;
211    } else {
212      return nullptr;
213    }
214  }
215
  // End region where no thread suspension is expected. 'old_cause' is the
  // value returned by the matching StartAssertNoThreadSuspension call;
  // passing it back restores the enclosing region's cause.
  void EndAssertNoThreadSuspension(const char* old_cause) {
    if (kIsDebugBuild) {
      // A null old_cause is only legal when leaving the outermost region
      // (nesting depth exactly 1 before the decrement).
      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
      CHECK_GT(tls32_.no_thread_suspension, 0U);
      tls32_.no_thread_suspension--;
      tlsPtr_.last_no_thread_suspension_cause = old_cause;
    }
  }
225
226  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
227
228  bool IsDaemon() const {
229    return tls32_.daemon;
230  }
231
232  bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
233
234  /*
235   * Changes the priority of this thread to match that of the java.lang.Thread object.
236   *
237   * We map a priority value from 1-10 to Linux "nice" values, where lower
238   * numbers indicate higher priority.
239   */
240  void SetNativePriority(int newPriority);
241
242  /*
243   * Returns the thread priority for the current thread by querying the system.
244   * This is useful when attaching a thread through JNI.
245   *
246   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
247   */
248  static int GetNativePriority();
249
250  uint32_t GetThreadId() const {
251    return tls32_.thin_lock_thread_id;
252  }
253
254  pid_t GetTid() const {
255    return tls32_.tid;
256  }
257
258  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
259  mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
260      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
261
262  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
263  // allocation, or locking.
264  void GetThreadName(std::string& name) const;
265
266  // Sets the thread's name.
267  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
268
269  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
270  uint64_t GetCpuMicroTime() const;
271
272  mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
273    CHECK(tlsPtr_.jpeer == nullptr);
274    return tlsPtr_.opeer;
275  }
276
277  bool HasPeer() const {
278    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
279  }
280
281  RuntimeStats* GetStats() {
282    return &tls64_.stats;
283  }
284
285  bool IsStillStarting() const;
286
287  bool IsExceptionPending() const {
288    return tlsPtr_.exception != nullptr;
289  }
290
  // Returns the pending exception, or nullptr if none. If 'throw_location'
  // is non-null it receives the location the exception was thrown from.
  mirror::Throwable* GetException(ThrowLocation* throw_location) const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (throw_location != nullptr) {
      *throw_location = tlsPtr_.throw_location;
    }
    return tlsPtr_.exception;
  }
298
299  void AssertNoPendingException() const;
300  void AssertNoPendingExceptionForNewException(const char* msg) const;
301
302  void SetException(const ThrowLocation& throw_location, mirror::Throwable* new_exception)
303      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
304    CHECK(new_exception != NULL);
305    // TODO: DCHECK(!IsExceptionPending());
306    tlsPtr_.exception = new_exception;
307    tlsPtr_.throw_location = throw_location;
308  }
309
  // Clears the pending exception and its throw location, and resets the
  // instrumentation-reported flag so a future exception is reported anew.
  void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    tlsPtr_.exception = nullptr;
    tlsPtr_.throw_location.Clear();
    SetExceptionReportedToInstrumentation(false);
  }
315
316  // Find catch block and perform long jump to appropriate exception handler
317  void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
318
319  Context* GetLongJumpContext();
320  void ReleaseLongJumpContext(Context* context) {
321    DCHECK(tlsPtr_.long_jump_context == nullptr);
322    tlsPtr_.long_jump_context = context;
323  }
324
325  mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc) const
326      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
327
328  ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
329
330  void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method, uintptr_t pc) {
331    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
332    tlsPtr_.managed_stack.SetTopQuickFramePc(pc);
333  }
334
335  void SetTopOfShadowStack(ShadowFrame* top) {
336    tlsPtr_.managed_stack.SetTopShadowFrame(top);
337  }
338
339  bool HasManagedStack() const {
340    return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
341        (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
342  }
343
344  // If 'msg' is NULL, no detail message is set.
345  void ThrowNewException(const ThrowLocation& throw_location,
346                         const char* exception_class_descriptor, const char* msg)
347      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
348
349  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
350  // used as the new exception's cause.
351  void ThrowNewWrappedException(const ThrowLocation& throw_location,
352                                const char* exception_class_descriptor,
353                                const char* msg)
354      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
355
356  void ThrowNewExceptionF(const ThrowLocation& throw_location,
357                          const char* exception_class_descriptor, const char* fmt, ...)
358      __attribute__((format(printf, 4, 5)))
359      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
360
361  void ThrowNewExceptionV(const ThrowLocation& throw_location,
362                          const char* exception_class_descriptor, const char* fmt, va_list ap)
363      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
364
365  // OutOfMemoryError is special, because we need to pre-allocate an instance.
366  // Only the GC should call this.
367  void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
368
369  static void Startup();
370  static void FinishStartup();
371  static void Shutdown();
372
373  // JNI methods
374  JNIEnvExt* GetJniEnv() const {
375    return tlsPtr_.jni_env;
376  }
377
378  // Convert a jobject into a Object*
379  mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
380
381  mirror::Object* GetMonitorEnterObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
382    return tlsPtr_.monitor_enter_object;
383  }
384
385  void SetMonitorEnterObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
386    tlsPtr_.monitor_enter_object = obj;
387  }
388
389  // Implements java.lang.Thread.interrupted.
390  bool Interrupted() LOCKS_EXCLUDED(wait_mutex_);
391  // Implements java.lang.Thread.isInterrupted.
392  bool IsInterrupted() LOCKS_EXCLUDED(wait_mutex_);
393  bool IsInterruptedLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
394    return interrupted_;
395  }
396  void Interrupt(Thread* self) LOCKS_EXCLUDED(wait_mutex_);
397  void SetInterruptedLocked(bool i) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
398    interrupted_ = i;
399  }
400  void Notify() LOCKS_EXCLUDED(wait_mutex_);
401
402 private:
403  void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);
404
405 public:
406  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
407    return wait_mutex_;
408  }
409
410  ConditionVariable* GetWaitConditionVariable() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
411    return wait_cond_;
412  }
413
414  Monitor* GetWaitMonitor() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
415    return wait_monitor_;
416  }
417
418  void SetWaitMonitor(Monitor* mon) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
419    wait_monitor_ = mon;
420  }
421
422
423  // Waiter link-list support.
424  Thread* GetWaitNext() const {
425    return tlsPtr_.wait_next;
426  }
427
428  void SetWaitNext(Thread* next) {
429    tlsPtr_.wait_next = next;
430  }
431
432  mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
433    return tlsPtr_.class_loader_override;
434  }
435
436  void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override)
437      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
438
439  // Create the internal representation of a stack trace, that is more time
440  // and space efficient to compute than the StackTraceElement[].
441  template<bool kTransactionActive>
442  jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
443      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
444
445  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
446  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
447  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
448  // with the number of valid frames in the returned array.
449  static jobjectArray InternalStackTraceToStackTraceElementArray(
450      const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
451      jobjectArray output_array = nullptr, int* stack_depth = nullptr)
452      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
453
454  void VisitRoots(RootCallback* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
455
456  ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
457
458  //
459  // Offsets of various members of native Thread class, used by compiled code.
460  //
461
462  template<size_t pointer_size>
463  static ThreadOffset<pointer_size> ThinLockIdOffset() {
464    return ThreadOffset<pointer_size>(
465        OFFSETOF_MEMBER(Thread, tls32_) +
466        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
467  }
468
469  template<size_t pointer_size>
470  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
471    return ThreadOffset<pointer_size>(
472        OFFSETOF_MEMBER(Thread, tls32_) +
473        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
474  }
475
476 private:
  // Maps an offset within tlsPtr_ (measured in this process' native pointer
  // size) to the equivalent offset for a target whose pointers are
  // 'pointer_size' bytes. Offsets are scaled up when target pointers are
  // wider than native ones and shrunk when narrower; exactly one of
  // scale/shrink ever differs from 1.
  // NOTE(review): this presumes tls_ptr_sized_values is composed of
  // pointer-sized members so offsets scale linearly -- confirm against the
  // struct definition.
  template<size_t pointer_size>
  static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
    size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
    size_t scale;
    size_t shrink;
    if (pointer_size == sizeof(void*)) {
      scale = 1;
      shrink = 1;
    } else if (pointer_size > sizeof(void*)) {
      scale = pointer_size / sizeof(void*);
      shrink = 1;
    } else {
      DCHECK_GT(sizeof(void*), pointer_size);
      scale = 1;
      shrink = sizeof(void*) / pointer_size;
    }
    return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
  }
495
496 public:
497  template<size_t pointer_size>
498  static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
499    return ThreadOffsetFromTlsPtr<pointer_size>(
500        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
501  }
502
503  template<size_t pointer_size>
504  static ThreadOffset<pointer_size> InterpreterEntryPointOffset(size_t interp_entrypoint_offset) {
505    return ThreadOffsetFromTlsPtr<pointer_size>(
506        OFFSETOF_MEMBER(tls_ptr_sized_values, interpreter_entrypoints) + interp_entrypoint_offset);
507  }
508
509  template<size_t pointer_size>
510  static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
511    return ThreadOffsetFromTlsPtr<pointer_size>(
512        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
513  }
514
515  template<size_t pointer_size>
516  static ThreadOffset<pointer_size> PortableEntryPointOffset(size_t port_entrypoint_offset) {
517    return ThreadOffsetFromTlsPtr<pointer_size>(
518        OFFSETOF_MEMBER(tls_ptr_sized_values, portable_entrypoints) + port_entrypoint_offset);
519  }
520
521  template<size_t pointer_size>
522  static ThreadOffset<pointer_size> SelfOffset() {
523    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
524  }
525
526  template<size_t pointer_size>
527  static ThreadOffset<pointer_size> ExceptionOffset() {
528    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
529  }
530
531  template<size_t pointer_size>
532  static ThreadOffset<pointer_size> PeerOffset() {
533    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
534  }
535
536
537  template<size_t pointer_size>
538  static ThreadOffset<pointer_size> CardTableOffset() {
539    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
540  }
541
542  template<size_t pointer_size>
543  static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
544    return ThreadOffsetFromTlsPtr<pointer_size>(
545        OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
546  }
547
  // Size of stack less any space reserved for stack overflow.
  // stack_end sits above stack_begin by the reserved amount, so the usable
  // size is the total stack size minus that gap.
  size_t GetStackSize() const {
    return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
  }
552
553  byte* GetStackEndForInterpreter(bool implicit_overflow_check) const {
554    if (implicit_overflow_check) {
555      // The interpreter needs the extra overflow bytes that stack_end does
556      // not include.
557      return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
558    } else {
559      return tlsPtr_.stack_end;
560    }
561  }
562
563  byte* GetStackEnd() const {
564    return tlsPtr_.stack_end;
565  }
566
567  // Set the stack end to that to be used during a stack overflow
568  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
569
  // Set the stack end to that to be used during regular execution
  // (undoes SetStackEndForStackOverflow).
  void ResetDefaultStackEnd(bool implicit_overflow_check) {
    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
    // to throw a StackOverflowError.
    if (implicit_overflow_check) {
      // For implicit checks we also need to add in the protected region above the
      // overflow region.
      tlsPtr_.stack_end = tlsPtr_.stack_begin + kStackOverflowImplicitCheckSize;
    } else {
      tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
    }
  }
582
583  // Install the protected region for implicit stack checks.
584  void InstallImplicitProtection(bool is_main_stack);
585
586  bool IsHandlingStackOverflow() const {
587    return tlsPtr_.stack_end == tlsPtr_.stack_begin;
588  }
589
590  template<size_t pointer_size>
591  static ThreadOffset<pointer_size> StackEndOffset() {
592    return ThreadOffsetFromTlsPtr<pointer_size>(
593        OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
594  }
595
596  template<size_t pointer_size>
597  static ThreadOffset<pointer_size> JniEnvOffset() {
598    return ThreadOffsetFromTlsPtr<pointer_size>(
599        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
600  }
601
602  template<size_t pointer_size>
603  static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
604    return ThreadOffsetFromTlsPtr<pointer_size>(
605        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
606        ManagedStack::TopQuickFrameOffset());
607  }
608
609  template<size_t pointer_size>
610  static ThreadOffset<pointer_size> TopOfManagedStackPcOffset() {
611    return ThreadOffsetFromTlsPtr<pointer_size>(
612        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
613        ManagedStack::TopQuickFramePcOffset());
614  }
615
616  const ManagedStack* GetManagedStack() const {
617    return &tlsPtr_.managed_stack;
618  }
619
620  // Linked list recording fragments of managed stack.
621  void PushManagedStackFragment(ManagedStack* fragment) {
622    tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
623  }
624  void PopManagedStackFragment(const ManagedStack& fragment) {
625    tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
626  }
627
628  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
629    return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
630  }
631
632  ShadowFrame* PopShadowFrame() {
633    return tlsPtr_.managed_stack.PopShadowFrame();
634  }
635
636  template<size_t pointer_size>
637  static ThreadOffset<pointer_size> TopShadowFrameOffset() {
638    return ThreadOffsetFromTlsPtr<pointer_size>(
639        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
640        ManagedStack::TopShadowFrameOffset());
641  }
642
643  // Number of references allocated in JNI ShadowFrames on this thread.
644  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
645    return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
646  }
647
648  // Number of references in handle scope on this thread.
649  size_t NumHandleReferences();
650
651  // Number of references allocated in handle scopes & JNI shadow frames on this thread.
652  size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
653    return NumHandleReferences() + NumJniShadowFrameReferences();
654  };
655
656  // Is the given obj in this thread's stack indirect reference table?
657  bool HandleScopeContains(jobject obj) const;
658
659  void HandleScopeVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id)
660      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
661
662  HandleScope* GetTopHandleScope() {
663    return tlsPtr_.top_handle_scope;
664  }
665
  // Pushes 'handle_scope' onto this thread's singly-linked list of handle
  // scopes; the new scope links back to the previous top.
  void PushHandleScope(HandleScope* handle_scope) {
    handle_scope->SetLink(tlsPtr_.top_handle_scope);
    tlsPtr_.top_handle_scope = handle_scope;
  }
670
  // Pops and returns the top handle scope, making its predecessor the new
  // top. Must not be called when the list is empty (DCHECK in debug builds).
  HandleScope* PopHandleScope() {
    HandleScope* handle_scope = tlsPtr_.top_handle_scope;
    DCHECK(handle_scope != nullptr);
    tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
    return handle_scope;
  }
677
678  template<size_t pointer_size>
679  static ThreadOffset<pointer_size> TopHandleScopeOffset() {
680    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
681                                                                top_handle_scope));
682  }
683
684  DebugInvokeReq* GetInvokeReq() const {
685    return tlsPtr_.debug_invoke_req;
686  }
687
688  SingleStepControl* GetSingleStepControl() const {
689    return tlsPtr_.single_step_control;
690  }
691
  // Returns the fake exception used to activate deoptimization.
  static mirror::Throwable* GetDeoptimizationException() {
    // Sentinel value distinguishable from any real object pointer; it is
    // only compared against, never dereferenced.
    return reinterpret_cast<mirror::Throwable*>(-1);
  }
696
697  void SetDeoptimizationShadowFrame(ShadowFrame* sf);
698  void SetDeoptimizationReturnValue(const JValue& ret_val);
699
700  ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);
701
702  bool HasDeoptimizationShadowFrame() const {
703    return tlsPtr_.deoptimization_shadow_frame != nullptr;
704  }
705
706  void SetShadowFrameUnderConstruction(ShadowFrame* sf);
707  void ClearShadowFrameUnderConstruction();
708
709  bool HasShadowFrameUnderConstruction() const {
710    return tlsPtr_.shadow_frame_under_construction != nullptr;
711  }
712
713  std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
714    return tlsPtr_.instrumentation_stack;
715  }
716
717  std::vector<mirror::ArtMethod*>* GetStackTraceSample() const {
718    return tlsPtr_.stack_trace_sample;
719  }
720
721  void SetStackTraceSample(std::vector<mirror::ArtMethod*>* sample) {
722    tlsPtr_.stack_trace_sample = sample;
723  }
724
725  uint64_t GetTraceClockBase() const {
726    return tls64_.trace_clock_base;
727  }
728
729  void SetTraceClockBase(uint64_t clock_base) {
730    tls64_.trace_clock_base = clock_base;
731  }
732
733  BaseMutex* GetHeldMutex(LockLevel level) const {
734    return tlsPtr_.held_mutexes[level];
735  }
736
737  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
738    tlsPtr_.held_mutexes[level] = mutex;
739  }
740
741  void RunCheckpointFunction();
742
743  bool ReadFlag(ThreadFlag flag) const {
744    return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
745  }
746
747  bool TestAllFlags() const {
748    return (tls32_.state_and_flags.as_struct.flags != 0);
749  }
750
  // Atomically ORs 'flag' into the flags half of state_and_flags.
  void AtomicSetFlag(ThreadFlag flag) {
    tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flag);
  }
754
  // Atomically clears 'flag' from state_and_flags. '-1 ^ flag' is ~flag,
  // i.e. a mask with only the flag's bits zeroed, applied by an atomic AND.
  void AtomicClearFlag(ThreadFlag flag) {
    tls32_.state_and_flags.as_atomic_int.FetchAndAndSequentiallyConsistent(-1 ^ flag);
  }
758
759  void ResetQuickAllocEntryPointsForThread();
760
761  // Returns the remaining space in the TLAB.
762  size_t TlabSize() const;
763  // Doesn't check that there is room.
764  mirror::Object* AllocTlab(size_t bytes);
765  void SetTlab(byte* start, byte* end);
766  bool HasTlab() const;
767
  // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
  // equal to a valid pointer.
  // TODO: does this need to atomic?  I don't think so.
  void RemoveSuspendTrigger() {
    // Pointing the trigger at its own slot guarantees a valid, readable
    // address, so the implicit suspend-check load will not fault.
    tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
  }
774
  // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
  // The next time a suspend check is done, it will load from the value at this address
  // and trigger a SIGSEGV.
  void TriggerSuspend() {
    // nullptr makes the implicit-check load fault, diverting the thread into
    // the suspend path via the SIGSEGV handler.
    tlsPtr_.suspend_trigger = nullptr;
  }
781
782
783  // Push an object onto the allocation stack.
784  bool PushOnThreadLocalAllocationStack(mirror::Object* obj);
785
786  // Set the thread local allocation pointers to the given pointers.
787  void SetThreadLocalAllocationStack(mirror::Object** start, mirror::Object** end);
788
789  // Resets the thread local allocation pointers.
790  void RevokeThreadLocalAllocationStack();
791
792  size_t GetThreadLocalBytesAllocated() const {
793    return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
794  }
795
796  size_t GetThreadLocalObjectsAllocated() const {
797    return tlsPtr_.thread_local_objects;
798  }
799
800  void* GetRosAllocRun(size_t index) const {
801    return tlsPtr_.rosalloc_runs[index];
802  }
803
804  void SetRosAllocRun(size_t index, void* run) {
805    tlsPtr_.rosalloc_runs[index] = run;
806  }
807
808  bool IsExceptionReportedToInstrumentation() const {
809    return tls32_.is_exception_reported_to_instrumentation_;
810  }
811
812  void SetExceptionReportedToInstrumentation(bool reported) {
813    tls32_.is_exception_reported_to_instrumentation_ = reported;
814  }
815
816 private:
817  explicit Thread(bool daemon);
818  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
819                           Locks::thread_suspend_count_lock_);
820  void Destroy();
821
822  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
823
824  template<bool kTransactionActive>
825  void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
826                jobject thread_name, jint thread_priority)
827      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
828
  // Avoid use, callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
  // Dbg::Disconnected.
  // "Unsafe" because the state half of state_and_flags is written
  // non-atomically with respect to the flags half. Returns the replaced state.
  ThreadState SetStateUnsafe(ThreadState new_state) {
    ThreadState old_state = GetState();
    tls32_.state_and_flags.as_struct.state = new_state;
    return old_state;
  }
836
  void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Dumps thread state/flags; DumpStack additionally walks the managed stack.
  void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void DumpStack(std::ostream& os) const
      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Out-of-line conveniences for debugging in gdb.
  static Thread* CurrentFromGdb();  // Like Thread::Current.
  // Like Thread::Dump(std::cerr).
  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // pthread entry point for newly started threads (signature matches
  // pthread_create's start routine) — NOTE(review): confirm in thread.cc.
  static void* CreateCallback(void* arg);

  // Thread-exit cleanup: run the peer's uncaught-exception machinery and
  // remove the peer from its ThreadGroup.
  void HandleUncaughtExceptions(ScopedObjectAccess& soa)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Attach-time initialization helpers.
  void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
  void InitCardTable();
  void InitCpu();
  void CleanupCpu();
  void InitTlsEntryPoints();
  void InitTid();
  void InitPthreadKeySelf();
  void InitStackHwm();

  void SetUpAlternateSignalStack();
  void TearDownAlternateSignalStack();
866
  // 32 bits of atomically changed state and flags. Keeping as 32 bits allows an atomic CAS to
  // change from being Suspended to Runnable without a suspend request occurring.
  union PACKED(4) StateAndFlags {
    StateAndFlags() {}
    struct PACKED(4) {
      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
      // ThreadFlags for bit field meanings.
      volatile uint16_t flags;
      // Holds the ThreadState. May be changed non-atomically between Suspended (ie not Runnable)
      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
      // operation. If a thread is suspended and a suspend_request is present, a thread may not
      // change to Runnable as a GC or other operation is in progress.
      volatile uint16_t state;
    } as_struct;
    // Whole-word views of the same 32 bits, used for atomic/CAS access.
    AtomicInteger as_atomic_int;
    volatile int32_t as_int;

   private:
    // gcc does not handle struct with volatile member assignments correctly.
    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
    DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
  };
  // The union must stay exactly one 32-bit word so it can be CASed as a unit.
  COMPILE_ASSERT(sizeof(StateAndFlags) == sizeof(int32_t), weird_state_and_flags_size);
890
  // pthread TLS-key destructor; repeated invocations are counted in
  // tls32_.thread_exit_check_count — NOTE(review): registration site is in
  // thread.cc, verify there.
  static void ThreadExitCallback(void* arg);

  // Maximum number of checkpoint functions.
  static constexpr uint32_t kMaxCheckpoints = 3;

  // Has Thread::Startup been called?
  static bool is_started_;

  // TLS key used to retrieve the Thread*.
  static pthread_key_t pthread_key_self_;

  // Used to notify threads that they should attempt to resume; they will suspend again if
  // their suspend count is > 0.
  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
905
906  /***********************************************************************************************/
907  // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
908  // pointer size differences. To encourage shorter encoding, more frequently used values appear
909  // first if possible.
910  /***********************************************************************************************/
911
  // All members are 32-bit quantities, grouped by size (per the banner above)
  // so Thread field offsets stay computable across 32-/64-bit targets.
  struct PACKED(4) tls_32bit_sized_values {
    // We have no control over the size of 'bool', but want our boolean fields
    // to be 4-byte quantities.
    typedef uint32_t bool32_t;

    // NOTE(review): state_and_flags is absent from this init list and
    // StateAndFlags' default constructor is empty, so its 32 bits are not
    // initialized here — presumably set during thread attach; verify.
    explicit tls_32bit_sized_values(bool is_daemon) :
      suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
      daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
      thread_exit_check_count(0), is_exception_reported_to_instrumentation_(false) {
    }

    union StateAndFlags state_and_flags;
    COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
                   sizeof_state_and_flags_and_int32_are_different);

    // A non-zero value is used to tell the current thread to enter a safe point
    // at the next poll.
    int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // How much of 'suspend_count_' is by request of the debugger, used to set things right
    // when the debugger detaches. Must be <= suspend_count_.
    int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);

    // Thin lock thread id. This is a small integer used by the thin lock implementation.
    // This is not to be confused with the native thread's tid, nor is it the value returned
    // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
    // important difference between this id and the ids visible to managed code is that these
    // ones get reused (to ensure that they fit in the number of bits available).
    uint32_t thin_lock_thread_id;

    // System thread id.
    uint32_t tid;

    // Is the thread a daemon?
    const bool32_t daemon;

    // A boolean telling us whether we're recursively throwing OOME.
    bool32_t throwing_OutOfMemoryError;

    // A positive value implies we're in a region where thread suspension isn't expected.
    uint32_t no_thread_suspension;

    // How many times has our pthread key's destructor been called?
    uint32_t thread_exit_check_count;

    // When true this field indicates that the exception associated with this thread has already
    // been reported to instrumentation.
    bool32_t is_exception_reported_to_instrumentation_;
  } tls32_;
961
  // 64-bit-sized thread-local values (see the size-grouping banner above).
  struct PACKED(8) tls_64bit_sized_values {
    tls_64bit_sized_values() : trace_clock_base(0), deoptimization_return_value() {
    }

    // The clock base used for tracing.
    uint64_t trace_clock_base;

    // Return value used by deoptimization.
    JValue deoptimization_return_value;

    // Per-thread runtime statistics (see runtime_stats.h). Not in the init
    // list — relies on RuntimeStats' own default construction.
    RuntimeStats stats;
  } tls64_;
974
  // Pointer-sized thread-local values. Field order is load-bearing: it is
  // checked by EntrypointsOrderTest (friend below), so do not reorder casually.
  struct PACKED(4) tls_ptr_sized_values {
      tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
      managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), self(nullptr), opeer(nullptr),
      jpeer(nullptr), stack_begin(nullptr), stack_size(0), throw_location(),
      stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
      top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
      instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
      deoptimization_shadow_frame(nullptr), shadow_frame_under_construction(nullptr), name(nullptr),
      pthread_self(0), last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
      thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
      thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr) {
    }
    // NOTE(review): checkpoint_functions, the entrypoint structs, rosalloc_runs
    // and held_mutexes are not initialized here — presumably zeroed/filled by
    // Thread's constructor and InitTlsEntryPoints(); verify in thread.cc.

    // The biased card table, see CardTable for details.
    byte* card_table;

    // The pending exception or NULL.
    mirror::Throwable* exception;

    // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
    // We leave extra space so there's room for the code that throws StackOverflowError.
    byte* stack_end;

    // The top of the managed stack often manipulated directly by compiler generated code.
    ManagedStack managed_stack;

    // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
    // normally set to the address of itself.
    uintptr_t* suspend_trigger;

    // Every thread may have an associated JNI environment
    JNIEnvExt* jni_env;

    // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
    // is easy but getting the address of Thread::Current is hard. This field can be read off of
    // Thread::Current to give the address.
    Thread* self;

    // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
    // start up, until the thread is registered and the local opeer_ is used.
    mirror::Object* opeer;
    jobject jpeer;

    // The "lowest addressable byte" of the stack.
    byte* stack_begin;

    // Size of the stack.
    size_t stack_size;

    // The location the current exception was thrown from.
    ThrowLocation throw_location;

    // Pointer to previous stack trace captured by sampling profiler.
    std::vector<mirror::ArtMethod*>* stack_trace_sample;

    // The next thread in the wait set this thread is part of or NULL if not waiting.
    Thread* wait_next;

    // If we're blocked in MonitorEnter, this is the object we're trying to lock.
    mirror::Object* monitor_enter_object;

    // Top of linked list of handle scopes or nullptr for none.
    HandleScope* top_handle_scope;

    // Needed to get the right ClassLoader in JNI_OnLoad, but also
    // useful for testing.
    mirror::ClassLoader* class_loader_override;

    // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
    Context* long_jump_context;

    // Additional stack used by method instrumentation to store method and return pc values.
    // Stored as a pointer since std::deque is not PACKED.
    std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;

    // JDWP invoke-during-breakpoint support.
    DebugInvokeReq* debug_invoke_req;

    // JDWP single-stepping support.
    SingleStepControl* single_step_control;

    // Shadow frame stack that is used temporarily during the deoptimization of a method.
    ShadowFrame* deoptimization_shadow_frame;

    // Shadow frame stack that is currently under construction but not yet on the stack
    ShadowFrame* shadow_frame_under_construction;

    // A cached copy of the java.lang.Thread's name.
    std::string* name;

    // A cached pthread_t for the pthread underlying this Thread*.
    pthread_t pthread_self;

    // If no_thread_suspension_ is > 0, what is causing that assertion.
    const char* last_no_thread_suspension_cause;

    // Pending checkpoint function or NULL if non-pending. Installation guarded by
    // Locks::thread_suspend_count_lock_.
    Closure* checkpoint_functions[kMaxCheckpoints];

    // Entrypoint function pointers.
    // TODO: move this to more of a global offset table model to avoid per-thread duplication.
    InterpreterEntryPoints interpreter_entrypoints;
    JniEntryPoints jni_entrypoints;
    PortableEntryPoints portable_entrypoints;
    QuickEntryPoints quick_entrypoints;

    // Thread-local allocation pointer. thread_local_pos is the bump pointer
    // within [thread_local_start, thread_local_end).
    byte* thread_local_start;
    byte* thread_local_pos;
    byte* thread_local_end;
    size_t thread_local_objects;

    // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
    void* rosalloc_runs[kNumRosAllocThreadLocalSizeBrackets];

    // Thread-local allocation stack data/routines.
    mirror::Object** thread_local_alloc_stack_top;
    mirror::Object** thread_local_alloc_stack_end;

    // Support for Mutex lock hierarchy bug detection.
    BaseMutex* held_mutexes[kLockLevelCount];
  } tlsPtr_;
1098
  // Wait/notify support. These live outside the tls* structs, so they are not
  // part of the offset-sensitive TLS layout above.
  // Guards the 'interrupted_' and 'wait_monitor_' members.
  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;

  // Condition variable waited upon during a wait.
  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
  // Pointer to the monitor lock we're currently waiting on or NULL if not waiting.
  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);

  // Thread "interrupted" status; stays raised until queried or thrown.
  bool interrupted_ GUARDED_BY(wait_mutex_);

  friend class Dbg;  // For SetStateUnsafe.
  friend class gc::collector::SemiSpace;  // For getting stack traces.
  friend class Runtime;  // For CreatePeer.
  friend class QuickExceptionHandler;  // For dumping the stack.
  friend class ScopedThreadStateChange;
  friend class SignalCatcher;  // For SetStateUnsafe.
  friend class StubTest;  // For accessing entrypoints.
  friend class ThreadList;  // For ~Thread and Destroy.

  friend class EntrypointsOrderTest;  // To test the order of tls entries.

  DISALLOW_COPY_AND_ASSIGN(Thread);
1122};
1123
// Stream-output support for Thread and ThreadState (used by logging/dumping;
// definitions elsewhere).
std::ostream& operator<<(std::ostream& os, const Thread& thread);
std::ostream& operator<<(std::ostream& os, const ThreadState& state);
1126
1127}  // namespace art
1128
1129#endif  // ART_RUNTIME_THREAD_H_
1130