thread.h revision 83b1940e6482b9d8feba5c492507735686650ea5
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#ifndef ART_RUNTIME_THREAD_H_
18#define ART_RUNTIME_THREAD_H_
19
20#include <bitset>
21#include <deque>
22#include <iosfwd>
23#include <list>
24#include <memory>
25#include <string>
26
27#include "atomic.h"
28#include "base/macros.h"
29#include "base/mutex.h"
30#include "entrypoints/interpreter/interpreter_entrypoints.h"
31#include "entrypoints/jni/jni_entrypoints.h"
32#include "entrypoints/portable/portable_entrypoints.h"
33#include "entrypoints/quick/quick_entrypoints.h"
34#include "globals.h"
35#include "handle_scope.h"
36#include "instruction_set.h"
37#include "jvalue.h"
38#include "object_callbacks.h"
39#include "offsets.h"
40#include "runtime_stats.h"
41#include "stack.h"
42#include "thread_state.h"
43#include "throw_location.h"
44
45namespace art {
46
47namespace gc {
48namespace collector {
49  class SemiSpace;
50}  // namespace collector
51}  // namespace gc
52
53namespace mirror {
54  class ArtMethod;
55  class Array;
56  class Class;
57  class ClassLoader;
58  class Object;
59  template<class T> class ObjectArray;
60  template<class T> class PrimitiveArray;
61  typedef PrimitiveArray<int32_t> IntArray;
62  class StackTraceElement;
63  class Throwable;
64}  // namespace mirror
65class BaseMutex;
66class ClassLinker;
67class Closure;
68class Context;
69struct DebugInvokeReq;
70class DexFile;
71class JavaVMExt;
72struct JNIEnvExt;
73class Monitor;
74class Runtime;
75class ScopedObjectAccessAlreadyRunnable;
76class ShadowFrame;
77struct SingleStepControl;
78class Thread;
79class ThreadList;
80
81// Thread priorities. These must match the Thread.MIN_PRIORITY,
82// Thread.NORM_PRIORITY, and Thread.MAX_PRIORITY constants.
83enum ThreadPriority {
84  kMinThreadPriority = 1,
85  kNormThreadPriority = 5,
86  kMaxThreadPriority = 10,
87};
88
89enum ThreadFlag {
90  kSuspendRequest   = 1,  // If set implies that suspend_count_ > 0 and the Thread should enter the
91                          // safepoint handler.
92  kCheckpointRequest = 2  // Request that the thread do some checkpoint work and then continue.
93};
94
95static constexpr size_t kNumRosAllocThreadLocalSizeBrackets = 34;
96
97class Thread {
98 public:
99  // How many of the reserved bytes are reserved for incoming signals.
100  static constexpr size_t kStackOverflowSignalReservedBytes = 2 * KB;
101
102  // For implicit overflow checks we reserve an extra piece of memory at the bottom
103  // of the stack (lowest addresses). The higher portion of that memory is protected
104  // against reads and the lower portion is available for use while throwing the
105  // StackOverflowError exception.
106  static constexpr size_t kStackOverflowProtectedSize = 16 * KB;
107  static const size_t kStackOverflowImplicitCheckSize;
108
109  // Creates a new native thread corresponding to the given managed peer.
110  // Used to implement Thread.start.
111  static void CreateNativeThread(JNIEnv* env, jobject peer, size_t stack_size, bool daemon);
112
113  // Attaches the calling native thread to the runtime, returning the new native peer.
114  // Used to implement JNI AttachCurrentThread and AttachCurrentThreadAsDaemon calls.
115  static Thread* Attach(const char* thread_name, bool as_daemon, jobject thread_group,
116                        bool create_peer);
117
118  // Reset internal state of child thread after fork.
119  void InitAfterFork();
120
121  static Thread* Current();
122
123  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts,
124                                   mirror::Object* thread_peer)
125      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
126      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
127      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
128  static Thread* FromManagedThread(const ScopedObjectAccessAlreadyRunnable& ts, jobject thread)
129      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_list_lock_)
130      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
131      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
132
133  // Translates 172 to pAllocArrayFromCode and so on.
134  template<size_t size_of_pointers>
135  static void DumpThreadOffset(std::ostream& os, uint32_t offset);
136
137  // Dumps a one-line summary of thread state (used for operator<<).
138  void ShortDump(std::ostream& os) const;
139
140  // Dumps the detailed thread state and the thread stack (used for SIGQUIT).
141  void Dump(std::ostream& os) const
142      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
143      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
144
145  void DumpJavaStack(std::ostream& os) const
146      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
147      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
148
149  // Dumps the SIGQUIT per-thread header. 'thread' can be NULL for a non-attached thread, in which
150  // case we use 'tid' to identify the thread, and we'll include as much information as we can.
151  static void DumpState(std::ostream& os, const Thread* thread, pid_t tid)
152      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
153      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
154
155  ThreadState GetState() const {
156    DCHECK_GE(tls32_.state_and_flags.as_struct.state, kTerminated);
157    DCHECK_LE(tls32_.state_and_flags.as_struct.state, kSuspended);
158    return static_cast<ThreadState>(tls32_.state_and_flags.as_struct.state);
159  }
160
161  ThreadState SetState(ThreadState new_state);
162
163  int GetSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
164    return tls32_.suspend_count;
165  }
166
167  int GetDebugSuspendCount() const EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_) {
168    return tls32_.debug_suspend_count;
169  }
170
171  bool IsSuspended() const {
172    union StateAndFlags state_and_flags;
173    state_and_flags.as_int = tls32_.state_and_flags.as_int;
174    return state_and_flags.as_struct.state != kRunnable &&
175        (state_and_flags.as_struct.flags & kSuspendRequest) != 0;
176  }
177
178  void ModifySuspendCount(Thread* self, int delta, bool for_debugger)
179      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);
180
181  bool RequestCheckpoint(Closure* function)
182      EXCLUSIVE_LOCKS_REQUIRED(Locks::thread_suspend_count_lock_);
183
184  // Called when this thread detects that thread_suspend_count_ is non-zero. Gives up its share
185  // of the mutator_lock_ and waits until it is resumed and thread_suspend_count_ is zero.
186  void FullSuspendCheck()
187      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
188      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
189
190  // Transition from non-runnable to runnable state, acquiring a share of the mutator_lock_.
191  ThreadState TransitionFromSuspendedToRunnable()
192      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
193      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
194      ALWAYS_INLINE;
195
196  // Transition from runnable into a state where mutator privileges are denied. Releases the
197  // share of the mutator_lock_.
198  void TransitionFromRunnableToSuspended(ThreadState new_state)
199      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
200      UNLOCK_FUNCTION(Locks::mutator_lock_)
201      ALWAYS_INLINE;
202
203  // Once called, thread suspension will cause an assertion failure.
204  const char* StartAssertNoThreadSuspension(const char* cause) {
205    if (kIsDebugBuild) {
206      CHECK(cause != NULL);
207      const char* previous_cause = tlsPtr_.last_no_thread_suspension_cause;
208      tls32_.no_thread_suspension++;
209      tlsPtr_.last_no_thread_suspension_cause = cause;
210      return previous_cause;
211    } else {
212      return nullptr;
213    }
214  }
215
216  // End region where no thread suspension is expected.
217  void EndAssertNoThreadSuspension(const char* old_cause) {
218    if (kIsDebugBuild) {
219      CHECK(old_cause != nullptr || tls32_.no_thread_suspension == 1);
220      CHECK_GT(tls32_.no_thread_suspension, 0U);
221      tls32_.no_thread_suspension--;
222      tlsPtr_.last_no_thread_suspension_cause = old_cause;
223    }
224  }
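  // A minimal usage sketch (illustrative only; the cause string is arbitrary):
  //
  //   const char* old_cause = self->StartAssertNoThreadSuspension("Visiting roots");
  //   ... code that must not suspend, e.g. while raw mirror::Object* pointers are held ...
  //   self->EndAssertNoThreadSuspension(old_cause);
  //
  // In debug builds a suspension attempt inside the region trips the assertion; in release
  // builds both calls are no-ops.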
225
226  void AssertThreadSuspensionIsAllowable(bool check_locks = true) const;
227
228  bool IsDaemon() const {
229    return tls32_.daemon;
230  }
231
232  bool HoldsLock(mirror::Object*) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
233
234  /*
235   * Changes the priority of this thread to match that of the java.lang.Thread object.
236   *
237   * We map a priority value from 1-10 to Linux "nice" values, where lower
238   * numbers indicate higher priority.
239   */
240  void SetNativePriority(int newPriority);
241
242  /*
243   * Returns the thread priority for the current thread by querying the system.
244   * This is useful when attaching a thread through JNI.
245   *
246   * Returns a value from 1 to 10 (compatible with java.lang.Thread values).
247   */
248  static int GetNativePriority();
249
250  uint32_t GetThreadId() const {
251    return tls32_.thin_lock_thread_id;
252  }
253
254  pid_t GetTid() const {
255    return tls32_.tid;
256  }
257
258  // Returns the java.lang.Thread's name, or NULL if this Thread* doesn't have a peer.
259  mirror::String* GetThreadName(const ScopedObjectAccessAlreadyRunnable& ts) const
260      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
261
262  // Sets 'name' to the java.lang.Thread's name. This requires no transition to managed code,
263  // allocation, or locking.
264  void GetThreadName(std::string& name) const;
265
266  // Sets the thread's name.
267  void SetThreadName(const char* name) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
268
269  // Returns the thread-specific CPU-time clock in microseconds or -1 if unavailable.
270  uint64_t GetCpuMicroTime() const;
271
272  mirror::Object* GetPeer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
273    CHECK(tlsPtr_.jpeer == nullptr);
274    return tlsPtr_.opeer;
275  }
276
277  bool HasPeer() const {
278    return tlsPtr_.jpeer != nullptr || tlsPtr_.opeer != nullptr;
279  }
280
281  RuntimeStats* GetStats() {
282    return &tls64_.stats;
283  }
284
285  bool IsStillStarting() const;
286
287  bool IsExceptionPending() const {
288    return tlsPtr_.exception != nullptr;
289  }
290
291  mirror::Throwable* GetException(ThrowLocation* throw_location) const
292      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
293    if (throw_location != nullptr) {
294      *throw_location = tlsPtr_.throw_location;
295    }
296    return tlsPtr_.exception;
297  }
298
299  void AssertNoPendingException() const;
300  void AssertNoPendingExceptionForNewException(const char* msg) const;
301
302  void SetException(const ThrowLocation& throw_location, mirror::Throwable* new_exception)
303      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
304    CHECK(new_exception != NULL);
305    // TODO: DCHECK(!IsExceptionPending());
306    tlsPtr_.exception = new_exception;
307    tlsPtr_.throw_location = throw_location;
308  }
309
310  void ClearException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
311    tlsPtr_.exception = nullptr;
312    tlsPtr_.throw_location.Clear();
313    SetExceptionReportedToInstrumentation(false);
314  }
315
316  // Find the catch block and perform a long jump to the appropriate exception handler.
317  void QuickDeliverException() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
318
319  Context* GetLongJumpContext();
320  void ReleaseLongJumpContext(Context* context) {
321    DCHECK(tlsPtr_.long_jump_context == nullptr);
322    tlsPtr_.long_jump_context = context;
323  }
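  // The thread caches a single Context; a caller borrows it with GetLongJumpContext() and, if
  // it does not end up long-jumping, hands it back for reuse. Illustrative sketch:
  //
  //   Context* context = self->GetLongJumpContext();
  //   ... use the context, e.g. while walking the stack ...
  //   self->ReleaseLongJumpContext(context);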
324
325  // Get the current method and dex pc. If there are errors in retrieving the dex pc, this will
326  // abort the runtime iff abort_on_error is true.
327  mirror::ArtMethod* GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error = true) const
328      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
329
330  ThrowLocation GetCurrentLocationForThrow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
331
332  void SetTopOfStack(StackReference<mirror::ArtMethod>* top_method, uintptr_t pc) {
333    tlsPtr_.managed_stack.SetTopQuickFrame(top_method);
334    tlsPtr_.managed_stack.SetTopQuickFramePc(pc);
335  }
336
337  void SetTopOfShadowStack(ShadowFrame* top) {
338    tlsPtr_.managed_stack.SetTopShadowFrame(top);
339  }
340
341  bool HasManagedStack() const {
342    return (tlsPtr_.managed_stack.GetTopQuickFrame() != nullptr) ||
343        (tlsPtr_.managed_stack.GetTopShadowFrame() != nullptr);
344  }
345
346  // If 'msg' is NULL, no detail message is set.
347  void ThrowNewException(const ThrowLocation& throw_location,
348                         const char* exception_class_descriptor, const char* msg)
349      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
350
351  // If 'msg' is NULL, no detail message is set. An exception must be pending, and will be
352  // used as the new exception's cause.
353  void ThrowNewWrappedException(const ThrowLocation& throw_location,
354                                const char* exception_class_descriptor,
355                                const char* msg)
356      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
357
358  void ThrowNewExceptionF(const ThrowLocation& throw_location,
359                          const char* exception_class_descriptor, const char* fmt, ...)
360      __attribute__((format(printf, 4, 5)))
361      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
362
363  void ThrowNewExceptionV(const ThrowLocation& throw_location,
364                          const char* exception_class_descriptor, const char* fmt, va_list ap)
365      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
366
367  // OutOfMemoryError is special, because we need to pre-allocate an instance.
368  // Only the GC should call this.
369  void ThrowOutOfMemoryError(const char* msg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
370
371  static void Startup();
372  static void FinishStartup();
373  static void Shutdown();
374
375  // JNI methods
376  JNIEnvExt* GetJniEnv() const {
377    return tlsPtr_.jni_env;
378  }
379
380  // Convert a jobject into an Object*.
381  mirror::Object* DecodeJObject(jobject obj) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
382
383  mirror::Object* GetMonitorEnterObject() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
384    return tlsPtr_.monitor_enter_object;
385  }
386
387  void SetMonitorEnterObject(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
388    tlsPtr_.monitor_enter_object = obj;
389  }
390
391  // Implements java.lang.Thread.interrupted.
392  bool Interrupted() LOCKS_EXCLUDED(wait_mutex_);
393  // Implements java.lang.Thread.isInterrupted.
394  bool IsInterrupted() LOCKS_EXCLUDED(wait_mutex_);
395  bool IsInterruptedLocked() EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
396    return interrupted_;
397  }
398  void Interrupt(Thread* self) LOCKS_EXCLUDED(wait_mutex_);
399  void SetInterruptedLocked(bool i) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
400    interrupted_ = i;
401  }
402  void Notify() LOCKS_EXCLUDED(wait_mutex_);
403
404 private:
405  void NotifyLocked(Thread* self) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_);
406
407 public:
408  Mutex* GetWaitMutex() const LOCK_RETURNED(wait_mutex_) {
409    return wait_mutex_;
410  }
411
412  ConditionVariable* GetWaitConditionVariable() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
413    return wait_cond_;
414  }
415
416  Monitor* GetWaitMonitor() const EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
417    return wait_monitor_;
418  }
419
420  void SetWaitMonitor(Monitor* mon) EXCLUSIVE_LOCKS_REQUIRED(wait_mutex_) {
421    wait_monitor_ = mon;
422  }
423
424
425  // Waiter linked-list support.
426  Thread* GetWaitNext() const {
427    return tlsPtr_.wait_next;
428  }
429
430  void SetWaitNext(Thread* next) {
431    tlsPtr_.wait_next = next;
432  }
433
434  mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
435    return tlsPtr_.class_loader_override;
436  }
437
438  void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override)
439      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
440
441  // Create the internal representation of a stack trace, which is more time- and
442  // space-efficient to compute than the StackTraceElement[].
443  template<bool kTransactionActive>
444  jobject CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const
445      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
446
447  // Convert an internal stack trace representation (returned by CreateInternalStackTrace) to a
448  // StackTraceElement[]. If output_array is NULL, a new array is created, otherwise as many
449  // frames as will fit are written into the given array. If stack_depth is non-NULL, it's updated
450  // with the number of valid frames in the returned array.
451  static jobjectArray InternalStackTraceToStackTraceElementArray(
452      const ScopedObjectAccessAlreadyRunnable& soa, jobject internal,
453      jobjectArray output_array = nullptr, int* stack_depth = nullptr)
454      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
455
456  void VisitRoots(RootCallback* visitor, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
457
458  ALWAYS_INLINE void VerifyStack() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
459
460  //
461  // Offsets of various members of native Thread class, used by compiled code.
462  //
463
464  template<size_t pointer_size>
465  static ThreadOffset<pointer_size> ThinLockIdOffset() {
466    return ThreadOffset<pointer_size>(
467        OFFSETOF_MEMBER(Thread, tls32_) +
468        OFFSETOF_MEMBER(tls_32bit_sized_values, thin_lock_thread_id));
469  }
470
471  template<size_t pointer_size>
472  static ThreadOffset<pointer_size> ThreadFlagsOffset() {
473    return ThreadOffset<pointer_size>(
474        OFFSETOF_MEMBER(Thread, tls32_) +
475        OFFSETOF_MEMBER(tls_32bit_sized_values, state_and_flags));
476  }
477
478 private:
479  template<size_t pointer_size>
480  static ThreadOffset<pointer_size> ThreadOffsetFromTlsPtr(size_t tls_ptr_offset) {
481    size_t base = OFFSETOF_MEMBER(Thread, tlsPtr_);
482    size_t scale;
483    size_t shrink;
484    if (pointer_size == sizeof(void*)) {
485      scale = 1;
486      shrink = 1;
487    } else if (pointer_size > sizeof(void*)) {
488      scale = pointer_size / sizeof(void*);
489      shrink = 1;
490    } else {
491      DCHECK_GT(sizeof(void*), pointer_size);
492      scale = 1;
493      shrink = sizeof(void*) / pointer_size;
494    }
495    return ThreadOffset<pointer_size>(base + ((tls_ptr_offset * scale) / shrink));
496  }
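  // Worked example (illustrative): when generating 64-bit offsets from a 32-bit host,
  // pointer_size == 8 and sizeof(void*) == 4, so scale == 2 and shrink == 1; a member at host
  // offset 12 within tls_ptr_sized_values maps to target offset base + 24. Conversely,
  // generating 32-bit offsets from a 64-bit host gives scale == 1 and shrink == 2.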
497
498 public:
499  template<size_t pointer_size>
500  static ThreadOffset<pointer_size> QuickEntryPointOffset(size_t quick_entrypoint_offset) {
501    return ThreadOffsetFromTlsPtr<pointer_size>(
502        OFFSETOF_MEMBER(tls_ptr_sized_values, quick_entrypoints) + quick_entrypoint_offset);
503  }
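  // For example (illustrative, not a definitive list of entrypoints), the offset compiled code
  // uses to reach the object-allocation entrypoint could be computed as
  //   Thread::QuickEntryPointOffset<4>(OFFSETOF_MEMBER(QuickEntryPoints, pAllocObject))
  // when generating 32-bit code, with <8> used for 64-bit targets.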
504
505  template<size_t pointer_size>
506  static ThreadOffset<pointer_size> InterpreterEntryPointOffset(size_t interp_entrypoint_offset) {
507    return ThreadOffsetFromTlsPtr<pointer_size>(
508        OFFSETOF_MEMBER(tls_ptr_sized_values, interpreter_entrypoints) + interp_entrypoint_offset);
509  }
510
511  template<size_t pointer_size>
512  static ThreadOffset<pointer_size> JniEntryPointOffset(size_t jni_entrypoint_offset) {
513    return ThreadOffsetFromTlsPtr<pointer_size>(
514        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_entrypoints) + jni_entrypoint_offset);
515  }
516
517  template<size_t pointer_size>
518  static ThreadOffset<pointer_size> PortableEntryPointOffset(size_t port_entrypoint_offset) {
519    return ThreadOffsetFromTlsPtr<pointer_size>(
520        OFFSETOF_MEMBER(tls_ptr_sized_values, portable_entrypoints) + port_entrypoint_offset);
521  }
522
523  template<size_t pointer_size>
524  static ThreadOffset<pointer_size> SelfOffset() {
525    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, self));
526  }
527
528  template<size_t pointer_size>
529  static ThreadOffset<pointer_size> ExceptionOffset() {
530    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, exception));
531  }
532
533  template<size_t pointer_size>
534  static ThreadOffset<pointer_size> PeerOffset() {
535    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, opeer));
536  }
537
538
539  template<size_t pointer_size>
540  static ThreadOffset<pointer_size> CardTableOffset() {
541    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values, card_table));
542  }
543
544  template<size_t pointer_size>
545  static ThreadOffset<pointer_size> ThreadSuspendTriggerOffset() {
546    return ThreadOffsetFromTlsPtr<pointer_size>(
547        OFFSETOF_MEMBER(tls_ptr_sized_values, suspend_trigger));
548  }
549
550  // Size of the stack, less any space reserved for stack overflow.
551  size_t GetStackSize() const {
552    return tlsPtr_.stack_size - (tlsPtr_.stack_end - tlsPtr_.stack_begin);
553  }
554
555  byte* GetStackEndForInterpreter(bool implicit_overflow_check) const {
556    if (implicit_overflow_check) {
557      // The interpreter needs the extra overflow bytes that stack_end does
558      // not include.
559      return tlsPtr_.stack_end + GetStackOverflowReservedBytes(kRuntimeISA);
560    } else {
561      return tlsPtr_.stack_end;
562    }
563  }
564
565  byte* GetStackEnd() const {
566    return tlsPtr_.stack_end;
567  }
568
569  // Set the stack end to the value to be used while handling a stack overflow.
570  void SetStackEndForStackOverflow() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
571
572  // Reset the stack end to the value to be used during regular execution.
573  void ResetDefaultStackEnd(bool implicit_overflow_check) {
574    // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
575    // to throw a StackOverflowError.
576    if (implicit_overflow_check) {
577      // For implicit checks we also need to add in the protected region above the
578      // overflow region.
579      tlsPtr_.stack_end = tlsPtr_.stack_begin + kStackOverflowImplicitCheckSize;
580    } else {
581      tlsPtr_.stack_end = tlsPtr_.stack_begin + GetStackOverflowReservedBytes(kRuntimeISA);
582    }
583  }
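  // Rough stack layout with implicit checks enabled, per the comments above (illustrative;
  // higher addresses towards the top):
  //
  //   stack_begin + stack_size                            <- top of stack
  //     ... normal usable stack ...
  //   stack_end = stack_begin + kStackOverflowImplicitCheckSize
  //     ... protected region of kStackOverflowProtectedSize bytes (protected against reads) ...
  //     ... remaining reserved bytes, usable while throwing StackOverflowError ...
  //   stack_begin                                         <- lowest address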
584
585  // Install the protected region for implicit stack checks.
586  void InstallImplicitProtection(bool is_main_stack);
587
588  bool IsHandlingStackOverflow() const {
589    return tlsPtr_.stack_end == tlsPtr_.stack_begin;
590  }
591
592  template<size_t pointer_size>
593  static ThreadOffset<pointer_size> StackEndOffset() {
594    return ThreadOffsetFromTlsPtr<pointer_size>(
595        OFFSETOF_MEMBER(tls_ptr_sized_values, stack_end));
596  }
597
598  template<size_t pointer_size>
599  static ThreadOffset<pointer_size> JniEnvOffset() {
600    return ThreadOffsetFromTlsPtr<pointer_size>(
601        OFFSETOF_MEMBER(tls_ptr_sized_values, jni_env));
602  }
603
604  template<size_t pointer_size>
605  static ThreadOffset<pointer_size> TopOfManagedStackOffset() {
606    return ThreadOffsetFromTlsPtr<pointer_size>(
607        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
608        ManagedStack::TopQuickFrameOffset());
609  }
610
611  template<size_t pointer_size>
612  static ThreadOffset<pointer_size> TopOfManagedStackPcOffset() {
613    return ThreadOffsetFromTlsPtr<pointer_size>(
614        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
615        ManagedStack::TopQuickFramePcOffset());
616  }
617
618  const ManagedStack* GetManagedStack() const {
619    return &tlsPtr_.managed_stack;
620  }
621
622  // Linked list recording fragments of managed stack.
623  void PushManagedStackFragment(ManagedStack* fragment) {
624    tlsPtr_.managed_stack.PushManagedStackFragment(fragment);
625  }
626  void PopManagedStackFragment(const ManagedStack& fragment) {
627    tlsPtr_.managed_stack.PopManagedStackFragment(fragment);
628  }
629
630  ShadowFrame* PushShadowFrame(ShadowFrame* new_top_frame) {
631    return tlsPtr_.managed_stack.PushShadowFrame(new_top_frame);
632  }
633
634  ShadowFrame* PopShadowFrame() {
635    return tlsPtr_.managed_stack.PopShadowFrame();
636  }
637
638  template<size_t pointer_size>
639  static ThreadOffset<pointer_size> TopShadowFrameOffset() {
640    return ThreadOffsetFromTlsPtr<pointer_size>(
641        OFFSETOF_MEMBER(tls_ptr_sized_values, managed_stack) +
642        ManagedStack::TopShadowFrameOffset());
643  }
644
645  // Number of references allocated in JNI ShadowFrames on this thread.
646  size_t NumJniShadowFrameReferences() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
647    return tlsPtr_.managed_stack.NumJniShadowFrameReferences();
648  }
649
650  // Number of references in handle scope on this thread.
651  size_t NumHandleReferences();
652
653  // Number of references allocated in handle scopes & JNI shadow frames on this thread.
654  size_t NumStackReferences() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
655    return NumHandleReferences() + NumJniShadowFrameReferences();
656  }
657
658  // Is the given obj in this thread's handle scope (stack indirect reference table)?
659  bool HandleScopeContains(jobject obj) const;
660
661  void HandleScopeVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id)
662      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
663
664  HandleScope* GetTopHandleScope() {
665    return tlsPtr_.top_handle_scope;
666  }
667
668  void PushHandleScope(HandleScope* handle_scope) {
669    handle_scope->SetLink(tlsPtr_.top_handle_scope);
670    tlsPtr_.top_handle_scope = handle_scope;
671  }
672
673  HandleScope* PopHandleScope() {
674    HandleScope* handle_scope = tlsPtr_.top_handle_scope;
675    DCHECK(handle_scope != nullptr);
676    tlsPtr_.top_handle_scope = tlsPtr_.top_handle_scope->GetLink();
677    return handle_scope;
678  }
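  // Handle scopes nest in LIFO order. Illustrative sketch (a StackHandleScope normally does
  // the push/pop itself):
  //
  //   HandleScope* scope = ...;
  //   self->PushHandleScope(scope);
  //   ... create handles; roots in the scope are visited via HandleScopeVisitRoots ...
  //   HandleScope* popped = self->PopHandleScope();
  //   DCHECK(popped == scope);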
679
680  template<size_t pointer_size>
681  static ThreadOffset<pointer_size> TopHandleScopeOffset() {
682    return ThreadOffsetFromTlsPtr<pointer_size>(OFFSETOF_MEMBER(tls_ptr_sized_values,
683                                                                top_handle_scope));
684  }
685
686  DebugInvokeReq* GetInvokeReq() const {
687    return tlsPtr_.debug_invoke_req;
688  }
689
690  SingleStepControl* GetSingleStepControl() const {
691    return tlsPtr_.single_step_control;
692  }
693
694  // Returns the fake exception used to activate deoptimization.
695  static mirror::Throwable* GetDeoptimizationException() {
696    return reinterpret_cast<mirror::Throwable*>(-1);
697  }
698
699  void SetDeoptimizationShadowFrame(ShadowFrame* sf);
700  void SetDeoptimizationReturnValue(const JValue& ret_val);
701
702  ShadowFrame* GetAndClearDeoptimizationShadowFrame(JValue* ret_val);
703
704  bool HasDeoptimizationShadowFrame() const {
705    return tlsPtr_.deoptimization_shadow_frame != nullptr;
706  }
707
708  void SetShadowFrameUnderConstruction(ShadowFrame* sf);
709  void ClearShadowFrameUnderConstruction();
710
711  bool HasShadowFrameUnderConstruction() const {
712    return tlsPtr_.shadow_frame_under_construction != nullptr;
713  }
714
715  std::deque<instrumentation::InstrumentationStackFrame>* GetInstrumentationStack() {
716    return tlsPtr_.instrumentation_stack;
717  }
718
719  std::vector<mirror::ArtMethod*>* GetStackTraceSample() const {
720    return tlsPtr_.stack_trace_sample;
721  }
722
723  void SetStackTraceSample(std::vector<mirror::ArtMethod*>* sample) {
724    tlsPtr_.stack_trace_sample = sample;
725  }
726
727  uint64_t GetTraceClockBase() const {
728    return tls64_.trace_clock_base;
729  }
730
731  void SetTraceClockBase(uint64_t clock_base) {
732    tls64_.trace_clock_base = clock_base;
733  }
734
735  BaseMutex* GetHeldMutex(LockLevel level) const {
736    return tlsPtr_.held_mutexes[level];
737  }
738
739  void SetHeldMutex(LockLevel level, BaseMutex* mutex) {
740    tlsPtr_.held_mutexes[level] = mutex;
741  }
742
743  void RunCheckpointFunction();
744
745  bool ReadFlag(ThreadFlag flag) const {
746    return (tls32_.state_and_flags.as_struct.flags & flag) != 0;
747  }
748
749  bool TestAllFlags() const {
750    return (tls32_.state_and_flags.as_struct.flags != 0);
751  }
752
753  void AtomicSetFlag(ThreadFlag flag) {
754    tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flag);
755  }
756
757  void AtomicClearFlag(ThreadFlag flag) {
758    tls32_.state_and_flags.as_atomic_int.FetchAndAndSequentiallyConsistent(-1 ^ flag);
759  }
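  // Note: -1 ^ flag is the all-ones word with the flag's bit cleared (i.e. ~flag), so the
  // fetch-and-and above clears exactly that flag while leaving the state bits and any other
  // flags untouched. For example, with kCheckpointRequest == 2, -1 ^ 2 == 0xfffffffd and only
  // bit 1 is dropped.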
760
761  void ResetQuickAllocEntryPointsForThread();
762
763  // Returns the remaining space in the TLAB.
764  size_t TlabSize() const;
765  // Doesn't check that there is room.
766  mirror::Object* AllocTlab(size_t bytes);
767  void SetTlab(byte* start, byte* end);
768  bool HasTlab() const;
769
770  // Remove the suspend trigger for this thread by making the suspend_trigger_ TLS value
771  // equal to a valid pointer.
772  // TODO: does this need to be atomic?  I don't think so.
773  void RemoveSuspendTrigger() {
774    tlsPtr_.suspend_trigger = reinterpret_cast<uintptr_t*>(&tlsPtr_.suspend_trigger);
775  }
776
777  // Trigger a suspend check by making the suspend_trigger_ TLS value an invalid pointer.
778  // The next time a suspend check is done, it will load from the value at this address
779  // and trigger a SIGSEGV.
780  void TriggerSuspend() {
781    tlsPtr_.suspend_trigger = nullptr;
782  }
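  // Illustrative sketch of the resulting implicit suspend check: compiled code double-loads
  // through this slot, roughly
  //
  //   uintptr_t* trigger = tlsPtr_.suspend_trigger;  // load the slot
  //   uintptr_t unused = *trigger;                   // faults with SIGSEGV when trigger is null
  //
  // The SIGSEGV is recognized by the runtime's fault handler and turned into a suspend check,
  // while RemoveSuspendTrigger() makes the dereference harmless again.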
783
784
785  // Push an object onto the allocation stack.
786  bool PushOnThreadLocalAllocationStack(mirror::Object* obj);
787
788  // Set the thread local allocation pointers to the given pointers.
789  void SetThreadLocalAllocationStack(mirror::Object** start, mirror::Object** end);
790
791  // Resets the thread local allocation pointers.
792  void RevokeThreadLocalAllocationStack();
793
794  size_t GetThreadLocalBytesAllocated() const {
795    return tlsPtr_.thread_local_end - tlsPtr_.thread_local_start;
796  }
797
798  size_t GetThreadLocalObjectsAllocated() const {
799    return tlsPtr_.thread_local_objects;
800  }
801
802  void* GetRosAllocRun(size_t index) const {
803    return tlsPtr_.rosalloc_runs[index];
804  }
805
806  void SetRosAllocRun(size_t index, void* run) {
807    tlsPtr_.rosalloc_runs[index] = run;
808  }
809
810  bool IsExceptionReportedToInstrumentation() const {
811    return tls32_.is_exception_reported_to_instrumentation_;
812  }
813
814  void SetExceptionReportedToInstrumentation(bool reported) {
815    tls32_.is_exception_reported_to_instrumentation_ = reported;
816  }
817
818 private:
819  explicit Thread(bool daemon);
820  ~Thread() LOCKS_EXCLUDED(Locks::mutator_lock_,
821                           Locks::thread_suspend_count_lock_);
822  void Destroy();
823
824  void CreatePeer(const char* name, bool as_daemon, jobject thread_group);
825
826  template<bool kTransactionActive>
827  void InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
828                jobject thread_name, jint thread_priority)
829      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
830
831  // Avoid use; callers should use SetState. Used only by SignalCatcher::HandleSigQuit, ~Thread and
832  // Dbg::Disconnected.
833  ThreadState SetStateUnsafe(ThreadState new_state) {
834    ThreadState old_state = GetState();
835    tls32_.state_and_flags.as_struct.state = new_state;
836    return old_state;
837  }
838
839  void VerifyStackImpl() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
840
841  void DumpState(std::ostream& os) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
842  void DumpStack(std::ostream& os) const
843      LOCKS_EXCLUDED(Locks::thread_suspend_count_lock_)
844      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
845
846  // Out-of-line conveniences for debugging in gdb.
847  static Thread* CurrentFromGdb();  // Like Thread::Current.
848  // Like Thread::Dump(std::cerr).
849  void DumpFromGdb() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
850
851  static void* CreateCallback(void* arg);
852
853  void HandleUncaughtExceptions(ScopedObjectAccess& soa)
854      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
855  void RemoveFromThreadGroup(ScopedObjectAccess& soa) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
856
857  void Init(ThreadList*, JavaVMExt*) EXCLUSIVE_LOCKS_REQUIRED(Locks::runtime_shutdown_lock_);
858  void InitCardTable();
859  void InitCpu();
860  void CleanupCpu();
861  void InitTlsEntryPoints();
862  void InitTid();
863  void InitPthreadKeySelf();
864  void InitStackHwm();
865
866  void SetUpAlternateSignalStack();
867  void TearDownAlternateSignalStack();
868
869  // 32 bits of atomically changed state and flags. Keeping them as 32 bits allows an atomic CAS
870  // to change from being Suspended to Runnable without a suspend request occurring.
871  union PACKED(4) StateAndFlags {
872    StateAndFlags() {}
873    struct PACKED(4) {
874      // Bitfield of flag values. Must be changed atomically so that flag values aren't lost. See
875      // ThreadFlags for bit field meanings.
876      volatile uint16_t flags;
877      // Holds the ThreadState. May be changed non-atomically between Suspended (i.e. not Runnable)
878      // transitions. Changing to Runnable requires that the suspend_request be part of the atomic
879      // operation. If a thread is suspended and a suspend request is present, the thread may not
880      // change to Runnable, as a GC or other operation is in progress.
881      volatile uint16_t state;
882    } as_struct;
883    AtomicInteger as_atomic_int;
884    volatile int32_t as_int;
885
886   private:
887    // gcc does not handle structs with volatile member assignments correctly.
888    // See http://gcc.gnu.org/bugzilla/show_bug.cgi?id=47409
889    DISALLOW_COPY_AND_ASSIGN(StateAndFlags);
890  };
891  COMPILE_ASSERT(sizeof(StateAndFlags) == sizeof(int32_t), weird_state_and_flags_size);
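  // Why state and flags share one 32-bit word (illustrative pseudo-code): the transition from
  // Suspended to Runnable can be a single compare-and-swap that only succeeds while no suspend
  // request is pending, roughly
  //
  //   union StateAndFlags old_state_and_flags, new_state_and_flags;
  //   old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
  //   if ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) { /* wait for resume */ }
  //   new_state_and_flags.as_int = old_state_and_flags.as_int;
  //   new_state_and_flags.as_struct.state = kRunnable;
  //   // CAS as_atomic_int from old to new; if it fails, a flag or state change raced, so retry.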
892
893  static void ThreadExitCallback(void* arg);
894
895  // Maximum number of checkpoint functions.
896  static constexpr uint32_t kMaxCheckpoints = 3;
897
898  // Has Thread::Startup been called?
899  static bool is_started_;
900
901  // TLS key used to retrieve the Thread*.
902  static pthread_key_t pthread_key_self_;
903
904  // Used to notify threads that they should attempt to resume; they will suspend again if
905  // their suspend count is > 0.
906  static ConditionVariable* resume_cond_ GUARDED_BY(Locks::thread_suspend_count_lock_);
907
908  /***********************************************************************************************/
909  // Thread local storage. Fields are grouped by size to enable 32 <-> 64 searching to account for
910  // pointer size differences. To encourage shorter encoding, more frequently used values appear
911  // first if possible.
912  /***********************************************************************************************/
913
914  struct PACKED(4) tls_32bit_sized_values {
915    // We have no control over the size of 'bool', but want our boolean fields
916    // to be 4-byte quantities.
917    typedef uint32_t bool32_t;
918
919    explicit tls_32bit_sized_values(bool is_daemon) :
920      suspend_count(0), debug_suspend_count(0), thin_lock_thread_id(0), tid(0),
921      daemon(is_daemon), throwing_OutOfMemoryError(false), no_thread_suspension(0),
922      thread_exit_check_count(0), is_exception_reported_to_instrumentation_(false) {
923    }
924
925    union StateAndFlags state_and_flags;
926    COMPILE_ASSERT(sizeof(union StateAndFlags) == sizeof(int32_t),
927                   sizeof_state_and_flags_and_int32_are_different);
928
929    // A non-zero value is used to tell the current thread to enter a safe point
930    // at the next poll.
931    int suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
932
933    // How much of 'suspend_count_' is by request of the debugger, used to set things right
934    // when the debugger detaches. Must be <= suspend_count_.
935    int debug_suspend_count GUARDED_BY(Locks::thread_suspend_count_lock_);
936
937    // Thin lock thread id. This is a small integer used by the thin lock implementation.
938    // This is not to be confused with the native thread's tid, nor is it the value returned
939    // by java.lang.Thread.getId --- this is a distinct value, used only for locking. One
940    // important difference between this id and the ids visible to managed code is that these
941    // ones get reused (to ensure that they fit in the number of bits available).
942    uint32_t thin_lock_thread_id;
943
944    // System thread id.
945    uint32_t tid;
946
947    // Is the thread a daemon?
948    const bool32_t daemon;
949
950    // A boolean telling us whether we're recursively throwing OOME.
951    bool32_t throwing_OutOfMemoryError;
952
953    // A positive value implies we're in a region where thread suspension isn't expected.
954    uint32_t no_thread_suspension;
955
956    // How many times has our pthread key's destructor been called?
957    uint32_t thread_exit_check_count;
958
959    // When true, this field indicates that the exception associated with this thread has already
960    // been reported to instrumentation.
961    bool32_t is_exception_reported_to_instrumentation_;
962  } tls32_;
963
964  struct PACKED(8) tls_64bit_sized_values {
965    tls_64bit_sized_values() : trace_clock_base(0), deoptimization_return_value() {
966    }
967
968    // The clock base used for tracing.
969    uint64_t trace_clock_base;
970
971    // Return value used by deoptimization.
972    JValue deoptimization_return_value;
973
974    RuntimeStats stats;
975  } tls64_;
976
977  struct PACKED(4) tls_ptr_sized_values {
978      tls_ptr_sized_values() : card_table(nullptr), exception(nullptr), stack_end(nullptr),
979      managed_stack(), suspend_trigger(nullptr), jni_env(nullptr), self(nullptr), opeer(nullptr),
980      jpeer(nullptr), stack_begin(nullptr), stack_size(0), throw_location(),
981      stack_trace_sample(nullptr), wait_next(nullptr), monitor_enter_object(nullptr),
982      top_handle_scope(nullptr), class_loader_override(nullptr), long_jump_context(nullptr),
983      instrumentation_stack(nullptr), debug_invoke_req(nullptr), single_step_control(nullptr),
984      deoptimization_shadow_frame(nullptr), shadow_frame_under_construction(nullptr), name(nullptr),
985      pthread_self(0), last_no_thread_suspension_cause(nullptr), thread_local_start(nullptr),
986      thread_local_pos(nullptr), thread_local_end(nullptr), thread_local_objects(0),
987      thread_local_alloc_stack_top(nullptr), thread_local_alloc_stack_end(nullptr) {
988    }
989
990    // The biased card table, see CardTable for details.
991    byte* card_table;
992
993    // The pending exception or NULL.
994    mirror::Throwable* exception;
995
996    // The end of this thread's stack. This is the lowest safely-addressable address on the stack.
997    // We leave extra space so there's room for the code that throws StackOverflowError.
998    byte* stack_end;
999
1000    // The top of the managed stack, often manipulated directly by compiler-generated code.
1001    ManagedStack managed_stack;
1002
1003    // In certain modes, setting this to 0 will trigger a SEGV and thus a suspend check.  It is
1004    // normally set to the address of itself.
1005    uintptr_t* suspend_trigger;
1006
1007    // Every thread may have an associated JNI environment
1008    JNIEnvExt* jni_env;
1009
1010    // Initialized to "this". On certain architectures (such as x86) reading off of Thread::Current
1011    // is easy but getting the address of Thread::Current is hard. This field can be read off of
1012    // Thread::Current to give the address.
1013    Thread* self;
1014
1015    // Our managed peer (an instance of java.lang.Thread). The jobject version is used during thread
1016    // start up, until the thread is registered and the local opeer_ is used.
1017    mirror::Object* opeer;
1018    jobject jpeer;
1019
1020    // The "lowest addressable byte" of the stack.
1021    byte* stack_begin;
1022
1023    // Size of the stack.
1024    size_t stack_size;
1025
1026    // The location the current exception was thrown from.
1027    ThrowLocation throw_location;
1028
1029    // Pointer to previous stack trace captured by sampling profiler.
1030    std::vector<mirror::ArtMethod*>* stack_trace_sample;
1031
1032    // The next thread in the wait set this thread is part of or NULL if not waiting.
1033    Thread* wait_next;
1034
1035    // If we're blocked in MonitorEnter, this is the object we're trying to lock.
1036    mirror::Object* monitor_enter_object;
1037
1038    // Top of linked list of handle scopes or nullptr for none.
1039    HandleScope* top_handle_scope;
1040
1041    // Needed to get the right ClassLoader in JNI_OnLoad, but also
1042    // useful for testing.
1043    mirror::ClassLoader* class_loader_override;
1044
1045    // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
1046    Context* long_jump_context;
1047
1048    // Additional stack used by method instrumentation to store method and return pc values.
1049    // Stored as a pointer since std::deque is not PACKED.
1050    std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack;
1051
1052    // JDWP invoke-during-breakpoint support.
1053    DebugInvokeReq* debug_invoke_req;
1054
1055    // JDWP single-stepping support.
1056    SingleStepControl* single_step_control;
1057
1058    // Shadow frame stack that is used temporarily during the deoptimization of a method.
1059    ShadowFrame* deoptimization_shadow_frame;
1060
1061    // Shadow frame stack that is currently under construction but not yet on the stack
1062    ShadowFrame* shadow_frame_under_construction;
1063
1064    // A cached copy of the java.lang.Thread's name.
1065    std::string* name;
1066
1067    // A cached pthread_t for the pthread underlying this Thread*.
1068    pthread_t pthread_self;
1069
1070    // If no_thread_suspension_ is > 0, what is causing that assertion.
1071    const char* last_no_thread_suspension_cause;
1072
1073    // Pending checkpoint functions, or NULL if none are pending. Installation is guarded by
1074    // Locks::thread_suspend_count_lock_.
1075    Closure* checkpoint_functions[kMaxCheckpoints];
1076
1077    // Entrypoint function pointers.
1078    // TODO: move this to more of a global offset table model to avoid per-thread duplication.
1079    InterpreterEntryPoints interpreter_entrypoints;
1080    JniEntryPoints jni_entrypoints;
1081    PortableEntryPoints portable_entrypoints;
1082    QuickEntryPoints quick_entrypoints;
1083
1084    // Thread-local allocation pointer.
1085    byte* thread_local_start;
1086    byte* thread_local_pos;
1087    byte* thread_local_end;
1088    size_t thread_local_objects;
1089
1090    // There are RosAlloc::kNumThreadLocalSizeBrackets thread-local size brackets per thread.
1091    void* rosalloc_runs[kNumRosAllocThreadLocalSizeBrackets];
1092
1093    // Thread-local allocation stack data/routines.
1094    mirror::Object** thread_local_alloc_stack_top;
1095    mirror::Object** thread_local_alloc_stack_end;
1096
1097    // Support for Mutex lock hierarchy bug detection.
1098    BaseMutex* held_mutexes[kLockLevelCount];
1099  } tlsPtr_;
1100
1101  // Guards the 'interrupted_' and 'wait_monitor_' members.
1102  Mutex* wait_mutex_ DEFAULT_MUTEX_ACQUIRED_AFTER;
1103
1104  // Condition variable waited upon during a wait.
1105  ConditionVariable* wait_cond_ GUARDED_BY(wait_mutex_);
1106  // Pointer to the monitor lock we're currently waiting on or NULL if not waiting.
1107  Monitor* wait_monitor_ GUARDED_BY(wait_mutex_);
1108
1109  // Thread "interrupted" status; stays raised until queried or thrown.
1110  bool interrupted_ GUARDED_BY(wait_mutex_);
1111
1112  friend class Dbg;  // For SetStateUnsafe.
1113  friend class gc::collector::SemiSpace;  // For getting stack traces.
1114  friend class Runtime;  // For CreatePeer.
1115  friend class QuickExceptionHandler;  // For dumping the stack.
1116  friend class ScopedThreadStateChange;
1117  friend class SignalCatcher;  // For SetStateUnsafe.
1118  friend class StubTest;  // For accessing entrypoints.
1119  friend class ThreadList;  // For ~Thread and Destroy.
1120
1121  friend class EntrypointsOrderTest;  // To test the order of tls entries.
1122
1123  DISALLOW_COPY_AND_ASSIGN(Thread);
1124};
1125
1126std::ostream& operator<<(std::ostream& os, const Thread& thread);
1127std::ostream& operator<<(std::ostream& os, const ThreadState& state);
1128
1129}  // namespace art
1130
1131#endif  // ART_RUNTIME_THREAD_H_
1132