// thread.cc revision f1d666e1b48f8070ef1177fce156c08827f08eb8
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define ATRACE_TAG ATRACE_TAG_DALVIK
18
19#include "thread.h"
20
21#include <cutils/trace.h>
22#include <pthread.h>
23#include <signal.h>
24#include <sys/resource.h>
25#include <sys/time.h>
26
27#include <algorithm>
28#include <bitset>
29#include <cerrno>
30#include <iostream>
31#include <list>
32#include <sstream>
33
34#include "arch/context.h"
35#include "art_field-inl.h"
36#include "art_method-inl.h"
37#include "base/bit_utils.h"
38#include "base/memory_tool.h"
39#include "base/mutex.h"
40#include "base/timing_logger.h"
41#include "base/to_str.h"
42#include "class_linker-inl.h"
43#include "debugger.h"
44#include "dex_file-inl.h"
45#include "entrypoints/entrypoint_utils.h"
46#include "entrypoints/quick/quick_alloc_entrypoints.h"
47#include "gc_map.h"
48#include "gc/accounting/card_table-inl.h"
49#include "gc/allocator/rosalloc.h"
50#include "gc/heap.h"
51#include "gc/space/space.h"
52#include "handle_scope-inl.h"
53#include "indirect_reference_table-inl.h"
54#include "jni_internal.h"
55#include "mirror/class_loader.h"
56#include "mirror/class-inl.h"
57#include "mirror/object_array-inl.h"
58#include "mirror/stack_trace_element.h"
59#include "monitor.h"
60#include "object_lock.h"
61#include "quick_exception_handler.h"
62#include "quick/quick_method_frame_info.h"
63#include "reflection.h"
64#include "runtime.h"
65#include "scoped_thread_state_change.h"
66#include "ScopedLocalRef.h"
67#include "ScopedUtfChars.h"
68#include "stack.h"
69#include "thread_list.h"
70#include "thread-inl.h"
71#include "utils.h"
72#include "verifier/dex_gc_map.h"
73#include "verifier/method_verifier.h"
74#include "verify_object-inl.h"
75#include "vmap_table.h"
76#include "well_known_classes.h"
77
78#if ART_USE_FUTEXES
79#include "linux/futex.h"
80#include "sys/syscall.h"
81#ifndef SYS_futex
82#define SYS_futex __NR_futex
83#endif
84#endif  // ART_USE_FUTEXES
85
86namespace art {
87
88bool Thread::is_started_ = false;
89pthread_key_t Thread::pthread_key_self_;
90ConditionVariable* Thread::resume_cond_ = nullptr;
91const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
92
93// For implicit overflow checks we reserve an extra piece of memory at the bottom
94// of the stack (lowest memory).  The higher portion of the memory
95// is protected against reads and the lower is available for use while
96// throwing the StackOverflow exception.
97constexpr size_t kStackOverflowProtectedSize = 4 * kMemoryToolStackGuardSizeScale * KB;
98
99static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
100
101void Thread::InitCardTable() {
102  tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
103}
104
105static void UnimplementedEntryPoint() {
106  UNIMPLEMENTED(FATAL);
107}
108
109void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
110                     QuickEntryPoints* qpoints);
111
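// Note: the loop below assumes that the interpreter, JNI and quick entrypoint
// structs are laid out contiguously in tlsPtr_, starting at
// interpreter_entrypoints and ending at quick_entrypoints.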
112void Thread::InitTlsEntryPoints() {
113  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
114  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.interpreter_entrypoints);
115  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) +
116      sizeof(tlsPtr_.quick_entrypoints));
117  for (uintptr_t* it = begin; it != end; ++it) {
118    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
119  }
120  InitEntryPoints(&tlsPtr_.interpreter_entrypoints, &tlsPtr_.jni_entrypoints,
121                  &tlsPtr_.quick_entrypoints);
122}
123
124void Thread::InitStringEntryPoints() {
125  ScopedObjectAccess soa(this);
126  QuickEntryPoints* qpoints = &tlsPtr_.quick_entrypoints;
127  qpoints->pNewEmptyString = reinterpret_cast<void(*)()>(
128      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newEmptyString));
129  qpoints->pNewStringFromBytes_B = reinterpret_cast<void(*)()>(
130      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_B));
131  qpoints->pNewStringFromBytes_BI = reinterpret_cast<void(*)()>(
132      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BI));
133  qpoints->pNewStringFromBytes_BII = reinterpret_cast<void(*)()>(
134      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BII));
135  qpoints->pNewStringFromBytes_BIII = reinterpret_cast<void(*)()>(
136      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIII));
137  qpoints->pNewStringFromBytes_BIIString = reinterpret_cast<void(*)()>(
138      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIIString));
139  qpoints->pNewStringFromBytes_BString = reinterpret_cast<void(*)()>(
140      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BString));
141  qpoints->pNewStringFromBytes_BIICharset = reinterpret_cast<void(*)()>(
142      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIICharset));
143  qpoints->pNewStringFromBytes_BCharset = reinterpret_cast<void(*)()>(
144      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BCharset));
145  qpoints->pNewStringFromChars_C = reinterpret_cast<void(*)()>(
146      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_C));
147  qpoints->pNewStringFromChars_CII = reinterpret_cast<void(*)()>(
148      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_CII));
149  qpoints->pNewStringFromChars_IIC = reinterpret_cast<void(*)()>(
150      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_IIC));
151  qpoints->pNewStringFromCodePoints = reinterpret_cast<void(*)()>(
152      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromCodePoints));
153  qpoints->pNewStringFromString = reinterpret_cast<void(*)()>(
154      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromString));
155  qpoints->pNewStringFromStringBuffer = reinterpret_cast<void(*)()>(
156      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromStringBuffer));
157  qpoints->pNewStringFromStringBuilder = reinterpret_cast<void(*)()>(
158      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromStringBuilder));
159}
160
161void Thread::ResetQuickAllocEntryPointsForThread() {
162  ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
163}
164
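// Linked-list record holding the state needed to finish a deoptimization: the
// return value of the topmost frame and the exception (if any) that was pending
// when the context was pushed. See Push/PopDeoptimizationContext below.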
165class DeoptimizationContextRecord {
166 public:
167  DeoptimizationContextRecord(const JValue& ret_val, bool is_reference,
168                              mirror::Throwable* pending_exception,
169                              DeoptimizationContextRecord* link)
170      : ret_val_(ret_val), is_reference_(is_reference), pending_exception_(pending_exception),
171        link_(link) {}
172
173  JValue GetReturnValue() const { return ret_val_; }
174  bool IsReference() const { return is_reference_; }
175  mirror::Throwable* GetPendingException() const { return pending_exception_; }
176  DeoptimizationContextRecord* GetLink() const { return link_; }
177  mirror::Object** GetReturnValueAsGCRoot() {
178    DCHECK(is_reference_);
179    return ret_val_.GetGCRoot();
180  }
181  mirror::Object** GetPendingExceptionAsGCRoot() {
182    return reinterpret_cast<mirror::Object**>(&pending_exception_);
183  }
184
185 private:
186  // The value returned by the method at the top of the stack before deoptimization.
187  JValue ret_val_;
188
189  // Indicates whether the returned value is a reference. If so, the GC will visit it.
190  const bool is_reference_;
191
192  // The exception that was pending before deoptimization (or null if there was no pending
193  // exception).
194  mirror::Throwable* pending_exception_;
195
196  // A link to the previous DeoptimizationContextRecord.
197  DeoptimizationContextRecord* const link_;
198
199  DISALLOW_COPY_AND_ASSIGN(DeoptimizationContextRecord);
200};
201
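// Linked-list record pairing a ShadowFrame with the reason it was pushed
// (StackedShadowFrameType), so that PopStackedShadowFrame can check that it pops
// the expected kind of frame.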
202class StackedShadowFrameRecord {
203 public:
204  StackedShadowFrameRecord(ShadowFrame* shadow_frame,
205                           StackedShadowFrameType type,
206                           StackedShadowFrameRecord* link)
207      : shadow_frame_(shadow_frame),
208        type_(type),
209        link_(link) {}
210
211  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
212  StackedShadowFrameType GetType() const { return type_; }
213  StackedShadowFrameRecord* GetLink() const { return link_; }
214
215 private:
216  ShadowFrame* const shadow_frame_;
217  const StackedShadowFrameType type_;
218  StackedShadowFrameRecord* const link_;
219
220  DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
221};
222
223void Thread::PushDeoptimizationContext(const JValue& return_value, bool is_reference,
224                                       mirror::Throwable* exception) {
225  DeoptimizationContextRecord* record = new DeoptimizationContextRecord(
226      return_value,
227      is_reference,
228      exception,
229      tlsPtr_.deoptimization_context_stack);
230  tlsPtr_.deoptimization_context_stack = record;
231}
232
233void Thread::PopDeoptimizationContext(JValue* result, mirror::Throwable** exception) {
234  AssertHasDeoptimizationContext();
235  DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
236  tlsPtr_.deoptimization_context_stack = record->GetLink();
237  result->SetJ(record->GetReturnValue().GetJ());
238  *exception = record->GetPendingException();
239  delete record;
240}
241
242void Thread::AssertHasDeoptimizationContext() {
243  CHECK(tlsPtr_.deoptimization_context_stack != nullptr)
244      << "No deoptimization context for thread " << *this;
245}
246
247void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
248  StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
249      sf, type, tlsPtr_.stacked_shadow_frame_record);
250  tlsPtr_.stacked_shadow_frame_record = record;
251}
252
253ShadowFrame* Thread::PopStackedShadowFrame(StackedShadowFrameType type) {
254  StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
255  DCHECK(record != nullptr);
256  DCHECK_EQ(record->GetType(), type);
257  tlsPtr_.stacked_shadow_frame_record = record->GetLink();
258  ShadowFrame* shadow_frame = record->GetShadowFrame();
259  delete record;
260  return shadow_frame;
261}
262
263void Thread::InitTid() {
264  tls32_.tid = ::art::GetTid();
265}
266
267void Thread::InitAfterFork() {
268  // One thread (us) survived the fork, but we have a new tid so we need to
269  // update the value stashed in this Thread*.
270  InitTid();
271}
272
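// Start routine handed to pthread_create by CreateNativeThread: finishes
// attaching the new native thread, invokes the Java peer's run() method, and
// finally unregisters the thread from the thread list.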
273void* Thread::CreateCallback(void* arg) {
274  Thread* self = reinterpret_cast<Thread*>(arg);
275  Runtime* runtime = Runtime::Current();
276  if (runtime == nullptr) {
277    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
278    return nullptr;
279  }
280  {
281    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
282    //       after self->Init().
283    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
284    // Check that if we got here we cannot be shutting down (as shutdown should never have started
285    // while threads are being born).
286    CHECK(!runtime->IsShuttingDownLocked());
287    // Note: given that the JNIEnv is created in the parent thread, the only failure point here is
288    //       a mess in InitStackHwm. We do not have a reasonable way to recover from that, so abort
289    //       the runtime in such a case. In case this ever changes, we need to make sure here to
290    //       delete the tmp_jni_env, as we own it at this point.
291    CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM(), self->tlsPtr_.tmp_jni_env));
292    self->tlsPtr_.tmp_jni_env = nullptr;
293    Runtime::Current()->EndThreadBirth();
294  }
295  {
296    ScopedObjectAccess soa(self);
297    self->InitStringEntryPoints();
298
299    // Copy peer into self, deleting global reference when done.
300    CHECK(self->tlsPtr_.jpeer != nullptr);
301    self->tlsPtr_.opeer = soa.Decode<mirror::Object*>(self->tlsPtr_.jpeer);
302    self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
303    self->tlsPtr_.jpeer = nullptr;
304    self->SetThreadName(self->GetThreadName(soa)->ToModifiedUtf8().c_str());
305
306    ArtField* priorityField = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority);
307    self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));
308    Dbg::PostThreadStart(self);
309
310    // Invoke the 'run' method of our java.lang.Thread.
311    mirror::Object* receiver = self->tlsPtr_.opeer;
312    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
313    ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
314    InvokeVirtualOrInterfaceWithJValues(soa, ref.get(), mid, nullptr);
315  }
316  // Detach and delete self.
317  Runtime::Current()->GetThreadList()->Unregister(self);
318
319  return nullptr;
320}
321
322Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
323                                  mirror::Object* thread_peer) {
324  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
325  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
326  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
327  // to stop it from going away.
328  if (kIsDebugBuild) {
329    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
330    if (result != nullptr && !result->IsSuspended()) {
331      Locks::thread_list_lock_->AssertHeld(soa.Self());
332    }
333  }
334  return result;
335}
336
337Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
338                                  jobject java_thread) {
339  return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread));
340}
341
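// Computes the stack size to request from pthread for a thread started from
// managed code: the caller's request (or the runtime default), plus the 1MB
// Dalvik-compatible pad, plus the space reserved for stack-overflow handling,
// rounded up to a multiple of the page size.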
342static size_t FixStackSize(size_t stack_size) {
343  // A stack size of zero means "use the default".
344  if (stack_size == 0) {
345    stack_size = Runtime::Current()->GetDefaultStackSize();
346  }
347
348  // Dalvik used the bionic pthread default stack size for native threads,
349  // so include that here to support apps that expect large native stacks.
350  stack_size += 1 * MB;
351
352  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
353  if (stack_size < PTHREAD_STACK_MIN) {
354    stack_size = PTHREAD_STACK_MIN;
355  }
356
357  if (Runtime::Current()->ExplicitStackOverflowChecks()) {
358    // It's likely that callers are trying to ensure they have at least a certain amount of
359    // stack space, so we should add our reserved space on top of what they requested, rather
360    // than implicitly take it away from them.
361    stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
362  } else {
363    // If we are going to use implicit stack checks, allocate space for the protected
364    // region at the bottom of the stack.
365    stack_size += Thread::kStackOverflowImplicitCheckSize +
366        GetStackOverflowReservedBytes(kRuntimeISA);
367  }
368
369  // Some systems require the stack size to be a multiple of the system page size, so round up.
370  stack_size = RoundUp(stack_size, kPageSize);
371
372  return stack_size;
373}
374
375// Global variable to prevent the compiler optimizing away the page reads for the stack.
376uint8_t dont_optimize_this;
377
378// Install a protected region in the stack.  This is used to trigger a SIGSEGV if a stack
379// overflow is detected.  It is located right below the stack_begin_.
380//
381// There is a little complexity here that deserves a special mention.  On some
382// architectures, the stack is created using a VM_GROWSDOWN flag
383// to prevent memory being allocated when it's not needed.  This flag makes the
384// kernel only allocate memory for the stack by growing down in memory.  Because we
385// want to put an mprotected region far away from that at the stack top, we need
386// to make sure the pages for the stack are mapped in before we call mprotect.  We do
387// this by reading every page from the stack bottom (highest address) to the stack top.
388// We then madvise this away.
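//
// Roughly, the resulting layout is (sketch, highest addresses at the top):
//
//   +---------------------------+  stack base
//   |       usable stack        |
//   +---------------------------+  stack_end_
//   |  overflow-reserved bytes  |
//   +---------------------------+  stack_begin_
//   |     protected region      |  kStackOverflowProtectedSize, mprotect'ed below
//   +---------------------------+  pregion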
389
390// AddressSanitizer does not like the part of this function that reads every stack page.
391// Looks a lot like an out-of-bounds access.
392ATTRIBUTE_NO_SANITIZE_ADDRESS
393void Thread::InstallImplicitProtection() {
394  uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
395  uint8_t* stack_himem = tlsPtr_.stack_end;
396  uint8_t* stack_top = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(&stack_himem) &
397      ~(kPageSize - 1));    // Page containing current top of stack.
398
399  // First remove the protection on the protected region as we will want to read and
400  // write it.  This may fail (on the first attempt when the stack is not mapped)
401  // but we ignore that.
402  UnprotectStack();
403
404  // Map in the stack.  This must be done by reading from the
405  // current stack pointer downwards as the stack may be mapped using VM_GROWSDOWN
406  // in the kernel.  Any access more than a page below the current SP might cause
407  // a segv.
408
409  // Read every page from the high address to the low.
410  for (uint8_t* p = stack_top; p >= pregion; p -= kPageSize) {
411    dont_optimize_this = *p;
412  }
413
414  VLOG(threads) << "installing stack protected region at " << std::hex <<
415      static_cast<void*>(pregion) << " to " <<
416      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
417
418  // Protect the bottom of the stack to prevent read/write to it.
419  ProtectStack();
420
421  // Tell the kernel that we won't be needing these pages any more.
422  // NB. madvise will probably write zeroes into the memory (on linux it does).
423  uint32_t unwanted_size = stack_top - pregion - kPageSize;
424  madvise(pregion, unwanted_size, MADV_DONTNEED);
425}
426
427void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
428  CHECK(java_peer != nullptr);
429  Thread* self = static_cast<JNIEnvExt*>(env)->self;
430
431  if (VLOG_IS_ON(threads)) {
432    ScopedObjectAccess soa(env);
433
434    ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
435    mirror::String* java_name = reinterpret_cast<mirror::String*>(f->GetObject(
436        soa.Decode<mirror::Object*>(java_peer)));
437    std::string thread_name;
438    if (java_name != nullptr) {
439      thread_name = java_name->ToModifiedUtf8();
440    } else {
441      thread_name = "(Unnamed)";
442    }
443
444    VLOG(threads) << "Creating native thread for " << thread_name;
445    self->Dump(LOG(INFO));
446  }
447
448  Runtime* runtime = Runtime::Current();
449
450  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
451  bool thread_start_during_shutdown = false;
452  {
453    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
454    if (runtime->IsShuttingDownLocked()) {
455      thread_start_during_shutdown = true;
456    } else {
457      runtime->StartThreadBirth();
458    }
459  }
460  if (thread_start_during_shutdown) {
461    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
462    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
463    return;
464  }
465
466  Thread* child_thread = new Thread(is_daemon);
467  // Use global JNI ref to hold peer live while child thread starts.
468  child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
469  stack_size = FixStackSize(stack_size);
470
471  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to
472  // assign it.
473  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
474                    reinterpret_cast<jlong>(child_thread));
475
476  // Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and
477  // do not have a good way to report this on the child's side.
478  std::unique_ptr<JNIEnvExt> child_jni_env_ext(
479      JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM()));
480
481  int pthread_create_result = 0;
482  if (child_jni_env_ext.get() != nullptr) {
483    pthread_t new_pthread;
484    pthread_attr_t attr;
485    child_thread->tlsPtr_.tmp_jni_env = child_jni_env_ext.get();
486    CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
487    CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
488                       "PTHREAD_CREATE_DETACHED");
489    CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
490    pthread_create_result = pthread_create(&new_pthread,
491                                           &attr,
492                                           Thread::CreateCallback,
493                                           child_thread);
494    CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");
495
496    if (pthread_create_result == 0) {
497      // pthread_create started the new thread. The child is now responsible for managing the
498      // JNIEnvExt we created.
499      // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
500      //       between the threads.
501      child_jni_env_ext.release();
502      return;
503    }
504  }
505
506  // Either JNIEnvExt::Create or pthread_create(3) failed, so clean up.
507  {
508    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
509    runtime->EndThreadBirth();
510  }
511  // Manually delete the global reference since Thread::Init will not have been run.
512  env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer);
513  child_thread->tlsPtr_.jpeer = nullptr;
514  delete child_thread;
515  child_thread = nullptr;
516  // TODO: remove from thread group?
517  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
518  {
519    std::string msg(child_jni_env_ext.get() == nullptr ?
520        "Could not allocate JNI Env" :
521        StringPrintf("pthread_create (%s stack) failed: %s",
522                                 PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
523    ScopedObjectAccess soa(env);
524    soa.Self()->ThrowOutOfMemoryError(msg.c_str());
525  }
526}
527
528bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_env_ext) {
529  // This function does all the initialization that must be run by the native thread it applies to.
530  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
531  // we can handshake with the corresponding native thread when it's ready.) Check this native
532  // thread hasn't been through here already...
533  CHECK(Thread::Current() == nullptr);
534
535  // Set pthread_self_ ahead of pthread_setspecific, which makes Thread::Current() work; this
536  // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
537  tlsPtr_.pthread_self = pthread_self();
538  CHECK(is_started_);
539
540  SetUpAlternateSignalStack();
541  if (!InitStackHwm()) {
542    return false;
543  }
544  InitCpu();
545  InitTlsEntryPoints();
546  RemoveSuspendTrigger();
547  InitCardTable();
548  InitTid();
549
550#ifdef __ANDROID__
551  __get_tls()[TLS_SLOT_ART_THREAD_SELF] = this;
552#else
553  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
554#endif
555  DCHECK_EQ(Thread::Current(), this);
556
557  tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
558
559  if (jni_env_ext != nullptr) {
560    DCHECK_EQ(jni_env_ext->vm, java_vm);
561    DCHECK_EQ(jni_env_ext->self, this);
562    tlsPtr_.jni_env = jni_env_ext;
563  } else {
564    tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm);
565    if (tlsPtr_.jni_env == nullptr) {
566      return false;
567    }
568  }
569
570  thread_list->Register(this);
571  return true;
572}
573
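// Attaches the calling native thread to the runtime. Returns null if the runtime
// does not exist or is shutting down. When create_peer is true, a java.lang.Thread
// peer object is also created and initialized for the thread.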
574Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
575                       bool create_peer) {
576  Runtime* runtime = Runtime::Current();
577  if (runtime == nullptr) {
578    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
579    return nullptr;
580  }
581  Thread* self;
582  {
583    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
584    if (runtime->IsShuttingDownLocked()) {
585      LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
586      return nullptr;
587    } else {
588      Runtime::Current()->StartThreadBirth();
589      self = new Thread(as_daemon);
590      bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
591      Runtime::Current()->EndThreadBirth();
592      if (!init_success) {
593        delete self;
594        return nullptr;
595      }
596    }
597  }
598
599  self->InitStringEntryPoints();
600
601  CHECK_NE(self->GetState(), kRunnable);
602  self->SetState(kNative);
603
604  // If we're the main thread, ClassLinker won't be created until after we're attached,
605  // so that thread needs a two-stage attach. Regular threads don't need this hack.
606  // In the compiler, all threads need this hack, because no-one's going to be getting
607  // a native peer!
608  if (create_peer) {
609    self->CreatePeer(thread_name, as_daemon, thread_group);
610  } else {
611    // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
612    if (thread_name != nullptr) {
613      self->tlsPtr_.name->assign(thread_name);
614      ::art::SetThreadName(thread_name);
615    } else if (self->GetJniEnv()->check_jni) {
616      LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
617    }
618  }
619
620  if (VLOG_IS_ON(threads)) {
621    if (thread_name != nullptr) {
622      VLOG(threads) << "Attaching thread " << thread_name;
623    } else {
624      VLOG(threads) << "Attaching unnamed thread.";
625    }
626    ScopedObjectAccess soa(self);
627    self->Dump(LOG(INFO));
628  }
629
630  {
631    ScopedObjectAccess soa(self);
632    Dbg::PostThreadStart(self);
633  }
634
635  return self;
636}
637
638void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
639  Runtime* runtime = Runtime::Current();
640  CHECK(runtime->IsStarted());
641  JNIEnv* env = tlsPtr_.jni_env;
642
643  if (thread_group == nullptr) {
644    thread_group = runtime->GetMainThreadGroup();
645  }
646  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
647  // Add missing null check in case of OOM b/18297817
648  if (name != nullptr && thread_name.get() == nullptr) {
649    CHECK(IsExceptionPending());
650    return;
651  }
652  jint thread_priority = GetNativePriority();
653  jboolean thread_is_daemon = as_daemon;
654
655  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
656  if (peer.get() == nullptr) {
657    CHECK(IsExceptionPending());
658    return;
659  }
660  {
661    ScopedObjectAccess soa(this);
662    tlsPtr_.opeer = soa.Decode<mirror::Object*>(peer.get());
663  }
664  env->CallNonvirtualVoidMethod(peer.get(),
665                                WellKnownClasses::java_lang_Thread,
666                                WellKnownClasses::java_lang_Thread_init,
667                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
668  AssertNoPendingException();
669
670  Thread* self = this;
671  DCHECK_EQ(self, Thread::Current());
672  env->SetLongField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
673                    reinterpret_cast<jlong>(self));
674
675  ScopedObjectAccess soa(self);
676  StackHandleScope<1> hs(self);
677  MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName(soa)));
678  if (peer_thread_name.Get() == nullptr) {
679    // The Thread constructor should have set the Thread.name to a
680    // non-null value. However, because we can run without code
681    // available (in the compiler, in tests), we manually assign the
682    // fields the constructor should have set.
683    if (runtime->IsActiveTransaction()) {
684      InitPeer<true>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
685    } else {
686      InitPeer<false>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
687    }
688    peer_thread_name.Assign(GetThreadName(soa));
689  }
690  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
691  if (peer_thread_name.Get() != nullptr) {
692    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
693  }
694}
695
696template<bool kTransactionActive>
697void Thread::InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
698                      jobject thread_name, jint thread_priority) {
699  soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
700      SetBoolean<kTransactionActive>(tlsPtr_.opeer, thread_is_daemon);
701  soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
702      SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object*>(thread_group));
703  soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
704      SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object*>(thread_name));
705  soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
706      SetInt<kTransactionActive>(tlsPtr_.opeer, thread_priority);
707}
708
709void Thread::SetThreadName(const char* name) {
710  tlsPtr_.name->assign(name);
711  ::art::SetThreadName(name);
712  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
713}
714
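// Records the bounds of this thread's native stack and sets stack_end_. When
// implicit stack-overflow checks are in use, this also carves out and protects
// the guard region. Returns false if the stack is too small to be usable.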
715bool Thread::InitStackHwm() {
716  void* read_stack_base;
717  size_t read_stack_size;
718  size_t read_guard_size;
719  GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size);
720
721  tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base);
722  tlsPtr_.stack_size = read_stack_size;
723
724  // The minimum stack size we can cope with is the overflow reserved bytes (typically
725  // 8K) + the protected region size (4K) + another page (4K).  Typically this will
726  // be 8+4+4 = 16K.  The thread won't be able to do much with this stack: even the GC takes
727  // between 8K and 12K.
728  uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
729    + 4 * KB;
730  if (read_stack_size <= min_stack) {
731    // Note, as we know the stack is small, avoid operations that could use a lot of stack.
732    LogMessage::LogLineLowStack(__PRETTY_FUNCTION__, __LINE__, ERROR,
733                                "Attempt to attach a thread with a too-small stack");
734    return false;
735  }
736
737  // This is included in the SIGQUIT output, but it's useful here for thread debugging.
738  VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)",
739                                read_stack_base,
740                                PrettySize(read_stack_size).c_str(),
741                                PrettySize(read_guard_size).c_str());
742
743  // Set stack_end_ to the bottom of the stack, saving space for handling stack overflows.
744
745  Runtime* runtime = Runtime::Current();
746  bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
747  ResetDefaultStackEnd();
748
749  // Install the protected region if we are doing implicit overflow checks.
750  if (implicit_stack_check) {
751    // The thread might have a protected region at the bottom.  We need
752    // to install our own region, so we move the limits
753    // of the stack to make room for it.
754
755    tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
756    tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
757    tlsPtr_.stack_size -= read_guard_size;
758
759    InstallImplicitProtection();
760  }
761
762  // Sanity check.
763  int stack_variable;
764  CHECK_GT(&stack_variable, reinterpret_cast<void*>(tlsPtr_.stack_end));
765
766  return true;
767}
768
769void Thread::ShortDump(std::ostream& os) const {
770  os << "Thread[";
771  if (GetThreadId() != 0) {
772    // If we're in kStarting, we won't have a thin lock id or tid yet.
773    os << GetThreadId()
774       << ",tid=" << GetTid() << ',';
775  }
776  os << GetState()
777     << ",Thread*=" << this
778     << ",peer=" << tlsPtr_.opeer
779     << ",\"" << (tlsPtr_.name != nullptr ? *tlsPtr_.name : "null") << "\""
780     << "]";
781}
782
783void Thread::Dump(std::ostream& os) const {
784  DumpState(os);
785  DumpStack(os);
786}
787
788mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
789  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
790  return (tlsPtr_.opeer != nullptr) ?
791      reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
792}
793
794void Thread::GetThreadName(std::string& name) const {
795  name.assign(*tlsPtr_.name);
796}
797
798uint64_t Thread::GetCpuMicroTime() const {
799#if defined(__linux__)
800  clockid_t cpu_clock_id;
801  pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
802  timespec now;
803  clock_gettime(cpu_clock_id, &now);
804  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
805#else  // __APPLE__
806  UNIMPLEMENTED(WARNING);
807  return -1;
808#endif
809}
810
811// Attempt to rectify locks so that we dump thread list with required locks before exiting.
812static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
813  LOG(ERROR) << *thread << " suspend count already zero.";
814  Locks::thread_suspend_count_lock_->Unlock(self);
815  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
816    Locks::mutator_lock_->SharedTryLock(self);
817    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
818      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
819    }
820  }
821  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
822    Locks::thread_list_lock_->TryLock(self);
823    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
824      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
825    }
826  }
827  std::ostringstream ss;
828  Runtime::Current()->GetThreadList()->Dump(ss);
829  LOG(FATAL) << ss.str();
830}
831
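// Adjusts this thread's suspend count by delta while holding
// thread_suspend_count_lock_. Returns false if a suspend barrier was requested
// but all of the barrier slots are already in use.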
832bool Thread::ModifySuspendCount(Thread* self, int delta, AtomicInteger* suspend_barrier,
833                                bool for_debugger) {
834  if (kIsDebugBuild) {
835    DCHECK(delta == -1 || delta == +1 || delta == -tls32_.debug_suspend_count)
836          << delta << " " << tls32_.debug_suspend_count << " " << this;
837    DCHECK_GE(tls32_.suspend_count, tls32_.debug_suspend_count) << this;
838    Locks::thread_suspend_count_lock_->AssertHeld(self);
839    if (this != self && !IsSuspended()) {
840      Locks::thread_list_lock_->AssertHeld(self);
841    }
842  }
843  if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
844    UnsafeLogFatalForSuspendCount(self, this);
845    return false;
846  }
847
848  uint16_t flags = kSuspendRequest;
849  if (delta > 0 && suspend_barrier != nullptr) {
850    uint32_t available_barrier = kMaxSuspendBarriers;
851    for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
852      if (tlsPtr_.active_suspend_barriers[i] == nullptr) {
853        available_barrier = i;
854        break;
855      }
856    }
857    if (available_barrier == kMaxSuspendBarriers) {
858      // No barrier spaces available, we can't add another.
859      return false;
860    }
861    tlsPtr_.active_suspend_barriers[available_barrier] = suspend_barrier;
862    flags |= kActiveSuspendBarrier;
863  }
864
865  tls32_.suspend_count += delta;
866  if (for_debugger) {
867    tls32_.debug_suspend_count += delta;
868  }
869
870  if (tls32_.suspend_count == 0) {
871    AtomicClearFlag(kSuspendRequest);
872  } else {
873    // Two bits might be set simultaneously.
874    tls32_.state_and_flags.as_atomic_int.FetchAndOrSequentiallyConsistent(flags);
875    TriggerSuspend();
876  }
877  return true;
878}
879
880bool Thread::PassActiveSuspendBarriers(Thread* self) {
881  // Grab the suspend_count lock and copy the current set of
882  // barriers. Then clear the list and the flag. The ModifySuspendCount
883  // function requires the lock so we prevent a race between setting
884  // the kActiveSuspendBarrier flag and clearing it.
885  AtomicInteger* pass_barriers[kMaxSuspendBarriers];
886  {
887    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
888    if (!ReadFlag(kActiveSuspendBarrier)) {
889      // Quick exit test: the barriers have already been claimed - this is
890      // possible as there may be a race to claim and it doesn't matter
891      // who wins.
892      // All of the callers of this function (except SuspendAllInternal)
893      // first test the kActiveSuspendBarrier flag without the lock. Here we
894      // double-check, with the suspend_count lock held, whether the barrier
895      // has already been passed.
896      return false;
897    }
898
899    for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
900      pass_barriers[i] = tlsPtr_.active_suspend_barriers[i];
901      tlsPtr_.active_suspend_barriers[i] = nullptr;
902    }
903    AtomicClearFlag(kActiveSuspendBarrier);
904  }
905
906  uint32_t barrier_count = 0;
907  for (uint32_t i = 0; i < kMaxSuspendBarriers; i++) {
908    AtomicInteger* pending_threads = pass_barriers[i];
909    if (pending_threads != nullptr) {
910      bool done = false;
911      do {
912        int32_t cur_val = pending_threads->LoadRelaxed();
913        CHECK_GT(cur_val, 0) << "Unexpected value for PassActiveSuspendBarriers(): " << cur_val;
914        // Reduce value by 1.
915        done = pending_threads->CompareExchangeWeakRelaxed(cur_val, cur_val - 1);
916#if ART_USE_FUTEXES
917        if (done && (cur_val - 1) == 0) {  // Weak CAS may fail spuriously.
918          futex(pending_threads->Address(), FUTEX_WAKE, -1, nullptr, nullptr, 0);
919        }
920#endif
921      } while (!done);
922      ++barrier_count;
923    }
924  }
925  CHECK_GT(barrier_count, 0U);
926  return true;
927}
928
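// Removes the given barrier from this thread's active suspend-barrier slots and
// clears the kActiveSuspendBarrier flag once no occupied slot remains.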
929void Thread::ClearSuspendBarrier(AtomicInteger* target) {
930  CHECK(ReadFlag(kActiveSuspendBarrier));
931  bool clear_flag = true;
932  for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
933    AtomicInteger* ptr = tlsPtr_.active_suspend_barriers[i];
934    if (ptr == target) {
935      tlsPtr_.active_suspend_barriers[i] = nullptr;
936    } else if (ptr != nullptr) {
937      clear_flag = false;
938    }
939  }
940  if (LIKELY(clear_flag)) {
941    AtomicClearFlag(kActiveSuspendBarrier);
942  }
943}
944
945void Thread::RunCheckpointFunction() {
946  Closure *checkpoints[kMaxCheckpoints];
947
948  // Grab the suspend_count lock and copy the current set of
949  // checkpoints.  Then clear the list and the flag.  The RequestCheckpoint
950  // function will also grab this lock so we prevent a race between setting
951  // the kCheckpointRequest flag and clearing it.
952  {
953    MutexLock mu(this, *Locks::thread_suspend_count_lock_);
954    for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
955      checkpoints[i] = tlsPtr_.checkpoint_functions[i];
956      tlsPtr_.checkpoint_functions[i] = nullptr;
957    }
958    AtomicClearFlag(kCheckpointRequest);
959  }
960
961  // Outside the lock, run all the checkpoint functions that
962  // we collected.
963  bool found_checkpoint = false;
964  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
965    if (checkpoints[i] != nullptr) {
966      ATRACE_BEGIN("Checkpoint function");
967      checkpoints[i]->Run(this);
968      ATRACE_END();
969      found_checkpoint = true;
970    }
971  }
972  CHECK(found_checkpoint);
973}
974
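// Asks this thread to run the given closure at its next suspend check. Returns
// false (and installs nothing) if the thread is not runnable or if every
// checkpoint slot is already occupied; the caller then has to handle the
// checkpoint some other way, e.g. by suspending the thread and running it directly.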
975bool Thread::RequestCheckpoint(Closure* function) {
976  union StateAndFlags old_state_and_flags;
977  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
978  if (old_state_and_flags.as_struct.state != kRunnable) {
979    return false;  // Fail, thread is suspended and so can't run a checkpoint.
980  }
981
982  uint32_t available_checkpoint = kMaxCheckpoints;
983  for (uint32_t i = 0 ; i < kMaxCheckpoints; ++i) {
984    if (tlsPtr_.checkpoint_functions[i] == nullptr) {
985      available_checkpoint = i;
986      break;
987    }
988  }
989  if (available_checkpoint == kMaxCheckpoints) {
990    // No free checkpoint slot, so we can't request another checkpoint.
991    return false;
992  }
993  tlsPtr_.checkpoint_functions[available_checkpoint] = function;
994
995  // Checkpoint function installed; now install the flag bit.
996  // We must be runnable to request a checkpoint.
997  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
998  union StateAndFlags new_state_and_flags;
999  new_state_and_flags.as_int = old_state_and_flags.as_int;
1000  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
1001  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
1002      old_state_and_flags.as_int, new_state_and_flags.as_int);
1003  if (UNLIKELY(!success)) {
1004    // The thread changed state before the checkpoint was installed.
1005    CHECK_EQ(tlsPtr_.checkpoint_functions[available_checkpoint], function);
1006    tlsPtr_.checkpoint_functions[available_checkpoint] = nullptr;
1007  } else {
1008    CHECK_EQ(ReadFlag(kCheckpointRequest), true);
1009    TriggerSuspend();
1010  }
1011  return success;
1012}
1013
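// Atomically claims and clears the pending thread-flip closure (installed when
// the GC flips thread roots), returning null if none is installed or if another
// thread claimed it first.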
1014Closure* Thread::GetFlipFunction() {
1015  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
1016  Closure* func;
1017  do {
1018    func = atomic_func->LoadRelaxed();
1019    if (func == nullptr) {
1020      return nullptr;
1021    }
1022  } while (!atomic_func->CompareExchangeWeakSequentiallyConsistent(func, nullptr));
1023  DCHECK(func != nullptr);
1024  return func;
1025}
1026
1027void Thread::SetFlipFunction(Closure* function) {
1028  CHECK(function != nullptr);
1029  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
1030  atomic_func->StoreSequentiallyConsistent(function);
1031}
1032
1033void Thread::FullSuspendCheck() {
1034  VLOG(threads) << this << " self-suspending";
1035  ATRACE_BEGIN("Full suspend check");
1036  // Make thread appear suspended to other threads, release mutator_lock_.
1037  tls32_.suspended_at_suspend_check = true;
1038  // Transition to suspended and back to runnable, re-acquire share on mutator_lock_.
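  // Note: the unnamed ScopedThreadSuspension temporary appears intentional: its
  // constructor releases the shared mutator_lock_ and its destructor re-acquires
  // it at the end of this statement, blocking here while a suspend request is
  // pending.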
1039  ScopedThreadSuspension(this, kSuspended);
1040  tls32_.suspended_at_suspend_check = false;
1041  ATRACE_END();
1042  VLOG(threads) << this << " self-reviving";
1043}
1044
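// Prints the thread-dump header lines (name, priority, state, scheduler stats,
// stack bounds, held mutexes) for |thread|, which may be null for native threads
// that are not attached to the runtime.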
1045void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
1046  std::string group_name;
1047  int priority;
1048  bool is_daemon = false;
1049  Thread* self = Thread::Current();
1050
1051  // If flip_function is not null, this thread has a pending thread flip that it
1052  // has not yet woken up to execute, and so its roots have not been forwarded
1053  // yet.  The following accesses to the roots (opeer or methods in the frames)
1054  // would therefore be bad. Run the flip function here instead.
1055  // TODO: clean up.
1056  if (thread != nullptr) {
1057    ScopedObjectAccessUnchecked soa(self);
1058    Thread* this_thread = const_cast<Thread*>(thread);
1059    Closure* flip_func = this_thread->GetFlipFunction();
1060    if (flip_func != nullptr) {
1061      flip_func->Run(this_thread);
1062    }
1063  }
1064
1065  // Don't do this if we are aborting since the GC may have all the threads suspended. This will
1066  // cause ScopedObjectAccessUnchecked to deadlock.
1067  if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
1068    ScopedObjectAccessUnchecked soa(self);
1069    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)
1070        ->GetInt(thread->tlsPtr_.opeer);
1071    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)
1072        ->GetBoolean(thread->tlsPtr_.opeer);
1073
1074    mirror::Object* thread_group =
1075        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->tlsPtr_.opeer);
1076
1077    if (thread_group != nullptr) {
1078      ArtField* group_name_field =
1079          soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
1080      mirror::String* group_name_string =
1081          reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
1082      group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
1083    }
1084  } else {
1085    priority = GetNativePriority();
1086  }
1087
1088  std::string scheduler_group_name(GetSchedulerGroupName(tid));
1089  if (scheduler_group_name.empty()) {
1090    scheduler_group_name = "default";
1091  }
1092
1093  if (thread != nullptr) {
1094    os << '"' << *thread->tlsPtr_.name << '"';
1095    if (is_daemon) {
1096      os << " daemon";
1097    }
1098    os << " prio=" << priority
1099       << " tid=" << thread->GetThreadId()
1100       << " " << thread->GetState();
1101    if (thread->IsStillStarting()) {
1102      os << " (still starting up)";
1103    }
1104    os << "\n";
1105  } else {
1106    os << '"' << ::art::GetThreadName(tid) << '"'
1107       << " prio=" << priority
1108       << " (not attached)\n";
1109  }
1110
1111  if (thread != nullptr) {
1112    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
1113    os << "  | group=\"" << group_name << "\""
1114       << " sCount=" << thread->tls32_.suspend_count
1115       << " dsCount=" << thread->tls32_.debug_suspend_count
1116       << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer)
1117       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
1118  }
1119
1120  os << "  | sysTid=" << tid
1121     << " nice=" << getpriority(PRIO_PROCESS, tid)
1122     << " cgrp=" << scheduler_group_name;
1123  if (thread != nullptr) {
1124    int policy;
1125    sched_param sp;
1126    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp),
1127                       __FUNCTION__);
1128    os << " sched=" << policy << "/" << sp.sched_priority
1129       << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self);
1130  }
1131  os << "\n";
1132
1133  // Grab the scheduler stats for this thread.
1134  std::string scheduler_stats;
1135  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
1136    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
1137  } else {
1138    scheduler_stats = "0 0 0";
1139  }
1140
1141  char native_thread_state = '?';
1142  int utime = 0;
1143  int stime = 0;
1144  int task_cpu = 0;
1145  GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu);
1146
1147  os << "  | state=" << native_thread_state
1148     << " schedstat=( " << scheduler_stats << " )"
1149     << " utm=" << utime
1150     << " stm=" << stime
1151     << " core=" << task_cpu
1152     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
1153  if (thread != nullptr) {
1154    os << "  | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-"
1155        << reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize="
1156        << PrettySize(thread->tlsPtr_.stack_size) << "\n";
1157    // Dump the held mutexes.
1158    os << "  | held mutexes=";
1159    for (size_t i = 0; i < kLockLevelCount; ++i) {
1160      if (i != kMonitorLock) {
1161        BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i));
1162        if (mutex != nullptr) {
1163          os << " \"" << mutex->GetName() << "\"";
1164          if (mutex->IsReaderWriterMutex()) {
1165            ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex);
1166            if (rw_mutex->GetExclusiveOwnerTid() == static_cast<uint64_t>(tid)) {
1167              os << "(exclusive held)";
1168            } else {
1169              os << "(shared held)";
1170            }
1171          }
1172        }
1173      }
1174    }
1175    os << "\n";
1176  }
1177}
1178
1179void Thread::DumpState(std::ostream& os) const {
1180  Thread::DumpState(os, this, GetTid());
1181}
1182
1183struct StackDumpVisitor : public StackVisitor {
1184  StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
1185      SHARED_REQUIRES(Locks::mutator_lock_)
1186      : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
1187        os(os_in),
1188        thread(thread_in),
1189        can_allocate(can_allocate_in),
1190        last_method(nullptr),
1191        last_line_number(0),
1192        repetition_count(0),
1193        frame_count(0) {}
1194
1195  virtual ~StackDumpVisitor() {
1196    if (frame_count == 0) {
1197      os << "  (no managed stack frames)\n";
1198    }
1199  }
1200
1201  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
1202    ArtMethod* m = GetMethod();
1203    if (m->IsRuntimeMethod()) {
1204      return true;
1205    }
1206    m = m->GetInterfaceMethodIfProxy(sizeof(void*));
1207    const int kMaxRepetition = 3;
1208    mirror::Class* c = m->GetDeclaringClass();
1209    mirror::DexCache* dex_cache = c->GetDexCache();
1210    int line_number = -1;
1211    if (dex_cache != nullptr) {  // be tolerant of bad input
1212      const DexFile& dex_file = *dex_cache->GetDexFile();
1213      line_number = dex_file.GetLineNumFromPC(m, GetDexPc(false));
1214    }
1215    if (line_number == last_line_number && last_method == m) {
1216      ++repetition_count;
1217    } else {
1218      if (repetition_count >= kMaxRepetition) {
1219        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
1220      }
1221      repetition_count = 0;
1222      last_line_number = line_number;
1223      last_method = m;
1224    }
1225    if (repetition_count < kMaxRepetition) {
1226      os << "  at " << PrettyMethod(m, false);
1227      if (m->IsNative()) {
1228        os << "(Native method)";
1229      } else {
1230        const char* source_file(m->GetDeclaringClassSourceFile());
1231        os << "(" << (source_file != nullptr ? source_file : "unavailable")
1232           << ":" << line_number << ")";
1233      }
1234      os << "\n";
1235      if (frame_count == 0) {
1236        Monitor::DescribeWait(os, thread);
1237      }
1238      if (can_allocate) {
1239        // Visit locks, but do not abort on errors. This would trigger a nested abort.
1240        Monitor::VisitLocks(this, DumpLockedObject, &os, false);
1241      }
1242    }
1243
1244    ++frame_count;
1245    return true;
1246  }
1247
1248  static void DumpLockedObject(mirror::Object* o, void* context)
1249      SHARED_REQUIRES(Locks::mutator_lock_) {
1250    std::ostream& os = *reinterpret_cast<std::ostream*>(context);
1251    os << "  - locked ";
1252    if (o == nullptr) {
1253      os << "an unknown object";
1254    } else {
1255      if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
1256          Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
1257        // Getting the identity hashcode here would result in lock inflation and suspension of the
1258        // current thread, which isn't safe if this is the only runnable thread.
1259        os << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)", reinterpret_cast<intptr_t>(o),
1260                           PrettyTypeOf(o).c_str());
1261      } else {
1262        // IdentityHashCode can cause thread suspension, which would invalidate o if it moved. So
1263        // we get the pretty type before we call IdentityHashCode.
1264        const std::string pretty_type(PrettyTypeOf(o));
1265        os << StringPrintf("<0x%08x> (a %s)", o->IdentityHashCode(), pretty_type.c_str());
1266      }
1267    }
1268    os << "\n";
1269  }
1270
1271  std::ostream& os;
1272  const Thread* thread;
1273  const bool can_allocate;
1274  ArtMethod* last_method;
1275  int last_line_number;
1276  int repetition_count;
1277  int frame_count;
1278};
1279
1280static bool ShouldShowNativeStack(const Thread* thread)
1281    SHARED_REQUIRES(Locks::mutator_lock_) {
1282  ThreadState state = thread->GetState();
1283
1284  // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
1285  if (state > kWaiting && state < kStarting) {
1286    return true;
1287  }
1288
1289  // In an Object.wait variant or Thread.sleep? That's not interesting.
1290  if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
1291    return false;
1292  }
1293
1294  // Threads with no managed stack frames should be shown.
1295  const ManagedStack* managed_stack = thread->GetManagedStack();
1296  if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
1297      managed_stack->GetTopShadowFrame() == nullptr)) {
1298    return true;
1299  }
1300
1301  // In some other native method? That's interesting.
1302  // We don't just check kNative because native methods will be in state kSuspended if they're
1303  // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
1304  // thread-startup states if it's early enough in their life cycle (http://b/7432159).
1305  ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
1306  return current_method != nullptr && current_method->IsNative();
1307}
1308
1309void Thread::DumpJavaStack(std::ostream& os) const {
1310  // If flip_function is not null, this thread has a pending thread flip that it
1311  // has not yet woken up to execute, and so its roots have not been forwarded
1312  // yet.  The following accesses to the roots (locks or methods in the frames)
1313  // would therefore be bad. Run the flip function here instead.
1314  // TODO: clean up.
1315  {
1316    Thread* this_thread = const_cast<Thread*>(this);
1317    Closure* flip_func = this_thread->GetFlipFunction();
1318    if (flip_func != nullptr) {
1319      flip_func->Run(this_thread);
1320    }
1321  }
1322
1323  // Dumping the Java stack involves the verifier for locks. The verifier operates under the
1324  // assumption that there is no exception pending on entry. Thus, stash any pending exception.
1325  // We use Thread::Current() instead of this in case a thread is dumping the stack of another
1326  // suspended thread.
1327  StackHandleScope<1> scope(Thread::Current());
1328  Handle<mirror::Throwable> exc;
1329  bool have_exception = false;
1330  if (IsExceptionPending()) {
1331    exc = scope.NewHandle(GetException());
1332    const_cast<Thread*>(this)->ClearException();
1333    have_exception = true;
1334  }
1335
1336  std::unique_ptr<Context> context(Context::Create());
1337  StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
1338                          !tls32_.throwing_OutOfMemoryError);
1339  dumper.WalkStack();
1340
1341  if (have_exception) {
1342    const_cast<Thread*>(this)->SetException(exc.Get());
1343  }
1344}
1345
1346void Thread::DumpStack(std::ostream& os) const {
1347  // TODO: we call this code when dying but may not have suspended the thread ourselves. The
1348  //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
1349  //       the race with the thread_suspend_count_lock_).
1350  bool dump_for_abort = (gAborting > 0);
1351  bool safe_to_dump = (this == Thread::Current() || IsSuspended());
1352  if (!kIsDebugBuild) {
1353    // We always want to dump the stack for an abort, however, there is no point dumping another
1354    // thread's stack in debug builds where we'll hit the not suspended check in the stack walk.
1355    safe_to_dump = (safe_to_dump || dump_for_abort);
1356  }
1357  if (safe_to_dump) {
1358    // If we're currently in native code, dump that stack before dumping the managed stack.
1359    if (dump_for_abort || ShouldShowNativeStack(this)) {
1360      DumpKernelStack(os, GetTid(), "  kernel: ", false);
1361      DumpNativeStack(os, GetTid(), "  native: ", GetCurrentMethod(nullptr, !dump_for_abort));
1362    }
1363    DumpJavaStack(os);
1364  } else {
1365    os << "Not able to dump stack of thread that isn't suspended";
1366  }
1367}
1368
1369void Thread::ThreadExitCallback(void* arg) {
1370  Thread* self = reinterpret_cast<Thread*>(arg);
1371  if (self->tls32_.thread_exit_check_count == 0) {
1372    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
1373        "going to use a pthread_key_create destructor?): " << *self;
1374    CHECK(is_started_);
1375#ifdef __ANDROID__
1376    __get_tls()[TLS_SLOT_ART_THREAD_SELF] = self;
1377#else
1378    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
1379#endif
1380    self->tls32_.thread_exit_check_count = 1;
1381  } else {
1382    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
1383  }
1384}
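
// The first pass above re-installs `self` in the TLS slot so that Thread::Current() keeps working
// for any later pthread_key destructors that call back into the runtime, and so this callback runs
// one final time to catch a thread that still never detached. A minimal standalone sketch of the
// underlying POSIX behavior (illustrative only, not part of this file; `key` and `OnThreadExit`
// are assumed names):
//
//   #include <pthread.h>
//
//   static pthread_key_t key;
//
//   static void OnThreadExit(void* value) {
//     // POSIX re-runs destructors (up to PTHREAD_DESTRUCTOR_ITERATIONS passes) for any slot
//     // whose value is set back to non-null during destruction.
//     pthread_setspecific(key, value);  // Schedules one more destructor pass for this thread.
//   }
//
//   // pthread_key_create(&key, OnThreadExit) registers the destructor for every thread's slot.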
1385
1386void Thread::Startup() {
1387  CHECK(!is_started_);
1388  is_started_ = true;
1389  {
1390    // MutexLock to keep annotalysis happy.
1391    //
1392    // Note we use null for the thread because Thread::Current can
1393    // return garbage now that is_started_ == true but
1394    // Thread::pthread_key_self_ is not yet initialized.
1395    // This was seen on glibc.
1396    MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_);
1397    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
1398                                         *Locks::thread_suspend_count_lock_);
1399  }
1400
1401  // Allocate a TLS slot.
1402  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback),
1403                     "self key");
1404
1405  // Double-check the TLS slot allocation.
1406  if (pthread_getspecific(pthread_key_self_) != nullptr) {
1407    LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
1408  }
1409}
1410
1411void Thread::FinishStartup() {
1412  Runtime* runtime = Runtime::Current();
1413  CHECK(runtime->IsStarted());
1414
1415  // Finish attaching the main thread.
1416  ScopedObjectAccess soa(Thread::Current());
1417  Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
1418
1419  Runtime::Current()->GetClassLinker()->RunRootClinits();
1420}
1421
1422void Thread::Shutdown() {
1423  CHECK(is_started_);
1424  is_started_ = false;
1425  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
1426  MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
1427  if (resume_cond_ != nullptr) {
1428    delete resume_cond_;
1429    resume_cond_ = nullptr;
1430  }
1431}
1432
1433Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupted_(false) {
1434  wait_mutex_ = new Mutex("a thread wait mutex");
1435  wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
1436  tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
1437  tlsPtr_.name = new std::string(kThreadNameDuringStartup);
1438  tlsPtr_.nested_signal_state = static_cast<jmp_buf*>(malloc(sizeof(jmp_buf)));
1439
1440  static_assert((sizeof(Thread) % 4) == 0U,
1441                "art::Thread has a size which is not a multiple of 4.");
1442  tls32_.state_and_flags.as_struct.flags = 0;
1443  tls32_.state_and_flags.as_struct.state = kNative;
1444  memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
1445  std::fill(tlsPtr_.rosalloc_runs,
1446            tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBrackets,
1447            gc::allocator::RosAlloc::GetDedicatedFullRun());
1448  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
1449    tlsPtr_.checkpoint_functions[i] = nullptr;
1450  }
1451  for (uint32_t i = 0; i < kMaxSuspendBarriers; ++i) {
1452    tlsPtr_.active_suspend_barriers[i] = nullptr;
1453  }
1454  tlsPtr_.flip_function = nullptr;
1455  tlsPtr_.thread_local_mark_stack = nullptr;
1456  tls32_.suspended_at_suspend_check = false;
1457}
1458
1459bool Thread::IsStillStarting() const {
1460  // You might think you can check whether the state is kStarting, but for much of thread startup,
1461  // the thread is in kNative; it might also be in kVmWait.
1462  // You might think you can check whether the peer is null, but the peer is actually created and
1463  // assigned fairly early on, and needs to be.
1464  // It turns out that the last thing to change is the thread name; that's a good proxy for "has
1465  // this thread _ever_ entered kRunnable".
1466  return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) ||
1467      (*tlsPtr_.name == kThreadNameDuringStartup);
1468}
1469
1470void Thread::AssertPendingException() const {
1471  CHECK(IsExceptionPending()) << "Pending exception expected.";
1472}
1473
1474void Thread::AssertPendingOOMException() const {
1475  AssertPendingException();
1476  auto* e = GetException();
1477  CHECK_EQ(e->GetClass(), DecodeJObject(WellKnownClasses::java_lang_OutOfMemoryError)->AsClass())
1478      << e->Dump();
1479}
1480
1481void Thread::AssertNoPendingException() const {
1482  if (UNLIKELY(IsExceptionPending())) {
1483    ScopedObjectAccess soa(Thread::Current());
1484    mirror::Throwable* exception = GetException();
1485    LOG(FATAL) << "No pending exception expected: " << exception->Dump();
1486  }
1487}
1488
1489void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
1490  if (UNLIKELY(IsExceptionPending())) {
1491    ScopedObjectAccess soa(Thread::Current());
1492    mirror::Throwable* exception = GetException();
1493    LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
1494        << exception->Dump();
1495  }
1496}
1497
1498class MonitorExitVisitor : public SingleRootVisitor {
1499 public:
1500  explicit MonitorExitVisitor(Thread* self) : self_(self) { }
1501
1502  // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
1503  void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
1504      OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1505    if (self_->HoldsLock(entered_monitor)) {
1506      LOG(WARNING) << "Calling MonitorExit on object "
1507                   << entered_monitor << " (" << PrettyTypeOf(entered_monitor) << ")"
1508                   << " left locked by native thread "
1509                   << *Thread::Current() << " which is detaching";
1510      entered_monitor->MonitorExit(self_);
1511    }
1512  }
1513
1514 private:
1515  Thread* const self_;
1516};
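
// The visitor above covers the situation sketched below: a native thread enters a monitor via JNI
// and detaches without ever exiting it. A hedged illustration (not taken from this file; `env` and
// `obj` are assumed to be a valid JNIEnv* and a live local reference):
//
//   env->MonitorEnter(obj);
//   // ... native work, but no matching env->MonitorExit(obj) ...
//   // On DetachCurrentThread, Thread::Destroy (below) runs MonitorExitVisitor over
//   // tlsPtr_.jni_env->monitors, logs the warning, and exits the monitor on the thread's behalf.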
1517
1518void Thread::Destroy() {
1519  Thread* self = this;
1520  DCHECK_EQ(self, Thread::Current());
1521
1522  if (tlsPtr_.jni_env != nullptr) {
1523    {
1524      ScopedObjectAccess soa(self);
1525      MonitorExitVisitor visitor(self);
1526      // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
1527      tlsPtr_.jni_env->monitors.VisitRoots(&visitor, RootInfo(kRootVMInternal));
1528    }
1529    // Release locally held global references; releasing them may require the mutator lock.
1530    if (tlsPtr_.jpeer != nullptr) {
1531      // If pthread_create fails we don't have a jni env here.
1532      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
1533      tlsPtr_.jpeer = nullptr;
1534    }
1535    if (tlsPtr_.class_loader_override != nullptr) {
1536      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override);
1537      tlsPtr_.class_loader_override = nullptr;
1538    }
1539  }
1540
1541  if (tlsPtr_.opeer != nullptr) {
1542    ScopedObjectAccess soa(self);
1543    // We may need to call user-supplied managed code; do this before final clean-up.
1544    HandleUncaughtExceptions(soa);
1545    RemoveFromThreadGroup(soa);
1546
1547    // this.nativePeer = 0;
1548    if (Runtime::Current()->IsActiveTransaction()) {
1549      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
1550          ->SetLong<true>(tlsPtr_.opeer, 0);
1551    } else {
1552      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
1553          ->SetLong<false>(tlsPtr_.opeer, 0);
1554    }
1555    Dbg::PostThreadDeath(self);
1556
1557    // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
1558    // who is waiting.
1559    mirror::Object* lock =
1560        soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
1561    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
1562    if (lock != nullptr) {
1563      StackHandleScope<1> hs(self);
1564      Handle<mirror::Object> h_obj(hs.NewHandle(lock));
1565      ObjectLock<mirror::Object> locker(self, h_obj);
1566      locker.NotifyAll();
1567    }
1568    tlsPtr_.opeer = nullptr;
1569  }
1570
1571  {
1572    ScopedObjectAccess soa(self);
1573    Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
1574    if (kUseReadBarrier) {
1575      Runtime::Current()->GetHeap()->ConcurrentCopyingCollector()->RevokeThreadLocalMarkStack(this);
1576    }
1577  }
1578}
1579
1580Thread::~Thread() {
1581  CHECK(tlsPtr_.class_loader_override == nullptr);
1582  CHECK(tlsPtr_.jpeer == nullptr);
1583  CHECK(tlsPtr_.opeer == nullptr);
1584  bool initialized = (tlsPtr_.jni_env != nullptr);  // Did Thread::Init run?
1585  if (initialized) {
1586    delete tlsPtr_.jni_env;
1587    tlsPtr_.jni_env = nullptr;
1588  }
1589  CHECK_NE(GetState(), kRunnable);
1590  CHECK_NE(ReadFlag(kCheckpointRequest), true);
1591  CHECK(tlsPtr_.checkpoint_functions[0] == nullptr);
1592  CHECK(tlsPtr_.checkpoint_functions[1] == nullptr);
1593  CHECK(tlsPtr_.checkpoint_functions[2] == nullptr);
1594  CHECK(tlsPtr_.flip_function == nullptr);
1595  CHECK_EQ(tls32_.suspended_at_suspend_check, false);
1596
1597  // Make sure we processed all deoptimization requests.
1598  CHECK(tlsPtr_.deoptimization_context_stack == nullptr) << "Missed deoptimization";
1599
1600  // We may be deleting a stillborn thread.
1601  SetStateUnsafe(kTerminated);
1602
1603  delete wait_cond_;
1604  delete wait_mutex_;
1605
1606  if (tlsPtr_.long_jump_context != nullptr) {
1607    delete tlsPtr_.long_jump_context;
1608  }
1609
1610  if (initialized) {
1611    CleanupCpu();
1612  }
1613
1614  if (tlsPtr_.single_step_control != nullptr) {
1615    delete tlsPtr_.single_step_control;
1616  }
1617  delete tlsPtr_.instrumentation_stack;
1618  delete tlsPtr_.name;
1619  delete tlsPtr_.stack_trace_sample;
1620  free(tlsPtr_.nested_signal_state);
1621
1622  Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
1623
1624  TearDownAlternateSignalStack();
1625}
1626
1627void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
1628  if (!IsExceptionPending()) {
1629    return;
1630  }
1631  ScopedLocalRef<jobject> peer(tlsPtr_.jni_env, soa.AddLocalReference<jobject>(tlsPtr_.opeer));
1632  ScopedThreadStateChange tsc(this, kNative);
1633
1634  // Get and clear the exception.
1635  ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred());
1636  tlsPtr_.jni_env->ExceptionClear();
1637
1638  // If the thread has its own handler, use that.
1639  ScopedLocalRef<jobject> handler(tlsPtr_.jni_env,
1640                                  tlsPtr_.jni_env->GetObjectField(peer.get(),
1641                                      WellKnownClasses::java_lang_Thread_uncaughtHandler));
1642  if (handler.get() == nullptr) {
1643    // Otherwise use the thread group's default handler.
1644    handler.reset(tlsPtr_.jni_env->GetObjectField(peer.get(),
1645                                                  WellKnownClasses::java_lang_Thread_group));
1646  }
1647
1648  // Call the handler.
1649  tlsPtr_.jni_env->CallVoidMethod(handler.get(),
1650      WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException,
1651      peer.get(), exception.get());
1652
1653  // If the handler threw, clear that exception too.
1654  tlsPtr_.jni_env->ExceptionClear();
1655}
1656
1657void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
1658  // this.group.removeThread(this);
1659  // group can be null if we're in the compiler or a test.
1660  mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)
1661      ->GetObject(tlsPtr_.opeer);
1662  if (ogroup != nullptr) {
1663    ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
1664    ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(tlsPtr_.opeer));
1665    ScopedThreadStateChange tsc(soa.Self(), kNative);
1666    tlsPtr_.jni_env->CallVoidMethod(group.get(),
1667                                    WellKnownClasses::java_lang_ThreadGroup_removeThread,
1668                                    peer.get());
1669  }
1670}
1671
1672size_t Thread::NumHandleReferences() {
1673  size_t count = 0;
1674  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
1675    count += cur->NumberOfReferences();
1676  }
1677  return count;
1678}
1679
1680bool Thread::HandleScopeContains(jobject obj) const {
1681  StackReference<mirror::Object>* hs_entry =
1682      reinterpret_cast<StackReference<mirror::Object>*>(obj);
1683  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
1684    if (cur->Contains(hs_entry)) {
1685      return true;
1686    }
1687  }
1688  // JNI code invoked from portable code uses shadow frames rather than the handle scope.
1689  return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
1690}
1691
1692void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
1693  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
1694      visitor, RootInfo(kRootNativeStack, thread_id));
1695  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
1696    for (size_t j = 0, count = cur->NumberOfReferences(); j < count; ++j) {
1697      // GetReference returns a pointer to the stack reference within the handle scope. If this
1698      // needs to be updated, it will be done by the root visitor.
1699      buffered_visitor.VisitRootIfNonNull(cur->GetHandle(j).GetReference());
1700    }
1701  }
1702}
1703
1704mirror::Object* Thread::DecodeJObject(jobject obj) const {
1705  if (obj == nullptr) {
1706    return nullptr;
1707  }
1708  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
1709  IndirectRefKind kind = GetIndirectRefKind(ref);
1710  mirror::Object* result;
1711  bool expect_null = false;
1712  // The "kinds" below are sorted by the frequency we expect to encounter them.
1713  if (kind == kLocal) {
1714    IndirectReferenceTable& locals = tlsPtr_.jni_env->locals;
1715    // Local references do not need a read barrier.
1716    result = locals.Get<kWithoutReadBarrier>(ref);
1717  } else if (kind == kHandleScopeOrInvalid) {
1718    // TODO: make stack indirect reference table lookup more efficient.
1719    // Check if this is a local reference in the handle scope.
1720    if (LIKELY(HandleScopeContains(obj))) {
1721      // Read from handle scope.
1722      result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
1723      VerifyObject(result);
1724    } else {
1725      tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of invalid jobject %p", obj);
1726      expect_null = true;
1727      result = nullptr;
1728    }
1729  } else if (kind == kGlobal) {
1730    result = tlsPtr_.jni_env->vm->DecodeGlobal(ref);
1731  } else {
1732    DCHECK_EQ(kind, kWeakGlobal);
1733    result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
1734    if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
1735      // This is a special case where it's okay to return null.
1736      expect_null = true;
1737      result = nullptr;
1738    }
1739  }
1740
1741  if (UNLIKELY(!expect_null && result == nullptr)) {
1742    tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of deleted %s %p",
1743                                   ToStr<IndirectRefKind>(kind).c_str(), obj);
1744  }
1745  return result;
1746}
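
// A hedged usage sketch for DecodeJObject: callers must hold the mutator lock, which is what
// ScopedObjectAccess provides (illustrative only; `jobj` is an assumed jobject from JNI code):
//
//   Thread* self = Thread::Current();
//   ScopedObjectAccess soa(self);                     // Transition to runnable under the mutator lock.
//   mirror::Object* obj = self->DecodeJObject(jobj);  // Resolves local, global, weak global or
//                                                     // handle-scope references as handled above.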
1747
1748// Implements java.lang.Thread.interrupted.
1749bool Thread::Interrupted() {
1750  MutexLock mu(Thread::Current(), *wait_mutex_);
1751  bool interrupted = IsInterruptedLocked();
1752  SetInterruptedLocked(false);
1753  return interrupted;
1754}
1755
1756// Implements java.lang.Thread.isInterrupted.
1757bool Thread::IsInterrupted() {
1758  MutexLock mu(Thread::Current(), *wait_mutex_);
1759  return IsInterruptedLocked();
1760}
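
// The two methods above mirror the Java API: Thread.interrupted() reads and clears the flag,
// while Thread.isInterrupted() only reads it. A hedged sketch of the difference (illustrative
// only; `thread` is assumed to be a valid Thread* and nothing re-interrupts it in between):
//
//   bool was_interrupted = thread->Interrupted();       // Returns the flag, then clears it.
//   bool still_interrupted = thread->IsInterrupted();   // Now false: the flag was consumed above.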
1761
1762void Thread::Interrupt(Thread* self) {
1763  MutexLock mu(self, *wait_mutex_);
1764  if (interrupted_) {
1765    return;
1766  }
1767  interrupted_ = true;
1768  NotifyLocked(self);
1769}
1770
1771void Thread::Notify() {
1772  Thread* self = Thread::Current();
1773  MutexLock mu(self, *wait_mutex_);
1774  NotifyLocked(self);
1775}
1776
1777void Thread::NotifyLocked(Thread* self) {
1778  if (wait_monitor_ != nullptr) {
1779    wait_cond_->Signal(self);
1780  }
1781}
1782
1783void Thread::SetClassLoaderOverride(jobject class_loader_override) {
1784  if (tlsPtr_.class_loader_override != nullptr) {
1785    GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override);
1786  }
1787  tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
1788}
1789
1790class CountStackDepthVisitor : public StackVisitor {
1791 public:
1792  explicit CountStackDepthVisitor(Thread* thread)
1793      SHARED_REQUIRES(Locks::mutator_lock_)
1794      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
1795        depth_(0), skip_depth_(0), skipping_(true) {}
1796
1797  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
1798    // We want to skip frames up to and including the exception's constructor.
1799    // Note we also skip the frame if it doesn't have a method (namely the callee
1800    // save frame).
1801    ArtMethod* m = GetMethod();
1802    if (skipping_ && !m->IsRuntimeMethod() &&
1803        !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
1804      skipping_ = false;
1805    }
1806    if (!skipping_) {
1807      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
1808        ++depth_;
1809      }
1810    } else {
1811      ++skip_depth_;
1812    }
1813    return true;
1814  }
1815
1816  int GetDepth() const {
1817    return depth_;
1818  }
1819
1820  int GetSkipDepth() const {
1821    return skip_depth_;
1822  }
1823
1824 private:
1825  uint32_t depth_;
1826  uint32_t skip_depth_;
1827  bool skipping_;
1828};
1829
1830template<bool kTransactionActive>
1831class BuildInternalStackTraceVisitor : public StackVisitor {
1832 public:
1833  BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
1834      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
1835        self_(self),
1836        skip_depth_(skip_depth),
1837        count_(0),
1838        trace_(nullptr),
1839        pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
1840
1841  bool Init(int depth) SHARED_REQUIRES(Locks::mutator_lock_) ACQUIRE(Roles::uninterruptible_) {
1842    // Allocate method trace with format [method pointers][pcs].
1843    auto* cl = Runtime::Current()->GetClassLinker();
1844    trace_ = cl->AllocPointerArray(self_, depth * 2);
1845    const char* last_no_suspend_cause =
1846        self_->StartAssertNoThreadSuspension("Building internal stack trace");
1847    if (trace_ == nullptr) {
1848      self_->AssertPendingOOMException();
1849      return false;
1850    }
1851    // If we are called from native, use non-transactional mode.
1852    CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
1853    return true;
1854  }
1855
1856  virtual ~BuildInternalStackTraceVisitor() RELEASE(Roles::uninterruptible_) {
1857    self_->EndAssertNoThreadSuspension(nullptr);
1858  }
1859
1860  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
1861    if (trace_ == nullptr) {
1862      return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
1863    }
1864    if (skip_depth_ > 0) {
1865      skip_depth_--;
1866      return true;
1867    }
1868    ArtMethod* m = GetMethod();
1869    if (m->IsRuntimeMethod()) {
1870      return true;  // Ignore runtime frames (in particular callee save).
1871    }
1872    trace_->SetElementPtrSize<kTransactionActive>(
1873        count_, m, pointer_size_);
1874    trace_->SetElementPtrSize<kTransactionActive>(
1875        trace_->GetLength() / 2 + count_, m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc(),
1876            pointer_size_);
1877    ++count_;
1878    return true;
1879  }
1880
1881  mirror::PointerArray* GetInternalStackTrace() const {
1882    return trace_;
1883  }
1884
1885 private:
1886  Thread* const self_;
1887  // How many more frames to skip.
1888  int32_t skip_depth_;
1889  // Current position down stack trace.
1890  uint32_t count_;
1891  // An array of the methods on the stack, the last entries are the dex PCs.
1892  mirror::PointerArray* trace_;
1893  // For cross compilation.
1894  size_t pointer_size_;
1895};
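
// Layout of the PointerArray built by the visitor above, for a trace of depth N (the array is
// allocated with length 2 * N in Init(); index 0 is the innermost, most recent frame):
//
//   indices [0 .. N-1]      -> ArtMethod* for each visited frame
//   indices [N .. 2*N-1]    -> dex pc of the corresponding frame (DexFile::kDexNoIndex for proxies)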
1896
1897template<bool kTransactionActive>
1898jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
1899  // Compute depth of stack
1900  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
1901  count_visitor.WalkStack();
1902  int32_t depth = count_visitor.GetDepth();
1903  int32_t skip_depth = count_visitor.GetSkipDepth();
1904
1905  // Build internal stack trace.
1906  BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(),
1907                                                                         const_cast<Thread*>(this),
1908                                                                         skip_depth);
1909  if (!build_trace_visitor.Init(depth)) {
1910    return nullptr;  // Allocation failed.
1911  }
1912  build_trace_visitor.WalkStack();
1913  mirror::PointerArray* trace = build_trace_visitor.GetInternalStackTrace();
1914  if (kIsDebugBuild) {
1915    // Second half is dex PCs.
1916    for (uint32_t i = 0; i < static_cast<uint32_t>(trace->GetLength() / 2); ++i) {
1917      auto* method = trace->GetElementPtrSize<ArtMethod*>(
1918          i, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
1919      CHECK(method != nullptr);
1920    }
1921  }
1922  return soa.AddLocalReference<jobject>(trace);
1923}
1924template jobject Thread::CreateInternalStackTrace<false>(
1925    const ScopedObjectAccessAlreadyRunnable& soa) const;
1926template jobject Thread::CreateInternalStackTrace<true>(
1927    const ScopedObjectAccessAlreadyRunnable& soa) const;
1928
1929bool Thread::IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const {
1930  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
1931  count_visitor.WalkStack();
1932  return count_visitor.GetDepth() == exception->GetStackDepth();
1933}
1934
1935jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
1936    const ScopedObjectAccessAlreadyRunnable& soa, jobject internal, jobjectArray output_array,
1937    int* stack_depth) {
1938  // Decode the internal stack trace into the depth, method trace and PC trace
1939  int32_t depth = soa.Decode<mirror::PointerArray*>(internal)->GetLength() / 2;
1940
1941  auto* cl = Runtime::Current()->GetClassLinker();
1942
1943  jobjectArray result;
1944
1945  if (output_array != nullptr) {
1946    // Reuse the array we were given.
1947    result = output_array;
1948    // ...adjusting the number of frames we'll write to not exceed the array length.
1949    const int32_t traces_length =
1950        soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->GetLength();
1951    depth = std::min(depth, traces_length);
1952  } else {
1953    // Create java_trace array and place in local reference table
1954    mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
1955        cl->AllocStackTraceElementArray(soa.Self(), depth);
1956    if (java_traces == nullptr) {
1957      return nullptr;
1958    }
1959    result = soa.AddLocalReference<jobjectArray>(java_traces);
1960  }
1961
1962  if (stack_depth != nullptr) {
1963    *stack_depth = depth;
1964  }
1965
1966  for (int32_t i = 0; i < depth; ++i) {
1967    auto* method_trace = soa.Decode<mirror::PointerArray*>(internal);
1968    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
1969    ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, sizeof(void*));
1970    uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
1971        i + method_trace->GetLength() / 2, sizeof(void*));
1972    int32_t line_number;
1973    StackHandleScope<3> hs(soa.Self());
1974    auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
1975    auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
1976    if (method->IsProxyMethod()) {
1977      line_number = -1;
1978      class_name_object.Assign(method->GetDeclaringClass()->GetName());
1979      // source_name_object intentionally left null for proxy methods
1980    } else {
1981      line_number = method->GetLineNumFromDexPC(dex_pc);
1982      // Allocate element, potentially triggering GC
1983      // TODO: reuse class_name_object via Class::name_?
1984      const char* descriptor = method->GetDeclaringClassDescriptor();
1985      CHECK(descriptor != nullptr);
1986      std::string class_name(PrettyDescriptor(descriptor));
1987      class_name_object.Assign(
1988          mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
1989      if (class_name_object.Get() == nullptr) {
1990        soa.Self()->AssertPendingOOMException();
1991        return nullptr;
1992      }
1993      const char* source_file = method->GetDeclaringClassSourceFile();
1994      if (source_file != nullptr) {
1995        source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
1996        if (source_name_object.Get() == nullptr) {
1997          soa.Self()->AssertPendingOOMException();
1998          return nullptr;
1999        }
2000      }
2001    }
2002    const char* method_name = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
2003    CHECK(method_name != nullptr);
2004    Handle<mirror::String> method_name_object(
2005        hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
2006    if (method_name_object.Get() == nullptr) {
2007      return nullptr;
2008    }
2009    mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
2010        soa.Self(), class_name_object, method_name_object, source_name_object, line_number);
2011    if (obj == nullptr) {
2012      return nullptr;
2013    }
2014    // We are called from native: use non-transactional mode.
2015    soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->Set<false>(i, obj);
2016  }
2017  return result;
2018}
2019
2020void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
2021  va_list args;
2022  va_start(args, fmt);
2023  ThrowNewExceptionV(exception_class_descriptor, fmt, args);
2024  va_end(args);
2025}
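
// A hedged usage sketch for the printf-style thrower above (illustrative only; the descriptor,
// message and `value` are examples, not taken from this file):
//
//   Thread* self = Thread::Current();
//   self->ThrowNewExceptionF("Ljava/lang/IllegalArgumentException;",
//                            "Unexpected value: %d", value);
//   DCHECK(self->IsExceptionPending());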
2026
2027void Thread::ThrowNewExceptionV(const char* exception_class_descriptor,
2028                                const char* fmt, va_list ap) {
2029  std::string msg;
2030  StringAppendV(&msg, fmt, ap);
2031  ThrowNewException(exception_class_descriptor, msg.c_str());
2032}
2033
2034void Thread::ThrowNewException(const char* exception_class_descriptor,
2035                               const char* msg) {
2036  // Callers should either clear the pending exception or call ThrowNewWrappedException instead.
2037  AssertNoPendingExceptionForNewException(msg);
2038  ThrowNewWrappedException(exception_class_descriptor, msg);
2039}
2040
2041static mirror::ClassLoader* GetCurrentClassLoader(Thread* self)
2042    SHARED_REQUIRES(Locks::mutator_lock_) {
2043  ArtMethod* method = self->GetCurrentMethod(nullptr);
2044  return method != nullptr
2045      ? method->GetDeclaringClass()->GetClassLoader()
2046      : nullptr;
2047}
2048
2049void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
2050                                      const char* msg) {
2051  DCHECK_EQ(this, Thread::Current());
2052  ScopedObjectAccessUnchecked soa(this);
2053  StackHandleScope<3> hs(soa.Self());
2054  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self())));
2055  ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException()));
2056  ClearException();
2057  Runtime* runtime = Runtime::Current();
2058  auto* cl = runtime->GetClassLinker();
2059  Handle<mirror::Class> exception_class(
2060      hs.NewHandle(cl->FindClass(this, exception_class_descriptor, class_loader)));
2061  if (UNLIKELY(exception_class.Get() == nullptr)) {
2062    CHECK(IsExceptionPending());
2063    LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
2064    return;
2065  }
2066
2067  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true,
2068                                                             true))) {
2069    DCHECK(IsExceptionPending());
2070    return;
2071  }
2072  DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
2073  Handle<mirror::Throwable> exception(
2074      hs.NewHandle(down_cast<mirror::Throwable*>(exception_class->AllocObject(this))));
2075
2076  // If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
2077  if (exception.Get() == nullptr) {
2078    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
2079    return;
2080  }
2081
2082  // Choose an appropriate constructor and set up the arguments.
2083  const char* signature;
2084  ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr);
2085  if (msg != nullptr) {
2086    // Ensure we remember this and the method over the String allocation.
2087    msg_string.reset(
2088        soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg)));
2089    if (UNLIKELY(msg_string.get() == nullptr)) {
2090      CHECK(IsExceptionPending());  // OOME.
2091      return;
2092    }
2093    if (cause.get() == nullptr) {
2094      signature = "(Ljava/lang/String;)V";
2095    } else {
2096      signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
2097    }
2098  } else {
2099    if (cause.get() == nullptr) {
2100      signature = "()V";
2101    } else {
2102      signature = "(Ljava/lang/Throwable;)V";
2103    }
2104  }
2105  ArtMethod* exception_init_method =
2106      exception_class->FindDeclaredDirectMethod("<init>", signature, cl->GetImagePointerSize());
2107
2108  CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
2109      << PrettyDescriptor(exception_class_descriptor);
2110
2111  if (UNLIKELY(!runtime->IsStarted())) {
2112    // Something is trying to throw an exception without a started runtime, which is the common
2113    // case in the compiler. We won't be able to invoke the constructor of the exception, so set
2114    // the exception fields directly.
2115    if (msg != nullptr) {
2116      exception->SetDetailMessage(down_cast<mirror::String*>(DecodeJObject(msg_string.get())));
2117    }
2118    if (cause.get() != nullptr) {
2119      exception->SetCause(down_cast<mirror::Throwable*>(DecodeJObject(cause.get())));
2120    }
2121    ScopedLocalRef<jobject> trace(GetJniEnv(),
2122                                  Runtime::Current()->IsActiveTransaction()
2123                                      ? CreateInternalStackTrace<true>(soa)
2124                                      : CreateInternalStackTrace<false>(soa));
2125    if (trace.get() != nullptr) {
2126      exception->SetStackState(down_cast<mirror::Throwable*>(DecodeJObject(trace.get())));
2127    }
2128    SetException(exception.Get());
2129  } else {
2130    jvalue jv_args[2];
2131    size_t i = 0;
2132
2133    if (msg != nullptr) {
2134      jv_args[i].l = msg_string.get();
2135      ++i;
2136    }
2137    if (cause.get() != nullptr) {
2138      jv_args[i].l = cause.get();
2139      ++i;
2140    }
2141    ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get()));
2142    InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(exception_init_method), jv_args);
2143    if (LIKELY(!IsExceptionPending())) {
2144      SetException(exception.Get());
2145    }
2146  }
2147}
2148
2149void Thread::ThrowOutOfMemoryError(const char* msg) {
2150  LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
2151      msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : ""));
2152  if (!tls32_.throwing_OutOfMemoryError) {
2153    tls32_.throwing_OutOfMemoryError = true;
2154    ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
2155    tls32_.throwing_OutOfMemoryError = false;
2156  } else {
2157    Dump(LOG(WARNING));  // The pre-allocated OOME has no stack, so help out and log one.
2158    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
2159  }
2160}
2161
2162Thread* Thread::CurrentFromGdb() {
2163  return Thread::Current();
2164}
2165
2166void Thread::DumpFromGdb() const {
2167  std::ostringstream ss;
2168  Dump(ss);
2169  std::string str(ss.str());
2170  // log to stderr for debugging command line processes
2171  std::cerr << str;
2172#ifdef __ANDROID__
2173  // log to logcat for debugging frameworks processes
2174  LOG(INFO) << str;
2175#endif
2176}
2177
2178// Explicitly instantiate 32-bit and 64-bit thread offset dumping support.
2179template void Thread::DumpThreadOffset<4>(std::ostream& os, uint32_t offset);
2180template void Thread::DumpThreadOffset<8>(std::ostream& os, uint32_t offset);
2181
2182template<size_t ptr_size>
2183void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
2184#define DO_THREAD_OFFSET(x, y) \
2185    if (offset == x.Uint32Value()) { \
2186      os << y; \
2187      return; \
2188    }
2189  DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags")
2190  DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table")
2191  DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception")
2192  DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer")
2193  DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env")
2194  DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self")
2195  DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end")
2196  DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id")
2197  DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method")
2198  DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame")
2199  DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope")
2200  DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
2201#undef DO_THREAD_OFFSET
2202
2203#define INTERPRETER_ENTRY_POINT_INFO(x) \
2204    if (INTERPRETER_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
2205      os << #x; \
2206      return; \
2207    }
2208  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge)
2209  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge)
2210#undef INTERPRETER_ENTRY_POINT_INFO
2211
2212#define JNI_ENTRY_POINT_INFO(x) \
2213    if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
2214      os << #x; \
2215      return; \
2216    }
2217  JNI_ENTRY_POINT_INFO(pDlsymLookup)
2218#undef JNI_ENTRY_POINT_INFO
2219
2220#define QUICK_ENTRY_POINT_INFO(x) \
2221    if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
2222      os << #x; \
2223      return; \
2224    }
2225  QUICK_ENTRY_POINT_INFO(pAllocArray)
2226  QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
2227  QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck)
2228  QUICK_ENTRY_POINT_INFO(pAllocObject)
2229  QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
2230  QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
2231  QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck)
2232  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray)
2233  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck)
2234  QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes)
2235  QUICK_ENTRY_POINT_INFO(pAllocStringFromChars)
2236  QUICK_ENTRY_POINT_INFO(pAllocStringFromString)
2237  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
2238  QUICK_ENTRY_POINT_INFO(pCheckCast)
2239  QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
2240  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess)
2241  QUICK_ENTRY_POINT_INFO(pInitializeType)
2242  QUICK_ENTRY_POINT_INFO(pResolveString)
2243  QUICK_ENTRY_POINT_INFO(pSet8Instance)
2244  QUICK_ENTRY_POINT_INFO(pSet8Static)
2245  QUICK_ENTRY_POINT_INFO(pSet16Instance)
2246  QUICK_ENTRY_POINT_INFO(pSet16Static)
2247  QUICK_ENTRY_POINT_INFO(pSet32Instance)
2248  QUICK_ENTRY_POINT_INFO(pSet32Static)
2249  QUICK_ENTRY_POINT_INFO(pSet64Instance)
2250  QUICK_ENTRY_POINT_INFO(pSet64Static)
2251  QUICK_ENTRY_POINT_INFO(pSetObjInstance)
2252  QUICK_ENTRY_POINT_INFO(pSetObjStatic)
2253  QUICK_ENTRY_POINT_INFO(pGetByteInstance)
2254  QUICK_ENTRY_POINT_INFO(pGetBooleanInstance)
2255  QUICK_ENTRY_POINT_INFO(pGetByteStatic)
2256  QUICK_ENTRY_POINT_INFO(pGetBooleanStatic)
2257  QUICK_ENTRY_POINT_INFO(pGetShortInstance)
2258  QUICK_ENTRY_POINT_INFO(pGetCharInstance)
2259  QUICK_ENTRY_POINT_INFO(pGetShortStatic)
2260  QUICK_ENTRY_POINT_INFO(pGetCharStatic)
2261  QUICK_ENTRY_POINT_INFO(pGet32Instance)
2262  QUICK_ENTRY_POINT_INFO(pGet32Static)
2263  QUICK_ENTRY_POINT_INFO(pGet64Instance)
2264  QUICK_ENTRY_POINT_INFO(pGet64Static)
2265  QUICK_ENTRY_POINT_INFO(pGetObjInstance)
2266  QUICK_ENTRY_POINT_INFO(pGetObjStatic)
2267  QUICK_ENTRY_POINT_INFO(pAputObjectWithNullAndBoundCheck)
2268  QUICK_ENTRY_POINT_INFO(pAputObjectWithBoundCheck)
2269  QUICK_ENTRY_POINT_INFO(pAputObject)
2270  QUICK_ENTRY_POINT_INFO(pHandleFillArrayData)
2271  QUICK_ENTRY_POINT_INFO(pJniMethodStart)
2272  QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized)
2273  QUICK_ENTRY_POINT_INFO(pJniMethodEnd)
2274  QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized)
2275  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference)
2276  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized)
2277  QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline)
2278  QUICK_ENTRY_POINT_INFO(pLockObject)
2279  QUICK_ENTRY_POINT_INFO(pUnlockObject)
2280  QUICK_ENTRY_POINT_INFO(pCmpgDouble)
2281  QUICK_ENTRY_POINT_INFO(pCmpgFloat)
2282  QUICK_ENTRY_POINT_INFO(pCmplDouble)
2283  QUICK_ENTRY_POINT_INFO(pCmplFloat)
2284  QUICK_ENTRY_POINT_INFO(pFmod)
2285  QUICK_ENTRY_POINT_INFO(pL2d)
2286  QUICK_ENTRY_POINT_INFO(pFmodf)
2287  QUICK_ENTRY_POINT_INFO(pL2f)
2288  QUICK_ENTRY_POINT_INFO(pD2iz)
2289  QUICK_ENTRY_POINT_INFO(pF2iz)
2290  QUICK_ENTRY_POINT_INFO(pIdivmod)
2291  QUICK_ENTRY_POINT_INFO(pD2l)
2292  QUICK_ENTRY_POINT_INFO(pF2l)
2293  QUICK_ENTRY_POINT_INFO(pLdiv)
2294  QUICK_ENTRY_POINT_INFO(pLmod)
2295  QUICK_ENTRY_POINT_INFO(pLmul)
2296  QUICK_ENTRY_POINT_INFO(pShlLong)
2297  QUICK_ENTRY_POINT_INFO(pShrLong)
2298  QUICK_ENTRY_POINT_INFO(pUshrLong)
2299  QUICK_ENTRY_POINT_INFO(pIndexOf)
2300  QUICK_ENTRY_POINT_INFO(pStringCompareTo)
2301  QUICK_ENTRY_POINT_INFO(pMemcpy)
2302  QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline)
2303  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline)
2304  QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge)
2305  QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck)
2306  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck)
2307  QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck)
2308  QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck)
2309  QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck)
2310  QUICK_ENTRY_POINT_INFO(pTestSuspend)
2311  QUICK_ENTRY_POINT_INFO(pDeliverException)
2312  QUICK_ENTRY_POINT_INFO(pThrowArrayBounds)
2313  QUICK_ENTRY_POINT_INFO(pThrowDivZero)
2314  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod)
2315  QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
2316  QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
2317  QUICK_ENTRY_POINT_INFO(pDeoptimize)
2318  QUICK_ENTRY_POINT_INFO(pA64Load)
2319  QUICK_ENTRY_POINT_INFO(pA64Store)
2320  QUICK_ENTRY_POINT_INFO(pNewEmptyString)
2321  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B)
2322  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI)
2323  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII)
2324  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII)
2325  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIIString)
2326  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BString)
2327  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIICharset)
2328  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BCharset)
2329  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_C)
2330  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_CII)
2331  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_IIC)
2332  QUICK_ENTRY_POINT_INFO(pNewStringFromCodePoints)
2333  QUICK_ENTRY_POINT_INFO(pNewStringFromString)
2334  QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer)
2335  QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
2336  QUICK_ENTRY_POINT_INFO(pReadBarrierJni)
2337  QUICK_ENTRY_POINT_INFO(pReadBarrierSlow)
2338#undef QUICK_ENTRY_POINT_INFO
2339
2340  os << offset;
2341}
2342
2343void Thread::QuickDeliverException() {
2344  // Get exception from thread.
2345  mirror::Throwable* exception = GetException();
2346  CHECK(exception != nullptr);
2347  // Don't leave exception visible while we try to find the handler, which may cause class
2348  // resolution.
2349  ClearException();
2350  bool is_deoptimization = (exception == GetDeoptimizationException());
2351  QuickExceptionHandler exception_handler(this, is_deoptimization);
2352  if (is_deoptimization) {
2353    exception_handler.DeoptimizeStack();
2354  } else {
2355    exception_handler.FindCatch(exception);
2356  }
2357  exception_handler.UpdateInstrumentationStack();
2358  exception_handler.DoLongJump();
2359}
2360
2361Context* Thread::GetLongJumpContext() {
2362  Context* result = tlsPtr_.long_jump_context;
2363  if (result == nullptr) {
2364    result = Context::Create();
2365  } else {
2366    tlsPtr_.long_jump_context = nullptr;  // Avoid context being shared.
2367    result->Reset();
2368  }
2369  return result;
2370}
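
// GetLongJumpContext() hands out the cached context (or a fresh one) and expects the caller to
// return it via ReleaseLongJumpContext() so it can be reused; VisitRoots() below follows this
// pattern. A minimal sketch (illustrative only):
//
//   Context* context = GetLongJumpContext();
//   // ... walk the stack or otherwise use the context ...
//   ReleaseLongJumpContext(context);  // Hands it back to the thread for reuse.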
2371
2372// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
2373//       so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
2374struct CurrentMethodVisitor FINAL : public StackVisitor {
2375  CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
2376      SHARED_REQUIRES(Locks::mutator_lock_)
2377      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2378        this_object_(nullptr),
2379        method_(nullptr),
2380        dex_pc_(0),
2381        abort_on_error_(abort_on_error) {}
2382  bool VisitFrame() OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
2383    ArtMethod* m = GetMethod();
2384    if (m->IsRuntimeMethod()) {
2385      // Continue if this is a runtime method.
2386      return true;
2387    }
2388    if (context_ != nullptr) {
2389      this_object_ = GetThisObject();
2390    }
2391    method_ = m;
2392    dex_pc_ = GetDexPc(abort_on_error_);
2393    return false;
2394  }
2395  mirror::Object* this_object_;
2396  ArtMethod* method_;
2397  uint32_t dex_pc_;
2398  const bool abort_on_error_;
2399};
2400
2401ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
2402  CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
2403  visitor.WalkStack(false);
2404  if (dex_pc != nullptr) {
2405    *dex_pc = visitor.dex_pc_;
2406  }
2407  return visitor.method_;
2408}
2409
2410bool Thread::HoldsLock(mirror::Object* object) const {
2411  if (object == nullptr) {
2412    return false;
2413  }
2414  return object->GetLockOwnerThreadId() == GetThreadId();
2415}
2416
2417// RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
2418template <typename RootVisitor>
2419class ReferenceMapVisitor : public StackVisitor {
2420 public:
2421  ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
2422      SHARED_REQUIRES(Locks::mutator_lock_)
2423        // We are visiting the references in compiled frames, so we do not need
2424        // to know the inlined frames.
2425      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
2426        visitor_(visitor) {}
2427
2428  bool VisitFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
2429    if (false) {
2430      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
2431                << StringPrintf("@ PC:%04x", GetDexPc());
2432    }
2433    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
2434    if (shadow_frame != nullptr) {
2435      VisitShadowFrame(shadow_frame);
2436    } else {
2437      VisitQuickFrame();
2438    }
2439    return true;
2440  }
2441
2442  void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_REQUIRES(Locks::mutator_lock_) {
2443    ArtMethod* m = shadow_frame->GetMethod();
2444    DCHECK(m != nullptr);
2445    VisitDeclaringClass(m);
2446    size_t num_regs = shadow_frame->NumberOfVRegs();
2447    if (m->IsNative() || shadow_frame->HasReferenceArray()) {
2448      // Handle scope for JNI, or references for the interpreter.
2449      for (size_t reg = 0; reg < num_regs; ++reg) {
2450        mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2451        if (ref != nullptr) {
2452          mirror::Object* new_ref = ref;
2453          visitor_(&new_ref, reg, this);
2454          if (new_ref != ref) {
2455            shadow_frame->SetVRegReference(reg, new_ref);
2456          }
2457        }
2458      }
2459    } else {
2460      // Java method.
2461      // The portable path uses DexGcMap and stores it in Method.native_gc_map_.
2462      const uint8_t* gc_map = m->GetNativeGcMap(sizeof(void*));
2463      CHECK(gc_map != nullptr) << PrettyMethod(m);
2464      verifier::DexPcToReferenceMap dex_gc_map(gc_map);
2465      uint32_t dex_pc = shadow_frame->GetDexPC();
2466      const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
2467      DCHECK(reg_bitmap != nullptr);
2468      num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
2469      for (size_t reg = 0; reg < num_regs; ++reg) {
2470        if (TestBitmap(reg, reg_bitmap)) {
2471          mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2472          if (ref != nullptr) {
2473            mirror::Object* new_ref = ref;
2474            visitor_(&new_ref, reg, this);
2475            if (new_ref != ref) {
2476              shadow_frame->SetVRegReference(reg, new_ref);
2477            }
2478          }
2479        }
2480      }
2481    }
2482  }
2483
2484 private:
2485  // Visiting the declaring class is necessary so that we don't unload the class of a method that
2486  // is executing. We need to ensure that the code stays mapped.
2487  void VisitDeclaringClass(ArtMethod* method) SHARED_REQUIRES(Locks::mutator_lock_) {
2488    mirror::Class* klass = method->GetDeclaringClassNoBarrier();
2489    // klass can be null for runtime methods.
2490    if (klass != nullptr) {
2491      mirror::Object* new_ref = klass;
2492      visitor_(&new_ref, -1, this);
2493      if (new_ref != klass) {
2494        method->CASDeclaringClass(klass, new_ref->AsClass());
2495      }
2496    }
2497  }
2498
2499  void VisitQuickFrame() SHARED_REQUIRES(Locks::mutator_lock_) {
2500    ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
2501    DCHECK(cur_quick_frame != nullptr);
2502    ArtMethod* m = *cur_quick_frame;
2503    VisitDeclaringClass(m);
2504
2505    // Process register map (which native and runtime methods don't have)
2506    if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
2507      if (m->IsOptimized(sizeof(void*))) {
2508        auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
2509            reinterpret_cast<uintptr_t>(cur_quick_frame));
2510        Runtime* runtime = Runtime::Current();
2511        const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
2512        uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
2513        CodeInfo code_info = m->GetOptimizedCodeInfo();
2514        StackMapEncoding encoding = code_info.ExtractEncoding();
2515        StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
2516        DCHECK(map.IsValid());
2517        MemoryRegion mask = map.GetStackMask(encoding);
2518        // Visit stack entries that hold pointers.
2519        for (size_t i = 0; i < mask.size_in_bits(); ++i) {
2520          if (mask.LoadBit(i)) {
2521            auto* ref_addr = vreg_base + i;
2522            mirror::Object* ref = ref_addr->AsMirrorPtr();
2523            if (ref != nullptr) {
2524              mirror::Object* new_ref = ref;
2525              visitor_(&new_ref, -1, this);
2526              if (ref != new_ref) {
2527                ref_addr->Assign(new_ref);
2528              }
2529            }
2530          }
2531        }
2532        // Visit callee-save registers that hold pointers.
2533        uint32_t register_mask = map.GetRegisterMask(encoding);
2534        for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
2535          if (register_mask & (1 << i)) {
2536            mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
2537            if (*ref_addr != nullptr) {
2538              visitor_(ref_addr, -1, this);
2539            }
2540          }
2541        }
2542      } else {
2543        const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
2544        CHECK(native_gc_map != nullptr) << PrettyMethod(m);
2545        const DexFile::CodeItem* code_item = m->GetCodeItem();
2546        // Can't be null or how would we compile its instructions?
2547        DCHECK(code_item != nullptr) << PrettyMethod(m);
2548        NativePcOffsetToReferenceMap map(native_gc_map);
2549        size_t num_regs = map.RegWidth() * 8;
2550        if (num_regs > 0) {
2551          Runtime* runtime = Runtime::Current();
2552          const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
2553          uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
2554          const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
2555          DCHECK(reg_bitmap != nullptr);
2556          const void* code_pointer = ArtMethod::EntryPointToCodePointer(entry_point);
2557          const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
2558          QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
2559          // For all dex registers in the bitmap
2560          DCHECK(cur_quick_frame != nullptr);
2561          for (size_t reg = 0; reg < num_regs; ++reg) {
2562            // Does this register hold a reference?
2563            if (TestBitmap(reg, reg_bitmap)) {
2564              uint32_t vmap_offset;
2565              if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
2566                int vmap_reg = vmap_table.ComputeRegister(frame_info.CoreSpillMask(), vmap_offset,
2567                                                          kReferenceVReg);
2568                // This is sound because spilled GPRs will be word sized (i.e. 32-bit or 64-bit).
2569                mirror::Object** ref_addr =
2570                    reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
2571                if (*ref_addr != nullptr) {
2572                  visitor_(ref_addr, reg, this);
2573                }
2574              } else {
2575                StackReference<mirror::Object>* ref_addr =
2576                    reinterpret_cast<StackReference<mirror::Object>*>(GetVRegAddrFromQuickCode(
2577                        cur_quick_frame, code_item, frame_info.CoreSpillMask(),
2578                        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), reg));
2579                mirror::Object* ref = ref_addr->AsMirrorPtr();
2580                if (ref != nullptr) {
2581                  mirror::Object* new_ref = ref;
2582                  visitor_(&new_ref, reg, this);
2583                  if (ref != new_ref) {
2584                    ref_addr->Assign(new_ref);
2585                  }
2586                }
2587              }
2588            }
2589          }
2590        }
2591      }
2592    }
2593  }
2594
2595  // Visitor for when we visit a root.
2596  RootVisitor& visitor_;
2597};
2598
2599class RootCallbackVisitor {
2600 public:
2601  RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}
2602
2603  void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
2604      SHARED_REQUIRES(Locks::mutator_lock_) {
2605    visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
2606  }
2607
2608 private:
2609  RootVisitor* const visitor_;
2610  const uint32_t tid_;
2611};
2612
2613void Thread::VisitRoots(RootVisitor* visitor) {
2614  const uint32_t thread_id = GetThreadId();
2615  visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
2616  if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
2617    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
2618                       RootInfo(kRootNativeStack, thread_id));
2619  }
2620  visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
2621  tlsPtr_.jni_env->locals.VisitRoots(visitor, RootInfo(kRootJNILocal, thread_id));
2622  tlsPtr_.jni_env->monitors.VisitRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
2623  HandleScopeVisitRoots(visitor, thread_id);
2624  if (tlsPtr_.debug_invoke_req != nullptr) {
2625    tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
2626  }
2627  // Visit roots for deoptimization.
2628  if (tlsPtr_.stacked_shadow_frame_record != nullptr) {
2629    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
2630    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
2631    for (StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
2632         record != nullptr;
2633         record = record->GetLink()) {
2634      for (ShadowFrame* shadow_frame = record->GetShadowFrame();
2635           shadow_frame != nullptr;
2636           shadow_frame = shadow_frame->GetLink()) {
2637        mapper.VisitShadowFrame(shadow_frame);
2638      }
2639    }
2640  }
2641  if (tlsPtr_.deoptimization_context_stack != nullptr) {
2642    for (DeoptimizationContextRecord* record = tlsPtr_.deoptimization_context_stack;
2643         record != nullptr;
2644         record = record->GetLink()) {
2645      if (record->IsReference()) {
2646        visitor->VisitRootIfNonNull(record->GetReturnValueAsGCRoot(),
2647                                    RootInfo(kRootThreadObject, thread_id));
2648      }
2649      visitor->VisitRootIfNonNull(record->GetPendingExceptionAsGCRoot(),
2650                                  RootInfo(kRootThreadObject, thread_id));
2651    }
2652  }
2653  for (auto* verifier = tlsPtr_.method_verifier; verifier != nullptr; verifier = verifier->link_) {
2654    verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id));
2655  }
2656  // Visit roots on this thread's stack
2657  Context* context = GetLongJumpContext();
2658  RootCallbackVisitor visitor_to_callback(visitor, thread_id);
2659  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context, visitor_to_callback);
2660  mapper.WalkStack();
2661  ReleaseLongJumpContext(context);
2662  for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
2663    visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
2664  }
2665}
2666
2667class VerifyRootVisitor : public SingleRootVisitor {
2668 public:
2669  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
2670      OVERRIDE SHARED_REQUIRES(Locks::mutator_lock_) {
2671    VerifyObject(root);
2672  }
2673};
2674
2675void Thread::VerifyStackImpl() {
2676  VerifyRootVisitor visitor;
2677  std::unique_ptr<Context> context(Context::Create());
2678  RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId());
2679  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback);
2680  mapper.WalkStack();
2681}
2682
2683// Set the stack end to the value to be used during a stack overflow.
2684void Thread::SetStackEndForStackOverflow() {
2685  // During stack overflow we allow use of the full stack.
2686  if (tlsPtr_.stack_end == tlsPtr_.stack_begin) {
2687    // However, we seem to have already extended to use the full stack.
2688    LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
2689               << GetStackOverflowReservedBytes(kRuntimeISA) << ")?";
2690    DumpStack(LOG(ERROR));
2691    LOG(FATAL) << "Recursive stack overflow.";
2692  }
2693
2694  tlsPtr_.stack_end = tlsPtr_.stack_begin;
2695
2696  // Remove the stack overflow protection if it is set up.
2697  bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks();
2698  if (implicit_stack_check) {
2699    if (!UnprotectStack()) {
2700      LOG(ERROR) << "Unable to remove stack protection for stack overflow";
2701    }
2702  }
2703}
2704
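// Install a thread-local allocation buffer (TLAB) covering [start, end) and reset the bump
// pointer and object count. A minimal usage sketch, assuming hypothetical caller-side names
// `self`, `buffer` and `tlab_size`:
//   self->SetTlab(buffer, buffer + tlab_size);
//   DCHECK(self->HasTlab());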
2705void Thread::SetTlab(uint8_t* start, uint8_t* end) {
2706  DCHECK_LE(start, end);
2707  tlsPtr_.thread_local_start = start;
2708  tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start;
2709  tlsPtr_.thread_local_end = end;
2710  tlsPtr_.thread_local_objects = 0;
2711}
2712
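// Returns true if this thread currently has a TLAB; the start and end pointers are expected to
// be set iff the bump pointer is.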
2713bool Thread::HasTlab() const {
2714  bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
2715  if (has_tlab) {
2716    DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr);
2717  } else {
2718    DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr);
2719  }
2720  return has_tlab;
2721}
2722
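// Stream a short dump of the thread, e.g. LOG(INFO) << *Thread::Current();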
2723std::ostream& operator<<(std::ostream& os, const Thread& thread) {
2724  thread.ShortDump(os);
2725  return os;
2726}
2727
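// mprotect() the guard region below the stack to PROT_NONE so that touching it faults and
// triggers the implicit stack overflow check.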
2728void Thread::ProtectStack() {
2729  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
2730  VLOG(threads) << "Protecting stack at " << pregion;
2731  if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
2732    LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. "
2733        "Reason: "
2734        << strerror(errno) << " size: " << kStackOverflowProtectedSize;
2735  }
2736}
2737
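// Make the guard region readable and writable again; returns true on success.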
2738bool Thread::UnprotectStack() {
2739  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
2740  VLOG(threads) << "Unprotecting stack at " << pregion;
2741  return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0;
2742}
2743
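// Install the debugger's single-step control for this thread. The thread takes ownership and
// frees it in DeactivateSingleStepControl().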
2744void Thread::ActivateSingleStepControl(SingleStepControl* ssc) {
2745  CHECK(Dbg::IsDebuggerActive());
2746  CHECK(GetSingleStepControl() == nullptr) << "Single step already active in thread " << *this;
2747  CHECK(ssc != nullptr);
2748  tlsPtr_.single_step_control = ssc;
2749}
2750
2751void Thread::DeactivateSingleStepControl() {
2752  CHECK(Dbg::IsDebuggerActive());
2753  CHECK(GetSingleStepControl() != nullptr) << "Single step not active in thread " << *this;
2754  SingleStepControl* ssc = GetSingleStepControl();
2755  tlsPtr_.single_step_control = nullptr;
2756  delete ssc;
2757}
2758
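// Attach a debugger method invocation request. This must be done by a thread other than the
// target thread.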
2759void Thread::SetDebugInvokeReq(DebugInvokeReq* req) {
2760  CHECK(Dbg::IsDebuggerActive());
2761  CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this;
2762  CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself";
2763  CHECK(req != nullptr);
2764  tlsPtr_.debug_invoke_req = req;
2765}
2766
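// Clear and free the pending debug invoke request. This must be done by the target thread itself.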
2767void Thread::ClearDebugInvokeReq() {
2768  CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this;
2769  CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself";
2770  DebugInvokeReq* req = tlsPtr_.debug_invoke_req;
2771  tlsPtr_.debug_invoke_req = nullptr;
2772  delete req;
2773}
2774
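// Link a method verifier into this thread's intrusive list so that its roots are visited by
// Thread::VisitRoots() above.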
2775void Thread::PushVerifier(verifier::MethodVerifier* verifier) {
2776  verifier->link_ = tlsPtr_.method_verifier;
2777  tlsPtr_.method_verifier = verifier;
2778}
2779
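// Unlink the most recently pushed verifier; it must be the head of the list.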
2780void Thread::PopVerifier(verifier::MethodVerifier* verifier) {
2781  CHECK_EQ(tlsPtr_.method_verifier, verifier);
2782  tlsPtr_.method_verifier = verifier->link_;
2783}
2784
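// Count the mutexes recorded as held by this thread, one slot per lock level.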
2785size_t Thread::NumberOfHeldMutexes() const {
2786  size_t count = 0;
2787  for (BaseMutex* mu : tlsPtr_.held_mutexes) {
2788    count += mu != nullptr ? 1 : 0;
2789  }
2790  return count;
2791}
2792
2793}  // namespace art
2794