thread.cc revision 3f5881fda3606b27e30bf903052c73b03910f90b
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define ATRACE_TAG ATRACE_TAG_DALVIK
18
19#include "thread.h"
20
21#include <cutils/trace.h>
22#include <pthread.h>
23#include <signal.h>
24#include <sys/resource.h>
25#include <sys/time.h>
26
27#include <algorithm>
28#include <bitset>
29#include <cerrno>
30#include <iostream>
31#include <list>
32#include <sstream>
33
34#include "arch/context.h"
35#include "base/mutex.h"
36#include "base/timing_logger.h"
37#include "base/to_str.h"
38#include "class_linker-inl.h"
39#include "class_linker.h"
40#include "debugger.h"
41#include "dex_file-inl.h"
42#include "entrypoints/entrypoint_utils.h"
43#include "entrypoints/quick/quick_alloc_entrypoints.h"
44#include "gc_map.h"
45#include "gc/accounting/card_table-inl.h"
46#include "gc/allocator/rosalloc.h"
47#include "gc/heap.h"
48#include "gc/space/space.h"
49#include "handle_scope-inl.h"
50#include "handle_scope.h"
51#include "indirect_reference_table-inl.h"
52#include "jni_internal.h"
53#include "mirror/art_field-inl.h"
54#include "mirror/art_method-inl.h"
55#include "mirror/class_loader.h"
56#include "mirror/class-inl.h"
57#include "mirror/object_array-inl.h"
58#include "mirror/stack_trace_element.h"
59#include "monitor.h"
60#include "object_lock.h"
61#include "quick_exception_handler.h"
62#include "quick/quick_method_frame_info.h"
63#include "reflection.h"
64#include "runtime.h"
65#include "scoped_thread_state_change.h"
66#include "ScopedLocalRef.h"
67#include "ScopedUtfChars.h"
68#include "stack.h"
69#include "thread_list.h"
70#include "thread-inl.h"
71#include "utils.h"
72#include "verifier/dex_gc_map.h"
73#include "verifier/method_verifier.h"
74#include "verify_object-inl.h"
75#include "vmap_table.h"
76#include "well_known_classes.h"
77
78namespace art {
79
80bool Thread::is_started_ = false;
81pthread_key_t Thread::pthread_key_self_;
82ConditionVariable* Thread::resume_cond_ = nullptr;
83const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
84
85static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
86
87void Thread::InitCardTable() {
88  tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
89}
90
91static void UnimplementedEntryPoint() {
92  UNIMPLEMENTED(FATAL);
93}
94
95void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
96                     QuickEntryPoints* qpoints);
97
98void Thread::InitTlsEntryPoints() {
99  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
100  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.interpreter_entrypoints);
101  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) +
102      sizeof(tlsPtr_.quick_entrypoints));
103  for (uintptr_t* it = begin; it != end; ++it) {
104    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
105  }
106  InitEntryPoints(&tlsPtr_.interpreter_entrypoints, &tlsPtr_.jni_entrypoints,
107                  &tlsPtr_.quick_entrypoints);
108}
109
110void Thread::ResetQuickAllocEntryPointsForThread() {
111  ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
112}
113
114void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
115  tlsPtr_.deoptimization_shadow_frame = sf;
116}
117
118void Thread::SetDeoptimizationReturnValue(const JValue& ret_val) {
119  tls64_.deoptimization_return_value.SetJ(ret_val.GetJ());
120}
121
122ShadowFrame* Thread::GetAndClearDeoptimizationShadowFrame(JValue* ret_val) {
123  ShadowFrame* sf = tlsPtr_.deoptimization_shadow_frame;
124  tlsPtr_.deoptimization_shadow_frame = nullptr;
125  ret_val->SetJ(tls64_.deoptimization_return_value.GetJ());
126  return sf;
127}
128
129void Thread::SetShadowFrameUnderConstruction(ShadowFrame* sf) {
130  sf->SetLink(tlsPtr_.shadow_frame_under_construction);
131  tlsPtr_.shadow_frame_under_construction = sf;
132}
133
134void Thread::ClearShadowFrameUnderConstruction() {
135  CHECK_NE(static_cast<ShadowFrame*>(nullptr), tlsPtr_.shadow_frame_under_construction);
136  tlsPtr_.shadow_frame_under_construction = tlsPtr_.shadow_frame_under_construction->GetLink();
137}
138
139void Thread::InitTid() {
140  tls32_.tid = ::art::GetTid();
141}
142
143void Thread::InitAfterFork() {
144  // One thread (us) survived the fork, but we have a new tid so we need to
145  // update the value stashed in this Thread*.
146  InitTid();
147}
148
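// Entry point for threads started via CreateNativeThread: finishes initializing the Thread,
// invokes the peer's run() method, then unregisters and deletes the thread when run() returns.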
149void* Thread::CreateCallback(void* arg) {
150  Thread* self = reinterpret_cast<Thread*>(arg);
151  Runtime* runtime = Runtime::Current();
152  if (runtime == nullptr) {
153    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
154    return nullptr;
155  }
156  {
157    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
158    //       after self->Init().
159    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
160    // If we got here, the runtime cannot be shutting down (shutdown should never start while
161    // threads are being born).
162    CHECK(!runtime->IsShuttingDownLocked());
163    CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM()));
164    Runtime::Current()->EndThreadBirth();
165  }
166  {
167    ScopedObjectAccess soa(self);
168
169    // Copy peer into self, deleting global reference when done.
170    CHECK(self->tlsPtr_.jpeer != nullptr);
171    self->tlsPtr_.opeer = soa.Decode<mirror::Object*>(self->tlsPtr_.jpeer);
172    self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
173    self->tlsPtr_.jpeer = nullptr;
174    self->SetThreadName(self->GetThreadName(soa)->ToModifiedUtf8().c_str());
175
176    mirror::ArtField* priorityField = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority);
177    self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));
178    Dbg::PostThreadStart(self);
179
180    // Invoke the 'run' method of our java.lang.Thread.
181    mirror::Object* receiver = self->tlsPtr_.opeer;
182    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
183    InvokeVirtualOrInterfaceWithJValues(soa, receiver, mid, nullptr);
184  }
185  // Detach and delete self.
186  Runtime::Current()->GetThreadList()->Unregister(self);
187
188  return nullptr;
189}
190
191Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
192                                  mirror::Object* thread_peer) {
193  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
194  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
195  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
196  // to stop it from going away.
197  if (kIsDebugBuild) {
198    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
199    if (result != nullptr && !result->IsSuspended()) {
200      Locks::thread_list_lock_->AssertHeld(soa.Self());
201    }
202  }
203  return result;
204}
205
206Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
207                                  jobject java_thread) {
208  return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread));
209}
210
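// Converts a requested stack size into the size actually passed to pthread_create: applies the
// runtime default for a zero request, adds the 1 MB cushion Dalvik callers expect, reserves space
// for stack-overflow handling, and rounds up to a page multiple.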
211static size_t FixStackSize(size_t stack_size) {
212  // A stack size of zero means "use the default".
213  if (stack_size == 0) {
214    stack_size = Runtime::Current()->GetDefaultStackSize();
215  }
216
217  // Dalvik used the bionic pthread default stack size for native threads,
218  // so include that here to support apps that expect large native stacks.
219  stack_size += 1 * MB;
220
221  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
222  if (stack_size < PTHREAD_STACK_MIN) {
223    stack_size = PTHREAD_STACK_MIN;
224  }
225
226  if (Runtime::Current()->ExplicitStackOverflowChecks()) {
227    // It's likely that callers are trying to ensure they have at least a certain amount of
228    // stack space, so we should add our reserved space on top of what they requested, rather
229    // than implicitly take it away from them.
230    stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
231  } else {
232    // If we are going to use implicit stack checks, allocate space for the protected
233    // region at the bottom of the stack.
234    stack_size += Thread::kStackOverflowImplicitCheckSize +
235        GetStackOverflowReservedBytes(kRuntimeISA);
236  }
237
238  // Some systems require the stack size to be a multiple of the system page size, so round up.
239  stack_size = RoundUp(stack_size, kPageSize);
240
241  return stack_size;
242}
243
244// Global variable to prevent the compiler optimizing away the page reads for the stack.
245uint8_t dont_optimize_this;
246
247// Install a protected region in the stack.  This is used to trigger a SIGSEGV if a stack
248// overflow is detected.  It is located right below the stack_begin_.
249//
250// There is a little complexity here that deserves a special mention.  On some
251// architectures, the stack is created using a VM_GROWSDOWN flag
252// to prevent memory being allocated when it's not needed.  This flag makes the
253// kernel only allocate memory for the stack by growing down in memory.  Because we
254// want to put an mprotected region far away from that at the stack top, we need
255// to make sure the pages for the stack are mapped in before we call mprotect.  We do
256// this by reading every page from the stack bottom (highest address) to the stack top.
257// We then madvise this away.
258void Thread::InstallImplicitProtection() {
259  uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
260  uint8_t* stack_himem = tlsPtr_.stack_end;
261  uint8_t* stack_top = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(&stack_himem) &
262      ~(kPageSize - 1));    // Page containing current top of stack.
263
264  // First remove the protection on the protected region as we will want to read and
265  // write it.  This may fail (on the first attempt when the stack is not mapped)
266  // but we ignore that.
267  UnprotectStack();
268
269  // Map in the stack.  This must be done by reading from the
270  // current stack pointer downwards as the stack may be mapped using VM_GROWSDOWN
271  // in the kernel.  Any access more than a page below the current SP might cause
272  // a segv.
273
274  // Read every page from the high address to the low.
275  for (uint8_t* p = stack_top; p >= pregion; p -= kPageSize) {
276    dont_optimize_this = *p;
277  }
278
279  VLOG(threads) << "installing stack protected region at " << std::hex <<
280      static_cast<void*>(pregion) << " to " <<
281      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
282
283  // Protect the bottom of the stack to prevent read/write to it.
284  ProtectStack();
285
286  // Tell the kernel that we won't be needing these pages any more.
287  // NB. madvise will probably write zeroes into the memory (on linux it does).
288  uint32_t unwanted_size = stack_top - pregion - kPageSize;
289  madvise(pregion, unwanted_size, MADV_DONTNEED);
290}
291
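// Creates and starts the native pthread backing |java_peer|; the new thread enters through
// Thread::CreateCallback. On failure, an OutOfMemoryError is thrown on the calling thread.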
292void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
293  CHECK(java_peer != nullptr);
294  Thread* self = static_cast<JNIEnvExt*>(env)->self;
295  Runtime* runtime = Runtime::Current();
296
297  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
298  bool thread_start_during_shutdown = false;
299  {
300    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
301    if (runtime->IsShuttingDownLocked()) {
302      thread_start_during_shutdown = true;
303    } else {
304      runtime->StartThreadBirth();
305    }
306  }
307  if (thread_start_during_shutdown) {
308    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
309    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
310    return;
311  }
312
313  Thread* child_thread = new Thread(is_daemon);
314  // Use global JNI ref to hold peer live while child thread starts.
315  child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
316  stack_size = FixStackSize(stack_size);
317
318  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to
319  // assign it.
320  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
321                    reinterpret_cast<jlong>(child_thread));
322
323  pthread_t new_pthread;
324  pthread_attr_t attr;
325  CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
326  CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED");
327  CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
328  int pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, child_thread);
329  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");
330
331  if (pthread_create_result != 0) {
332    // pthread_create(3) failed, so clean up.
333    {
334      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
335      runtime->EndThreadBirth();
336    }
337    // Manually delete the global reference since Thread::Init will not have been run.
338    env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer);
339    child_thread->tlsPtr_.jpeer = nullptr;
340    delete child_thread;
341    child_thread = nullptr;
342    // TODO: remove from thread group?
343    env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
344    {
345      std::string msg(StringPrintf("pthread_create (%s stack) failed: %s",
346                                   PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
347      ScopedObjectAccess soa(env);
348      soa.Self()->ThrowOutOfMemoryError(msg.c_str());
349    }
350  }
351}
352
353bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
354  // This function does all the initialization that must be run by the native thread it applies to.
355  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
356  // we can handshake with the corresponding native thread when it's ready.) Check this native
357  // thread hasn't been through here already...
358  CHECK(Thread::Current() == nullptr);
359
360  // Set pthread_self_ ahead of pthread_setspecific (which is what makes Thread::Current() work);
361  // this avoids pthread_self_ ever being invalid when discovered from Thread::Current().
362  tlsPtr_.pthread_self = pthread_self();
363  CHECK(is_started_);
364
365  SetUpAlternateSignalStack();
366  if (!InitStackHwm()) {
367    return false;
368  }
369  InitCpu();
370  InitTlsEntryPoints();
371  RemoveSuspendTrigger();
372  InitCardTable();
373  InitTid();
374
375  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
376  DCHECK_EQ(Thread::Current(), this);
377
378  tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
379
380  tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm);
381  if (tlsPtr_.jni_env == nullptr) {
382    return false;
383  }
384
385  thread_list->Register(this);
386  return true;
387}
388
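// Attaches the calling native thread to the runtime, optionally creating a managed
// java.lang.Thread peer (create_peer).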
389Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
390                       bool create_peer) {
391  Runtime* runtime = Runtime::Current();
392  if (runtime == nullptr) {
393    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
394    return nullptr;
395  }
396  Thread* self;
397  {
398    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
399    if (runtime->IsShuttingDownLocked()) {
400      LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
401      return nullptr;
402    } else {
403      Runtime::Current()->StartThreadBirth();
404      self = new Thread(as_daemon);
405      bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
406      Runtime::Current()->EndThreadBirth();
407      if (!init_success) {
408        delete self;
409        return nullptr;
410      }
411    }
412  }
413
414  CHECK_NE(self->GetState(), kRunnable);
415  self->SetState(kNative);
416
417  // If we're the main thread, ClassLinker won't be created until after we're attached,
418  // so that thread needs a two-stage attach. Regular threads don't need this hack.
419  // In the compiler, all threads need this hack, because no-one's going to be getting
420  // a native peer!
421  if (create_peer) {
422    self->CreatePeer(thread_name, as_daemon, thread_group);
423  } else {
424    // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
425    if (thread_name != nullptr) {
426      self->tlsPtr_.name->assign(thread_name);
427      ::art::SetThreadName(thread_name);
428    } else if (self->GetJniEnv()->check_jni) {
429      LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
430    }
431  }
432
433  {
434    ScopedObjectAccess soa(self);
435    Dbg::PostThreadStart(self);
436  }
437
438  return self;
439}
440
441void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
442  Runtime* runtime = Runtime::Current();
443  CHECK(runtime->IsStarted());
444  JNIEnv* env = tlsPtr_.jni_env;
445
446  if (thread_group == nullptr) {
447    thread_group = runtime->GetMainThreadGroup();
448  }
449  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
450  // Add missing null check in case of OOM b/18297817
451  if (name != nullptr && thread_name.get() == nullptr) {
452    CHECK(IsExceptionPending());
453    return;
454  }
455  jint thread_priority = GetNativePriority();
456  jboolean thread_is_daemon = as_daemon;
457
458  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
459  if (peer.get() == nullptr) {
460    CHECK(IsExceptionPending());
461    return;
462  }
463  {
464    ScopedObjectAccess soa(this);
465    tlsPtr_.opeer = soa.Decode<mirror::Object*>(peer.get());
466  }
467  env->CallNonvirtualVoidMethod(peer.get(),
468                                WellKnownClasses::java_lang_Thread,
469                                WellKnownClasses::java_lang_Thread_init,
470                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
471  AssertNoPendingException();
472
473  Thread* self = this;
474  DCHECK_EQ(self, Thread::Current());
475  env->SetLongField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
476                    reinterpret_cast<jlong>(self));
477
478  ScopedObjectAccess soa(self);
479  StackHandleScope<1> hs(self);
480  MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName(soa)));
481  if (peer_thread_name.Get() == nullptr) {
482    // The Thread constructor should have set the Thread.name to a
483    // non-null value. However, because we can run without code
484    // available (in the compiler, in tests), we manually assign the
485    // fields the constructor should have set.
486    if (runtime->IsActiveTransaction()) {
487      InitPeer<true>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
488    } else {
489      InitPeer<false>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
490    }
491    peer_thread_name.Assign(GetThreadName(soa));
492  }
493  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
494  if (peer_thread_name.Get() != nullptr) {
495    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
496  }
497}
498
499template<bool kTransactionActive>
500void Thread::InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
501                      jobject thread_name, jint thread_priority) {
502  soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
503      SetBoolean<kTransactionActive>(tlsPtr_.opeer, thread_is_daemon);
504  soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
505      SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object*>(thread_group));
506  soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
507      SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object*>(thread_name));
508  soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
509      SetInt<kTransactionActive>(tlsPtr_.opeer, thread_priority);
510}
511
512void Thread::SetThreadName(const char* name) {
513  tlsPtr_.name->assign(name);
514  ::art::SetThreadName(name);
515  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
516}
517
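// Records the native stack bounds for this thread and, when implicit stack-overflow checks are
// enabled, adjusts those bounds and installs the protected guard region.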
518bool Thread::InitStackHwm() {
519  void* read_stack_base;
520  size_t read_stack_size;
521  size_t read_guard_size;
522  GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size);
523
524  tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base);
525  tlsPtr_.stack_size = read_stack_size;
526
527  // The minimum stack size we can cope with is the overflow reserved bytes (typically
528  // 8K) + the protected region size (4K) + another page (4K).  Typically this will
529  // be 8+4+4 = 16K.  The thread won't be able to do much with this stack: even the GC takes
530  // between 8K and 12K.
531  uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
532    + 4 * KB;
533  if (read_stack_size <= min_stack) {
534    // Note, as we know the stack is small, avoid operations that could use a lot of stack.
535    LogMessage::LogLineLowStack(__PRETTY_FUNCTION__, __LINE__, ERROR,
536                                "Attempt to attach a thread with a too-small stack");
537    return false;
538  }
539
540  // This is included in the SIGQUIT output, but it's useful here for thread debugging.
541  VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)",
542                                read_stack_base,
543                                PrettySize(read_stack_size).c_str(),
544                                PrettySize(read_guard_size).c_str());
545
546  // Set stack_end_ to the bottom of the stack, reserving space for stack overflow handling.
547
548  Runtime* runtime = Runtime::Current();
549  bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
550  ResetDefaultStackEnd();
551
552  // Install the protected region if we are doing implicit overflow checks.
553  if (implicit_stack_check) {
554    // The thread might have a protected region at the bottom.  We need
555    // to install our own region, so we need to move the limits
556    // of the stack to make room for it.
557
558    tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
559    tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
560    tlsPtr_.stack_size -= read_guard_size;
561
562    InstallImplicitProtection();
563  }
564
565  // Sanity check.
566  int stack_variable;
567  CHECK_GT(&stack_variable, reinterpret_cast<void*>(tlsPtr_.stack_end));
568
569  return true;
570}
571
572void Thread::ShortDump(std::ostream& os) const {
573  os << "Thread[";
574  if (GetThreadId() != 0) {
575    // If we're in kStarting, we won't have a thin lock id or tid yet.
576    os << GetThreadId()
577             << ",tid=" << GetTid() << ',';
578  }
579  os << GetState()
580           << ",Thread*=" << this
581           << ",peer=" << tlsPtr_.opeer
582           << ",\"" << *tlsPtr_.name << "\""
583           << "]";
584}
585
586void Thread::Dump(std::ostream& os) const {
587  DumpState(os);
588  DumpStack(os);
589}
590
591mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
592  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
593  return (tlsPtr_.opeer != nullptr) ? reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
594}
595
596void Thread::GetThreadName(std::string& name) const {
597  name.assign(*tlsPtr_.name);
598}
599
600uint64_t Thread::GetCpuMicroTime() const {
601#if defined(__linux__)
602  clockid_t cpu_clock_id;
603  pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
604  timespec now;
605  clock_gettime(cpu_clock_id, &now);
606  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
607#else  // __APPLE__
608  UNIMPLEMENTED(WARNING);
609  return -1;
610#endif
611}
612
613// Attempt to rectify locks so that we dump thread list with required locks before exiting.
614static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
615  LOG(ERROR) << *thread << " suspend count already zero.";
616  Locks::thread_suspend_count_lock_->Unlock(self);
617  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
618    Locks::mutator_lock_->SharedTryLock(self);
619    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
620      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
621    }
622  }
623  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
624    Locks::thread_list_lock_->TryLock(self);
625    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
626      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
627    }
628  }
629  std::ostringstream ss;
630  Runtime::Current()->GetThreadList()->Dump(ss);
631  LOG(FATAL) << ss.str();
632}
633
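// Adjusts this thread's suspend count (and debug suspend count when for_debugger is set),
// raising or clearing the kSuspendRequest flag as the count leaves or reaches zero.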
634void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
635  if (kIsDebugBuild) {
636    DCHECK(delta == -1 || delta == +1 || delta == -tls32_.debug_suspend_count)
637          << delta << " " << tls32_.debug_suspend_count << " " << this;
638    DCHECK_GE(tls32_.suspend_count, tls32_.debug_suspend_count) << this;
639    Locks::thread_suspend_count_lock_->AssertHeld(self);
640    if (this != self && !IsSuspended()) {
641      Locks::thread_list_lock_->AssertHeld(self);
642    }
643  }
644  if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
645    UnsafeLogFatalForSuspendCount(self, this);
646    return;
647  }
648
649  tls32_.suspend_count += delta;
650  if (for_debugger) {
651    tls32_.debug_suspend_count += delta;
652  }
653
654  if (tls32_.suspend_count == 0) {
655    AtomicClearFlag(kSuspendRequest);
656  } else {
657    AtomicSetFlag(kSuspendRequest);
658    TriggerSuspend();
659  }
660}
661
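// Copies out and clears this thread's pending checkpoint closures under
// thread_suspend_count_lock_, then runs them without holding the lock.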
662void Thread::RunCheckpointFunction() {
663  Closure *checkpoints[kMaxCheckpoints];
664
665  // Grab the suspend_count lock and copy the current set of
666  // checkpoints.  Then clear the list and the flag.  The RequestCheckpoint
667  // function will also grab this lock so we prevent a race between setting
668  // the kCheckpointRequest flag and clearing it.
669  {
670    MutexLock mu(this, *Locks::thread_suspend_count_lock_);
671    for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
672      checkpoints[i] = tlsPtr_.checkpoint_functions[i];
673      tlsPtr_.checkpoint_functions[i] = nullptr;
674    }
675    AtomicClearFlag(kCheckpointRequest);
676  }
677
678  // Outside the lock, run all the checkpoint functions that
679  // we collected.
680  bool found_checkpoint = false;
681  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
682    if (checkpoints[i] != nullptr) {
683      ATRACE_BEGIN("Checkpoint function");
684      checkpoints[i]->Run(this);
685      ATRACE_END();
686      found_checkpoint = true;
687    }
688  }
689  CHECK(found_checkpoint);
690}
691
692bool Thread::RequestCheckpoint(Closure* function) {
693  union StateAndFlags old_state_and_flags;
694  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
695  if (old_state_and_flags.as_struct.state != kRunnable) {
696    return false;  // Fail, thread is suspended and so can't run a checkpoint.
697  }
698
699  uint32_t available_checkpoint = kMaxCheckpoints;
700  for (uint32_t i = 0 ; i < kMaxCheckpoints; ++i) {
701    if (tlsPtr_.checkpoint_functions[i] == nullptr) {
702      available_checkpoint = i;
703      break;
704    }
705  }
706  if (available_checkpoint == kMaxCheckpoints) {
707    // No checkpoint functions available, we can't run a checkpoint
708    return false;
709  }
710  tlsPtr_.checkpoint_functions[available_checkpoint] = function;
711
712  // Checkpoint function installed; now install the flag bit.
713  // We must be runnable to request a checkpoint.
714  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
715  union StateAndFlags new_state_and_flags;
716  new_state_and_flags.as_int = old_state_and_flags.as_int;
717  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
718  bool success =
719      tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(old_state_and_flags.as_int,
720                                                                                       new_state_and_flags.as_int);
721  if (UNLIKELY(!success)) {
722    // The thread changed state before the checkpoint was installed.
723    CHECK_EQ(tlsPtr_.checkpoint_functions[available_checkpoint], function);
724    tlsPtr_.checkpoint_functions[available_checkpoint] = nullptr;
725  } else {
726    CHECK_EQ(ReadFlag(kCheckpointRequest), true);
727    TriggerSuspend();
728  }
729  return success;
730}
731
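// Atomically claims and clears the pending flip function (if any), ensuring at most one caller
// ends up running it.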
732Closure* Thread::GetFlipFunction() {
733  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
734  Closure* func;
735  do {
736    func = atomic_func->LoadRelaxed();
737    if (func == nullptr) {
738      return nullptr;
739    }
740  } while (!atomic_func->CompareExchangeWeakSequentiallyConsistent(func, nullptr));
741  DCHECK(func != nullptr);
742  return func;
743}
744
745void Thread::SetFlipFunction(Closure* function) {
746  CHECK(function != nullptr);
747  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
748  atomic_func->StoreSequentiallyConsistent(function);
749}
750
751void Thread::FullSuspendCheck() {
752  VLOG(threads) << this << " self-suspending";
753  ATRACE_BEGIN("Full suspend check");
754  // Make thread appear suspended to other threads, release mutator_lock_.
755  tls32_.suspended_at_suspend_check = true;
756  TransitionFromRunnableToSuspended(kSuspended);
757  // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
758  TransitionFromSuspendedToRunnable();
759  tls32_.suspended_at_suspend_check = false;
760  ATRACE_END();
761  VLOG(threads) << this << " self-reviving";
762}
763
764void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
765  std::string group_name;
766  int priority;
767  bool is_daemon = false;
768  Thread* self = Thread::Current();
769
770  // If flip_function is not null, it means we have run a checkpoint
771  // before the thread wakes up to execute the flip function and the
772  // thread roots haven't been forwarded.  So the following access to
773  // the roots (opeer or methods in the frames) would be bad. Run it
774  // here. TODO: clean up.
775  if (thread != nullptr) {
776    ScopedObjectAccessUnchecked soa(self);
777    Thread* this_thread = const_cast<Thread*>(thread);
778    Closure* flip_func = this_thread->GetFlipFunction();
779    if (flip_func != nullptr) {
780      flip_func->Run(this_thread);
781    }
782  }
783
784  // Don't do this if we are aborting since the GC may have all the threads suspended. This will
785  // cause ScopedObjectAccessUnchecked to deadlock.
786  if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
787    ScopedObjectAccessUnchecked soa(self);
788    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)
789        ->GetInt(thread->tlsPtr_.opeer);
790    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)
791        ->GetBoolean(thread->tlsPtr_.opeer);
792
793    mirror::Object* thread_group =
794        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->tlsPtr_.opeer);
795
796    if (thread_group != nullptr) {
797      mirror::ArtField* group_name_field =
798          soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
799      mirror::String* group_name_string =
800          reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
801      group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
802    }
803  } else {
804    priority = GetNativePriority();
805  }
806
807  std::string scheduler_group_name(GetSchedulerGroupName(tid));
808  if (scheduler_group_name.empty()) {
809    scheduler_group_name = "default";
810  }
811
812  if (thread != nullptr) {
813    os << '"' << *thread->tlsPtr_.name << '"';
814    if (is_daemon) {
815      os << " daemon";
816    }
817    os << " prio=" << priority
818       << " tid=" << thread->GetThreadId()
819       << " " << thread->GetState();
820    if (thread->IsStillStarting()) {
821      os << " (still starting up)";
822    }
823    os << "\n";
824  } else {
825    os << '"' << ::art::GetThreadName(tid) << '"'
826       << " prio=" << priority
827       << " (not attached)\n";
828  }
829
830  if (thread != nullptr) {
831    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
832    os << "  | group=\"" << group_name << "\""
833       << " sCount=" << thread->tls32_.suspend_count
834       << " dsCount=" << thread->tls32_.debug_suspend_count
835       << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer)
836       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
837  }
838
839  os << "  | sysTid=" << tid
840     << " nice=" << getpriority(PRIO_PROCESS, tid)
841     << " cgrp=" << scheduler_group_name;
842  if (thread != nullptr) {
843    int policy;
844    sched_param sp;
845    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp),
846                       __FUNCTION__);
847    os << " sched=" << policy << "/" << sp.sched_priority
848       << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self);
849  }
850  os << "\n";
851
852  // Grab the scheduler stats for this thread.
853  std::string scheduler_stats;
854  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
855    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
856  } else {
857    scheduler_stats = "0 0 0";
858  }
859
860  char native_thread_state = '?';
861  int utime = 0;
862  int stime = 0;
863  int task_cpu = 0;
864  GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu);
865
866  os << "  | state=" << native_thread_state
867     << " schedstat=( " << scheduler_stats << " )"
868     << " utm=" << utime
869     << " stm=" << stime
870     << " core=" << task_cpu
871     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
872  if (thread != nullptr) {
873    os << "  | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-"
874        << reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize="
875        << PrettySize(thread->tlsPtr_.stack_size) << "\n";
876    // Dump the held mutexes.
877    os << "  | held mutexes=";
878    for (size_t i = 0; i < kLockLevelCount; ++i) {
879      if (i != kMonitorLock) {
880        BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i));
881        if (mutex != nullptr) {
882          os << " \"" << mutex->GetName() << "\"";
883          if (mutex->IsReaderWriterMutex()) {
884            ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex);
885            if (rw_mutex->GetExclusiveOwnerTid() == static_cast<uint64_t>(tid)) {
886              os << "(exclusive held)";
887            } else {
888              os << "(shared held)";
889            }
890          }
891        }
892      }
893    }
894    os << "\n";
895  }
896}
897
898void Thread::DumpState(std::ostream& os) const {
899  Thread::DumpState(os, this, GetTid());
900}
901
902struct StackDumpVisitor : public StackVisitor {
903  StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
904      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
905      : StackVisitor(thread_in, context), os(os_in), thread(thread_in),
906        can_allocate(can_allocate_in), last_method(nullptr), last_line_number(0),
907        repetition_count(0), frame_count(0) {
908  }
909
910  virtual ~StackDumpVisitor() {
911    if (frame_count == 0) {
912      os << "  (no managed stack frames)\n";
913    }
914  }
915
916  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
917    mirror::ArtMethod* m = GetMethod();
918    if (m->IsRuntimeMethod()) {
919      return true;
920    }
921    const int kMaxRepetition = 3;
922    mirror::Class* c = m->GetDeclaringClass();
923    mirror::DexCache* dex_cache = c->GetDexCache();
924    int line_number = -1;
925    if (dex_cache != nullptr) {  // be tolerant of bad input
926      const DexFile& dex_file = *dex_cache->GetDexFile();
927      line_number = dex_file.GetLineNumFromPC(m, GetDexPc(false));
928    }
929    if (line_number == last_line_number && last_method == m) {
930      ++repetition_count;
931    } else {
932      if (repetition_count >= kMaxRepetition) {
933        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
934      }
935      repetition_count = 0;
936      last_line_number = line_number;
937      last_method = m;
938    }
939    if (repetition_count < kMaxRepetition) {
940      os << "  at " << PrettyMethod(m, false);
941      if (m->IsNative()) {
942        os << "(Native method)";
943      } else {
944        const char* source_file(m->GetDeclaringClassSourceFile());
945        os << "(" << (source_file != nullptr ? source_file : "unavailable")
946           << ":" << line_number << ")";
947      }
948      os << "\n";
949      if (frame_count == 0) {
950        Monitor::DescribeWait(os, thread);
951      }
952      if (can_allocate) {
953        // Visit locks, but do not abort on errors. This would trigger a nested abort.
954        Monitor::VisitLocks(this, DumpLockedObject, &os, false);
955      }
956    }
957
958    ++frame_count;
959    return true;
960  }
961
962  static void DumpLockedObject(mirror::Object* o, void* context)
963      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
964    std::ostream& os = *reinterpret_cast<std::ostream*>(context);
965    os << "  - locked ";
966    if (o == nullptr) {
967      os << "an unknown object";
968    } else {
969      if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
970          Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
971        // Getting the identity hashcode here would result in lock inflation and suspension of the
972        // current thread, which isn't safe if this is the only runnable thread.
973        os << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)", reinterpret_cast<intptr_t>(o),
974                           PrettyTypeOf(o).c_str());
975      } else {
976        // IdentityHashCode can cause thread suspension, which would invalidate o if it moved. So
977        // we get the pretty type beofre we call IdentityHashCode.
978        const std::string pretty_type(PrettyTypeOf(o));
979        os << StringPrintf("<0x%08x> (a %s)", o->IdentityHashCode(), pretty_type.c_str());
980      }
981    }
982    os << "\n";
983  }
984
985  std::ostream& os;
986  const Thread* thread;
987  const bool can_allocate;
988  mirror::ArtMethod* last_method;
989  int last_line_number;
990  int repetition_count;
991  int frame_count;
992};
993
994static bool ShouldShowNativeStack(const Thread* thread)
995    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
996  ThreadState state = thread->GetState();
997
998  // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
999  if (state > kWaiting && state < kStarting) {
1000    return true;
1001  }
1002
1003  // In an Object.wait variant or Thread.sleep? That's not interesting.
1004  if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
1005    return false;
1006  }
1007
1008  // Threads with no managed stack frames should be shown.
1009  const ManagedStack* managed_stack = thread->GetManagedStack();
1010  if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
1011      managed_stack->GetTopShadowFrame() == nullptr)) {
1012    return true;
1013  }
1014
1015  // In some other native method? That's interesting.
1016  // We don't just check kNative because native methods will be in state kSuspended if they're
1017  // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
1018  // thread-startup states if it's early enough in their life cycle (http://b/7432159).
1019  mirror::ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
1020  return current_method != nullptr && current_method->IsNative();
1021}
1022
1023void Thread::DumpJavaStack(std::ostream& os) const {
1024  // If flip_function is not null, it means we have run a checkpoint
1025  // before the thread wakes up to execute the flip function and the
1026  // thread roots haven't been forwarded.  So the following access to
1027  // the roots (locks or methods in the frames) would be bad. Run it
1028  // here. TODO: clean up.
1029  {
1030    Thread* this_thread = const_cast<Thread*>(this);
1031    Closure* flip_func = this_thread->GetFlipFunction();
1032    if (flip_func != nullptr) {
1033      flip_func->Run(this_thread);
1034    }
1035  }
1036
1037  // Dumping the Java stack involves the verifier for locks. The verifier operates under the
1038  // assumption that there is no exception pending on entry. Thus, stash any pending exception.
1039  // Thread::Current() instead of this in case a thread is dumping the stack of another suspended
1040  // thread.
1041  StackHandleScope<1> scope(Thread::Current());
1042  Handle<mirror::Throwable> exc;
1043  bool have_exception = false;
1044  if (IsExceptionPending()) {
1045    exc = scope.NewHandle(GetException());
1046    const_cast<Thread*>(this)->ClearException();
1047    have_exception = true;
1048  }
1049
1050  std::unique_ptr<Context> context(Context::Create());
1051  StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
1052                          !tls32_.throwing_OutOfMemoryError);
1053  dumper.WalkStack();
1054
1055  if (have_exception) {
1056    const_cast<Thread*>(this)->SetException(exc.Get());
1057  }
1058}
1059
1060void Thread::DumpStack(std::ostream& os) const {
1061  // TODO: we call this code when dying but may not have suspended the thread ourselves. The
1062  //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
1063  //       the race with the thread_suspend_count_lock_).
1064  bool dump_for_abort = (gAborting > 0);
1065  bool safe_to_dump = (this == Thread::Current() || IsSuspended());
1066  if (!kIsDebugBuild) {
1067    // We always want to dump the stack for an abort, however, there is no point dumping another
1068    // thread's stack in debug builds where we'll hit the not suspended check in the stack walk.
1069    safe_to_dump = (safe_to_dump || dump_for_abort);
1070  }
1071  if (safe_to_dump) {
1072    // If we're currently in native code, dump that stack before dumping the managed stack.
1073    if (dump_for_abort || ShouldShowNativeStack(this)) {
1074      DumpKernelStack(os, GetTid(), "  kernel: ", false);
1075      // b/20040863. Temporary workaround for x86 libunwind issue.
1076#if defined(__i386__) && defined(HAVE_ANDROID_OS)
1077      os << "Cannot dump native stack. b/20040863.\n";
1078#else
1079      DumpNativeStack(os, GetTid(), "  native: ", GetCurrentMethod(nullptr, !dump_for_abort));
1080#endif
1081    }
1082    DumpJavaStack(os);
1083  } else {
1084    os << "Not able to dump stack of thread that isn't suspended";
1085  }
1086}
1087
1088void Thread::ThreadExitCallback(void* arg) {
1089  Thread* self = reinterpret_cast<Thread*>(arg);
1090  if (self->tls32_.thread_exit_check_count == 0) {
1091    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
1092        "going to use a pthread_key_create destructor?): " << *self;
1093    CHECK(is_started_);
1094    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
1095    self->tls32_.thread_exit_check_count = 1;
1096  } else {
1097    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
1098  }
1099}
1100
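// One-time process initialization: creates the resume condition variable and the pthread TLS key
// used to locate Thread::Current().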
1101void Thread::Startup() {
1102  CHECK(!is_started_);
1103  is_started_ = true;
1104  {
1105    // MutexLock to keep annotalysis happy.
1106    //
1107    // Note we use nullptr for the thread because Thread::Current can
1108    // return garbage since (is_started_ == true) and
1109    // Thread::pthread_key_self_ is not yet initialized.
1110    // This was seen on glibc.
1111    MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_);
1112    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
1113                                         *Locks::thread_suspend_count_lock_);
1114  }
1115
1116  // Allocate a TLS slot.
1117  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback),
1118                     "self key");
1119
1120  // Double-check the TLS slot allocation.
1121  if (pthread_getspecific(pthread_key_self_) != nullptr) {
1122    LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
1123  }
1124}
1125
1126void Thread::FinishStartup() {
1127  Runtime* runtime = Runtime::Current();
1128  CHECK(runtime->IsStarted());
1129
1130  // Finish attaching the main thread.
1131  ScopedObjectAccess soa(Thread::Current());
1132  Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
1133
1134  Runtime::Current()->GetClassLinker()->RunRootClinits();
1135}
1136
1137void Thread::Shutdown() {
1138  CHECK(is_started_);
1139  is_started_ = false;
1140  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
1141  MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
1142  if (resume_cond_ != nullptr) {
1143    delete resume_cond_;
1144    resume_cond_ = nullptr;
1145  }
1146}
1147
1148Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupted_(false) {
1149  wait_mutex_ = new Mutex("a thread wait mutex");
1150  wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
1151  tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
1152  tlsPtr_.name = new std::string(kThreadNameDuringStartup);
1153  tlsPtr_.nested_signal_state = static_cast<jmp_buf*>(malloc(sizeof(jmp_buf)));
1154
1155  CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
1156  tls32_.state_and_flags.as_struct.flags = 0;
1157  tls32_.state_and_flags.as_struct.state = kNative;
1158  memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
1159  std::fill(tlsPtr_.rosalloc_runs,
1160            tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBrackets,
1161            gc::allocator::RosAlloc::GetDedicatedFullRun());
1162  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
1163    tlsPtr_.checkpoint_functions[i] = nullptr;
1164  }
1165  tlsPtr_.flip_function = nullptr;
1166  tls32_.suspended_at_suspend_check = false;
1167}
1168
1169bool Thread::IsStillStarting() const {
1170  // You might think you can check whether the state is kStarting, but for much of thread startup,
1171  // the thread is in kNative; it might also be in kVmWait.
1172  // You might think you can check whether the peer is nullptr, but the peer is actually created and
1173  // assigned fairly early on, and needs to be.
1174  // It turns out that the last thing to change is the thread name; that's a good proxy for "has
1175  // this thread _ever_ entered kRunnable".
1176  return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) ||
1177      (*tlsPtr_.name == kThreadNameDuringStartup);
1178}
1179
1180void Thread::AssertPendingException() const {
1181  if (UNLIKELY(!IsExceptionPending())) {
1182    LOG(FATAL) << "Pending exception expected.";
1183  }
1184}
1185
1186void Thread::AssertNoPendingException() const {
1187  if (UNLIKELY(IsExceptionPending())) {
1188    ScopedObjectAccess soa(Thread::Current());
1189    mirror::Throwable* exception = GetException();
1190    LOG(FATAL) << "No pending exception expected: " << exception->Dump();
1191  }
1192}
1193
1194void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
1195  if (UNLIKELY(IsExceptionPending())) {
1196    ScopedObjectAccess soa(Thread::Current());
1197    mirror::Throwable* exception = GetException();
1198    LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
1199        << exception->Dump();
1200  }
1201}
1202
1203class MonitorExitVisitor : public SingleRootVisitor {
1204 public:
1205  explicit MonitorExitVisitor(Thread* self) : self_(self) { }
1206
1207  // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
1208  void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
1209      OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1210    if (self_->HoldsLock(entered_monitor)) {
1211      LOG(WARNING) << "Calling MonitorExit on object "
1212                   << entered_monitor << " (" << PrettyTypeOf(entered_monitor) << ")"
1213                   << " left locked by native thread "
1214                   << *Thread::Current() << " which is detaching";
1215      entered_monitor->MonitorExit(self_);
1216    }
1217  }
1218
1219 private:
1220  Thread* const self_;
1221};
1222
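// Runs the managed-visible parts of thread teardown: exits JNI monitors left locked, reports
// uncaught exceptions, removes the peer from its ThreadGroup, clears nativePeer, and notifies
// Thread.join() waiters.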
1223void Thread::Destroy() {
1224  Thread* self = this;
1225  DCHECK_EQ(self, Thread::Current());
1226
1227  if (tlsPtr_.jni_env != nullptr) {
1228    {
1229      ScopedObjectAccess soa(self);
1230      MonitorExitVisitor visitor(self);
1231      // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
1232      tlsPtr_.jni_env->monitors.VisitRoots(&visitor, RootInfo(kRootVMInternal));
1233    }
1234    // Release locally held global references which releasing may require the mutator lock.
1235    if (tlsPtr_.jpeer != nullptr) {
1236      // If pthread_create fails we don't have a jni env here.
1237      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
1238      tlsPtr_.jpeer = nullptr;
1239    }
1240    if (tlsPtr_.class_loader_override != nullptr) {
1241      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override);
1242      tlsPtr_.class_loader_override = nullptr;
1243    }
1244  }
1245
1246  if (tlsPtr_.opeer != nullptr) {
1247    ScopedObjectAccess soa(self);
1248    // We may need to call user-supplied managed code, do this before final clean-up.
1249    HandleUncaughtExceptions(soa);
1250    RemoveFromThreadGroup(soa);
1251
1252    // this.nativePeer = 0;
1253    if (Runtime::Current()->IsActiveTransaction()) {
1254      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
1255          ->SetLong<true>(tlsPtr_.opeer, 0);
1256    } else {
1257      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
1258          ->SetLong<false>(tlsPtr_.opeer, 0);
1259    }
1260    Dbg::PostThreadDeath(self);
1261
1262    // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
1263    // who is waiting.
1264    mirror::Object* lock =
1265        soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
1266    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
1267    if (lock != nullptr) {
1268      StackHandleScope<1> hs(self);
1269      Handle<mirror::Object> h_obj(hs.NewHandle(lock));
1270      ObjectLock<mirror::Object> locker(self, h_obj);
1271      locker.NotifyAll();
1272    }
1273    tlsPtr_.opeer = nullptr;
1274  }
1275
1276  {
1277    ScopedObjectAccess soa(self);
1278    Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
1279  }
1280}
1281
1282Thread::~Thread() {
1283  CHECK(tlsPtr_.class_loader_override == nullptr);
1284  CHECK(tlsPtr_.jpeer == nullptr);
1285  CHECK(tlsPtr_.opeer == nullptr);
1286  bool initialized = (tlsPtr_.jni_env != nullptr);  // Did Thread::Init run?
1287  if (initialized) {
1288    delete tlsPtr_.jni_env;
1289    tlsPtr_.jni_env = nullptr;
1290  }
1291  CHECK_NE(GetState(), kRunnable);
1292  CHECK_NE(ReadFlag(kCheckpointRequest), true);
1293  CHECK(tlsPtr_.checkpoint_functions[0] == nullptr);
1294  CHECK(tlsPtr_.checkpoint_functions[1] == nullptr);
1295  CHECK(tlsPtr_.checkpoint_functions[2] == nullptr);
1296  CHECK(tlsPtr_.flip_function == nullptr);
1297  CHECK_EQ(tls32_.suspended_at_suspend_check, false);
1298
1299  // We may be deleting a stillborn thread.
1300  SetStateUnsafe(kTerminated);
1301
1302  delete wait_cond_;
1303  delete wait_mutex_;
1304
1305  if (tlsPtr_.long_jump_context != nullptr) {
1306    delete tlsPtr_.long_jump_context;
1307  }
1308
1309  if (initialized) {
1310    CleanupCpu();
1311  }
1312
1313  if (tlsPtr_.single_step_control != nullptr) {
1314    delete tlsPtr_.single_step_control;
1315  }
1316  delete tlsPtr_.instrumentation_stack;
1317  delete tlsPtr_.name;
1318  delete tlsPtr_.stack_trace_sample;
1319  free(tlsPtr_.nested_signal_state);
1320
1321  Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
1322
1323  TearDownAlternateSignalStack();
1324}
1325
1326void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
1327  if (!IsExceptionPending()) {
1328    return;
1329  }
1330  ScopedLocalRef<jobject> peer(tlsPtr_.jni_env, soa.AddLocalReference<jobject>(tlsPtr_.opeer));
1331  ScopedThreadStateChange tsc(this, kNative);
1332
1333  // Get and clear the exception.
1334  ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred());
1335  tlsPtr_.jni_env->ExceptionClear();
1336
1337  // If the thread has its own handler, use that.
1338  ScopedLocalRef<jobject> handler(tlsPtr_.jni_env,
1339                                  tlsPtr_.jni_env->GetObjectField(peer.get(),
1340                                      WellKnownClasses::java_lang_Thread_uncaughtHandler));
1341  if (handler.get() == nullptr) {
1342    // Otherwise use the thread group's default handler.
1343    handler.reset(tlsPtr_.jni_env->GetObjectField(peer.get(),
1344                                                  WellKnownClasses::java_lang_Thread_group));
1345  }
1346
1347  // Call the handler.
1348  tlsPtr_.jni_env->CallVoidMethod(handler.get(),
1349      WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException,
1350      peer.get(), exception.get());
1351
1352  // If the handler threw, clear that exception too.
1353  tlsPtr_.jni_env->ExceptionClear();
1354}
1355
1356void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
1357  // this.group.removeThread(this);
1358  // group can be null if we're in the compiler or a test.
1359  mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)
1360      ->GetObject(tlsPtr_.opeer);
1361  if (ogroup != nullptr) {
1362    ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
1363    ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(tlsPtr_.opeer));
1364    ScopedThreadStateChange tsc(soa.Self(), kNative);
1365    tlsPtr_.jni_env->CallVoidMethod(group.get(),
1366                                    WellKnownClasses::java_lang_ThreadGroup_removeThread,
1367                                    peer.get());
1368  }
1369}
1370
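// Counts the references held across all handle scopes currently on this thread.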
1371size_t Thread::NumHandleReferences() {
1372  size_t count = 0;
1373  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
1374    count += cur->NumberOfReferences();
1375  }
1376  return count;
1377}
1378
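// Returns true if |obj| points into one of this thread's handle scopes or into its shadow frames.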
1379bool Thread::HandleScopeContains(jobject obj) const {
1380  StackReference<mirror::Object>* hs_entry =
1381      reinterpret_cast<StackReference<mirror::Object>*>(obj);
1382  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur!= nullptr; cur = cur->GetLink()) {
1383    if (cur->Contains(hs_entry)) {
1384      return true;
1385    }
1386  }
1387  // JNI code invoked from portable code uses shadow frames rather than the handle scope.
1388  return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
1389}
1390
1391void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
1392  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
1393      visitor, RootInfo(kRootNativeStack, thread_id));
1394  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
1395    for (size_t j = 0, count = cur->NumberOfReferences(); j < count; ++j) {
1396      buffered_visitor.VisitRootIfNonNull(cur->GetHandle(j).GetReference());
1397    }
1398  }
1399}
1400
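// Decodes a jobject into a mirror::Object* based on its indirect reference kind (local,
// handle scope, global or weak global). Invalid or deleted references trigger JniAbortF;
// cleared weak globals decode to nullptr.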
1401mirror::Object* Thread::DecodeJObject(jobject obj) const {
1402  if (obj == nullptr) {
1403    return nullptr;
1404  }
1405  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
1406  IndirectRefKind kind = GetIndirectRefKind(ref);
1407  mirror::Object* result;
1408  bool expect_null = false;
1409  // The "kinds" below are sorted by the frequency we expect to encounter them.
1410  if (kind == kLocal) {
1411    IndirectReferenceTable& locals = tlsPtr_.jni_env->locals;
1412    // Local references do not need a read barrier.
1413    result = locals.Get<kWithoutReadBarrier>(ref);
1414  } else if (kind == kHandleScopeOrInvalid) {
1415    // TODO: make stack indirect reference table lookup more efficient.
1416    // Check if this is a local reference in the handle scope.
1417    if (LIKELY(HandleScopeContains(obj))) {
1418      // Read from handle scope.
1419      result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
1420      VerifyObject(result);
1421    } else {
1422      tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of invalid jobject %p", obj);
1423      expect_null = true;
1424      result = nullptr;
1425    }
1426  } else if (kind == kGlobal) {
1427    result = tlsPtr_.jni_env->vm->DecodeGlobal(const_cast<Thread*>(this), ref);
1428  } else {
1429    DCHECK_EQ(kind, kWeakGlobal);
1430    result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
1431    if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
1432      // This is a special case where it's okay to return nullptr.
1433      expect_null = true;
1434      result = nullptr;
1435    }
1436  }
1437
1438  if (UNLIKELY(!expect_null && result == nullptr)) {
1439    tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of deleted %s %p",
1440                                   ToStr<IndirectRefKind>(kind).c_str(), obj);
1441  }
1442  return result;
1443}
1444
1445// Implements java.lang.Thread.interrupted.
1446bool Thread::Interrupted() {
1447  MutexLock mu(Thread::Current(), *wait_mutex_);
1448  bool interrupted = IsInterruptedLocked();
1449  SetInterruptedLocked(false);
1450  return interrupted;
1451}
1452
1453// Implements java.lang.Thread.isInterrupted.
1454bool Thread::IsInterrupted() {
1455  MutexLock mu(Thread::Current(), *wait_mutex_);
1456  return IsInterruptedLocked();
1457}
1458
1459void Thread::Interrupt(Thread* self) {
1460  MutexLock mu(self, *wait_mutex_);
1461  if (interrupted_) {
1462    return;
1463  }
1464  interrupted_ = true;
1465  NotifyLocked(self);
1466}
1467
1468void Thread::Notify() {
1469  Thread* self = Thread::Current();
1470  MutexLock mu(self, *wait_mutex_);
1471  NotifyLocked(self);
1472}
1473
1474void Thread::NotifyLocked(Thread* self) {
1475  if (wait_monitor_ != nullptr) {
1476    wait_cond_->Signal(self);
1477  }
1478}
1479
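// Replaces the class loader override, deleting the previous global reference (if any) and
// pinning the new override with a fresh global reference.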
1480void Thread::SetClassLoaderOverride(jobject class_loader_override) {
1481  if (tlsPtr_.class_loader_override != nullptr) {
1482    GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override);
1483  }
1484  tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
1485}
1486
1487class CountStackDepthVisitor : public StackVisitor {
1488 public:
1489  explicit CountStackDepthVisitor(Thread* thread)
1490      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1491      : StackVisitor(thread, nullptr),
1492        depth_(0), skip_depth_(0), skipping_(true) {}
1493
1494  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1495    // We want to skip frames up to and including the exception's constructor.
1496    // Note that we also skip a frame if it doesn't have a method (namely the callee
1497    // save frame).
1498    mirror::ArtMethod* m = GetMethod();
1499    if (skipping_ && !m->IsRuntimeMethod() &&
1500        !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
1501      skipping_ = false;
1502    }
1503    if (!skipping_) {
1504      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
1505        ++depth_;
1506      }
1507    } else {
1508      ++skip_depth_;
1509    }
1510    return true;
1511  }
1512
1513  int GetDepth() const {
1514    return depth_;
1515  }
1516
1517  int GetSkipDepth() const {
1518    return skip_depth_;
1519  }
1520
1521 private:
1522  uint32_t depth_;
1523  uint32_t skip_depth_;
1524  bool skipping_;
1525};
1526
1527template<bool kTransactionActive>
1528class BuildInternalStackTraceVisitor : public StackVisitor {
1529 public:
1530  explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
1531      : StackVisitor(thread, nullptr), self_(self),
1532        skip_depth_(skip_depth), count_(0), dex_pc_trace_(nullptr), method_trace_(nullptr) {}
1533
1534  bool Init(int depth)
1535      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1536    // Allocate method trace with an extra slot that will hold the PC trace
1537    StackHandleScope<1> hs(self_);
1538    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1539    Handle<mirror::ObjectArray<mirror::Object>> method_trace(
1540        hs.NewHandle(class_linker->AllocObjectArray<mirror::Object>(self_, depth + 1)));
1541    if (method_trace.Get() == nullptr) {
1542      return false;
1543    }
1544    mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth);
1545    if (dex_pc_trace == nullptr) {
1546      return false;
1547    }
1548    // Save the PC trace in the last element of the method trace; this also places it into
1549    // the object graph.
1550    // The transaction mode is determined by the kTransactionActive template parameter.
1551    method_trace->Set<kTransactionActive>(depth, dex_pc_trace);
1552    // Set the Object*s and assert that no thread suspension is now possible.
1553    const char* last_no_suspend_cause =
1554        self_->StartAssertNoThreadSuspension("Building internal stack trace");
1555    CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
1556    method_trace_ = method_trace.Get();
1557    dex_pc_trace_ = dex_pc_trace;
1558    return true;
1559  }
1560
1561  virtual ~BuildInternalStackTraceVisitor() {
1562    if (method_trace_ != nullptr) {
1563      self_->EndAssertNoThreadSuspension(nullptr);
1564    }
1565  }
1566
1567  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1568    if (method_trace_ == nullptr || dex_pc_trace_ == nullptr) {
1569      return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
1570    }
1571    if (skip_depth_ > 0) {
1572      skip_depth_--;
1573      return true;
1574    }
1575    mirror::ArtMethod* m = GetMethod();
1576    if (m->IsRuntimeMethod()) {
1577      return true;  // Ignore runtime frames (in particular callee save).
1578    }
1579    method_trace_->Set<kTransactionActive>(count_, m);
1580    dex_pc_trace_->Set<kTransactionActive>(count_,
1581        m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc());
1582    ++count_;
1583    return true;
1584  }
1585
1586  mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
1587    return method_trace_;
1588  }
1589
1590 private:
1591  Thread* const self_;
1592  // How many more frames to skip.
1593  int32_t skip_depth_;
1594  // Current position down stack trace.
1595  uint32_t count_;
1596  // Array of dex PC values.
1597  mirror::IntArray* dex_pc_trace_;
1598  // An array of the methods on the stack, the last entry is a reference to the PC trace.
1599  mirror::ObjectArray<mirror::Object>* method_trace_;
1600};
1601
1602template<bool kTransactionActive>
1603jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
1604  // Compute depth of stack
1605  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
1606  count_visitor.WalkStack();
1607  int32_t depth = count_visitor.GetDepth();
1608  int32_t skip_depth = count_visitor.GetSkipDepth();
1609
1610  // Build internal stack trace.
1611  BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(),
1612                                                                         const_cast<Thread*>(this),
1613                                                                         skip_depth);
1614  if (!build_trace_visitor.Init(depth)) {
1615    return nullptr;  // Allocation failed.
1616  }
1617  build_trace_visitor.WalkStack();
1618  mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
1619  if (kIsDebugBuild) {
1620    for (int32_t i = 0; i < trace->GetLength(); ++i) {
1621      CHECK(trace->Get(i) != nullptr);
1622    }
1623  }
1624  return soa.AddLocalReference<jobjectArray>(trace);
1625}
1626template jobject Thread::CreateInternalStackTrace<false>(
1627    const ScopedObjectAccessAlreadyRunnable& soa) const;
1628template jobject Thread::CreateInternalStackTrace<true>(
1629    const ScopedObjectAccessAlreadyRunnable& soa) const;
1630
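// An exception is treated as thrown by the current method when the stack depth recorded in
// the exception matches the current stack depth.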
1631bool Thread::IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const {
1632  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
1633  count_visitor.WalkStack();
1634  return count_visitor.GetDepth() == exception->GetStackDepth();
1635}
1636
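// Converts an internal stack trace (a method array whose last slot holds the dex PC array)
// into an array of java.lang.StackTraceElement, reusing output_array when one is provided.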
1637jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
1638    const ScopedObjectAccessAlreadyRunnable& soa, jobject internal, jobjectArray output_array,
1639    int* stack_depth) {
1640  // Decode the internal stack trace into the depth, method trace and PC trace
1641  int32_t depth = soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal)->GetLength() - 1;
1642
1643  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1644
1645  jobjectArray result;
1646
1647  if (output_array != nullptr) {
1648    // Reuse the array we were given.
1649    result = output_array;
1650    // ...adjusting the number of frames we'll write to not exceed the array length.
1651    const int32_t traces_length =
1652        soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->GetLength();
1653    depth = std::min(depth, traces_length);
1654  } else {
1655    // Create java_trace array and place in local reference table
1656    mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
1657        class_linker->AllocStackTraceElementArray(soa.Self(), depth);
1658    if (java_traces == nullptr) {
1659      return nullptr;
1660    }
1661    result = soa.AddLocalReference<jobjectArray>(java_traces);
1662  }
1663
1664  if (stack_depth != nullptr) {
1665    *stack_depth = depth;
1666  }
1667
1668  for (int32_t i = 0; i < depth; ++i) {
1669    mirror::ObjectArray<mirror::Object>* method_trace =
1670          soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
1671    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
1672    mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i));
1673    int32_t line_number;
1674    StackHandleScope<3> hs(soa.Self());
1675    auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
1676    auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
1677    if (method->IsProxyMethod()) {
1678      line_number = -1;
1679      class_name_object.Assign(method->GetDeclaringClass()->GetName());
1680      // source_name_object intentionally left null for proxy methods
1681    } else {
1682      mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
1683      uint32_t dex_pc = pc_trace->Get(i);
1684      line_number = method->GetLineNumFromDexPC(dex_pc);
1685      // Allocate element, potentially triggering GC
1686      // TODO: reuse class_name_object via Class::name_?
1687      const char* descriptor = method->GetDeclaringClassDescriptor();
1688      CHECK(descriptor != nullptr);
1689      std::string class_name(PrettyDescriptor(descriptor));
1690      class_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
1691      if (class_name_object.Get() == nullptr) {
1692        return nullptr;
1693      }
1694      const char* source_file = method->GetDeclaringClassSourceFile();
1695      if (source_file != nullptr) {
1696        source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
1697        if (source_name_object.Get() == nullptr) {
1698          return nullptr;
1699        }
1700      }
1701    }
1702    const char* method_name = method->GetName();
1703    CHECK(method_name != nullptr);
1704    Handle<mirror::String> method_name_object(
1705        hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
1706    if (method_name_object.Get() == nullptr) {
1707      return nullptr;
1708    }
1709    mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
1710        soa.Self(), class_name_object, method_name_object, source_name_object, line_number);
1711    if (obj == nullptr) {
1712      return nullptr;
1713    }
1714    // We are called from native: use non-transactional mode.
1715    soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->Set<false>(i, obj);
1716  }
1717  return result;
1718}
1719
1720void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
1721  va_list args;
1722  va_start(args, fmt);
1723  ThrowNewExceptionV(exception_class_descriptor, fmt, args);
1724  va_end(args);
1725}
1726
1727void Thread::ThrowNewExceptionV(const char* exception_class_descriptor,
1728                                const char* fmt, va_list ap) {
1729  std::string msg;
1730  StringAppendV(&msg, fmt, ap);
1731  ThrowNewException(exception_class_descriptor, msg.c_str());
1732}
1733
1734void Thread::ThrowNewException(const char* exception_class_descriptor,
1735                               const char* msg) {
1736  // Callers should either clear or call ThrowNewWrappedException.
1737  AssertNoPendingExceptionForNewException(msg);
1738  ThrowNewWrappedException(exception_class_descriptor, msg);
1739}
1740
1741static mirror::ClassLoader* GetCurrentClassLoader(Thread* self)
1742    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1743  mirror::ArtMethod* method = self->GetCurrentMethod(nullptr);
1744  return method != nullptr
1745      ? method->GetDeclaringClass()->GetClassLoader()
1746      : nullptr;
1747}
1748
1749void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
1750                                      const char* msg) {
1751  DCHECK_EQ(this, Thread::Current());
1752  ScopedObjectAccessUnchecked soa(this);
1753  StackHandleScope<3> hs(soa.Self());
1754  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self())));
1755  ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException()));
1756  ClearException();
1757  Runtime* runtime = Runtime::Current();
1758  Handle<mirror::Class> exception_class(
1759      hs.NewHandle(runtime->GetClassLinker()->FindClass(this, exception_class_descriptor,
1760                                                        class_loader)));
1761  if (UNLIKELY(exception_class.Get() == nullptr)) {
1762    CHECK(IsExceptionPending());
1763    LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
1764    return;
1765  }
1766
1767  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true,
1768                                                             true))) {
1769    DCHECK(IsExceptionPending());
1770    return;
1771  }
1772  DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
1773  Handle<mirror::Throwable> exception(
1774      hs.NewHandle(down_cast<mirror::Throwable*>(exception_class->AllocObject(this))));
1775
1776  // If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
1777  if (exception.Get() == nullptr) {
1778    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1779    return;
1780  }
1781
1782  // Choose an appropriate constructor and set up the arguments.
1783  const char* signature;
1784  ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr);
1785  if (msg != nullptr) {
1786    // Ensure we remember this and the method over the String allocation.
1787    msg_string.reset(
1788        soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg)));
1789    if (UNLIKELY(msg_string.get() == nullptr)) {
1790      CHECK(IsExceptionPending());  // OOME.
1791      return;
1792    }
1793    if (cause.get() == nullptr) {
1794      signature = "(Ljava/lang/String;)V";
1795    } else {
1796      signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
1797    }
1798  } else {
1799    if (cause.get() == nullptr) {
1800      signature = "()V";
1801    } else {
1802      signature = "(Ljava/lang/Throwable;)V";
1803    }
1804  }
1805  mirror::ArtMethod* exception_init_method =
1806      exception_class->FindDeclaredDirectMethod("<init>", signature);
1807
1808  CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
1809      << PrettyDescriptor(exception_class_descriptor);
1810
1811  if (UNLIKELY(!runtime->IsStarted())) {
1812    // Something is trying to throw an exception without a started runtime, which is the common
1813    // case in the compiler. We won't be able to invoke the constructor of the exception, so set
1814    // the exception fields directly.
1815    if (msg != nullptr) {
1816      exception->SetDetailMessage(down_cast<mirror::String*>(DecodeJObject(msg_string.get())));
1817    }
1818    if (cause.get() != nullptr) {
1819      exception->SetCause(down_cast<mirror::Throwable*>(DecodeJObject(cause.get())));
1820    }
1821    ScopedLocalRef<jobject> trace(GetJniEnv(),
1822                                  Runtime::Current()->IsActiveTransaction()
1823                                      ? CreateInternalStackTrace<true>(soa)
1824                                      : CreateInternalStackTrace<false>(soa));
1825    if (trace.get() != nullptr) {
1826      exception->SetStackState(down_cast<mirror::Throwable*>(DecodeJObject(trace.get())));
1827    }
1828    SetException(exception.Get());
1829  } else {
1830    jvalue jv_args[2];
1831    size_t i = 0;
1832
1833    if (msg != nullptr) {
1834      jv_args[i].l = msg_string.get();
1835      ++i;
1836    }
1837    if (cause.get() != nullptr) {
1838      jv_args[i].l = cause.get();
1839      ++i;
1840    }
1841    InvokeWithJValues(soa, exception.Get(), soa.EncodeMethod(exception_init_method), jv_args);
1842    if (LIKELY(!IsExceptionPending())) {
1843      SetException(exception.Get());
1844    }
1845  }
1846}
1847
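// Throws OutOfMemoryError with the given message. If we are already throwing one, logs a
// stack dump and falls back to the pre-allocated OOME to avoid recursing.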
1848void Thread::ThrowOutOfMemoryError(const char* msg) {
1849  LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
1850      msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : ""));
1851  if (!tls32_.throwing_OutOfMemoryError) {
1852    tls32_.throwing_OutOfMemoryError = true;
1853    ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
1854    tls32_.throwing_OutOfMemoryError = false;
1855  } else {
1856    Dump(LOG(WARNING));  // The pre-allocated OOME has no stack, so help out and log one.
1857    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1858  }
1859}
1860
1861Thread* Thread::CurrentFromGdb() {
1862  return Thread::Current();
1863}
1864
1865void Thread::DumpFromGdb() const {
1866  std::ostringstream ss;
1867  Dump(ss);
1868  std::string str(ss.str());
1869  // Log to stderr for debugging command line processes.
1870  std::cerr << str;
1871#ifdef HAVE_ANDROID_OS
1872  // Log to logcat for debugging frameworks processes.
1873  LOG(INFO) << str;
1874#endif
1875}
1876
1877// Explicitly instantiate 32-bit and 64-bit thread offset dumping support.
1878template void Thread::DumpThreadOffset<4>(std::ostream& os, uint32_t offset);
1879template void Thread::DumpThreadOffset<8>(std::ostream& os, uint32_t offset);
1880
1881template<size_t ptr_size>
1882void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
1883#define DO_THREAD_OFFSET(x, y) \
1884    if (offset == x.Uint32Value()) { \
1885      os << y; \
1886      return; \
1887    }
1888  DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags")
1889  DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table")
1890  DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception")
1891  DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer")
1892  DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env")
1893  DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self")
1894  DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end")
1895  DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id")
1896  DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method")
1897  DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame")
1898  DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope")
1899  DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
1900#undef DO_THREAD_OFFSET
1901
1902#define INTERPRETER_ENTRY_POINT_INFO(x) \
1903    if (INTERPRETER_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
1904      os << #x; \
1905      return; \
1906    }
1907  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge)
1908  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge)
1909#undef INTERPRETER_ENTRY_POINT_INFO
1910
1911#define JNI_ENTRY_POINT_INFO(x) \
1912    if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
1913      os << #x; \
1914      return; \
1915    }
1916  JNI_ENTRY_POINT_INFO(pDlsymLookup)
1917#undef JNI_ENTRY_POINT_INFO
1918
1919#define QUICK_ENTRY_POINT_INFO(x) \
1920    if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
1921      os << #x; \
1922      return; \
1923    }
1924  QUICK_ENTRY_POINT_INFO(pAllocArray)
1925  QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
1926  QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck)
1927  QUICK_ENTRY_POINT_INFO(pAllocObject)
1928  QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
1929  QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
1930  QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck)
1931  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray)
1932  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck)
1933  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
1934  QUICK_ENTRY_POINT_INFO(pCheckCast)
1935  QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
1936  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess)
1937  QUICK_ENTRY_POINT_INFO(pInitializeType)
1938  QUICK_ENTRY_POINT_INFO(pResolveString)
1939  QUICK_ENTRY_POINT_INFO(pSet8Instance)
1940  QUICK_ENTRY_POINT_INFO(pSet8Static)
1941  QUICK_ENTRY_POINT_INFO(pSet16Instance)
1942  QUICK_ENTRY_POINT_INFO(pSet16Static)
1943  QUICK_ENTRY_POINT_INFO(pSet32Instance)
1944  QUICK_ENTRY_POINT_INFO(pSet32Static)
1945  QUICK_ENTRY_POINT_INFO(pSet64Instance)
1946  QUICK_ENTRY_POINT_INFO(pSet64Static)
1947  QUICK_ENTRY_POINT_INFO(pSetObjInstance)
1948  QUICK_ENTRY_POINT_INFO(pSetObjStatic)
1949  QUICK_ENTRY_POINT_INFO(pGetByteInstance)
1950  QUICK_ENTRY_POINT_INFO(pGetBooleanInstance)
1951  QUICK_ENTRY_POINT_INFO(pGetByteStatic)
1952  QUICK_ENTRY_POINT_INFO(pGetBooleanStatic)
1953  QUICK_ENTRY_POINT_INFO(pGetShortInstance)
1954  QUICK_ENTRY_POINT_INFO(pGetCharInstance)
1955  QUICK_ENTRY_POINT_INFO(pGetShortStatic)
1956  QUICK_ENTRY_POINT_INFO(pGetCharStatic)
1957  QUICK_ENTRY_POINT_INFO(pGet32Instance)
1958  QUICK_ENTRY_POINT_INFO(pGet32Static)
1959  QUICK_ENTRY_POINT_INFO(pGet64Instance)
1960  QUICK_ENTRY_POINT_INFO(pGet64Static)
1961  QUICK_ENTRY_POINT_INFO(pGetObjInstance)
1962  QUICK_ENTRY_POINT_INFO(pGetObjStatic)
1963  QUICK_ENTRY_POINT_INFO(pAputObjectWithNullAndBoundCheck)
1964  QUICK_ENTRY_POINT_INFO(pAputObjectWithBoundCheck)
1965  QUICK_ENTRY_POINT_INFO(pAputObject)
1966  QUICK_ENTRY_POINT_INFO(pHandleFillArrayData)
1967  QUICK_ENTRY_POINT_INFO(pJniMethodStart)
1968  QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized)
1969  QUICK_ENTRY_POINT_INFO(pJniMethodEnd)
1970  QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized)
1971  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference)
1972  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized)
1973  QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline)
1974  QUICK_ENTRY_POINT_INFO(pLockObject)
1975  QUICK_ENTRY_POINT_INFO(pUnlockObject)
1976  QUICK_ENTRY_POINT_INFO(pCmpgDouble)
1977  QUICK_ENTRY_POINT_INFO(pCmpgFloat)
1978  QUICK_ENTRY_POINT_INFO(pCmplDouble)
1979  QUICK_ENTRY_POINT_INFO(pCmplFloat)
1980  QUICK_ENTRY_POINT_INFO(pFmod)
1981  QUICK_ENTRY_POINT_INFO(pL2d)
1982  QUICK_ENTRY_POINT_INFO(pFmodf)
1983  QUICK_ENTRY_POINT_INFO(pL2f)
1984  QUICK_ENTRY_POINT_INFO(pD2iz)
1985  QUICK_ENTRY_POINT_INFO(pF2iz)
1986  QUICK_ENTRY_POINT_INFO(pIdivmod)
1987  QUICK_ENTRY_POINT_INFO(pD2l)
1988  QUICK_ENTRY_POINT_INFO(pF2l)
1989  QUICK_ENTRY_POINT_INFO(pLdiv)
1990  QUICK_ENTRY_POINT_INFO(pLmod)
1991  QUICK_ENTRY_POINT_INFO(pLmul)
1992  QUICK_ENTRY_POINT_INFO(pShlLong)
1993  QUICK_ENTRY_POINT_INFO(pShrLong)
1994  QUICK_ENTRY_POINT_INFO(pUshrLong)
1995  QUICK_ENTRY_POINT_INFO(pIndexOf)
1996  QUICK_ENTRY_POINT_INFO(pStringCompareTo)
1997  QUICK_ENTRY_POINT_INFO(pMemcpy)
1998  QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline)
1999  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline)
2000  QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge)
2001  QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck)
2002  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck)
2003  QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck)
2004  QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck)
2005  QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck)
2006  QUICK_ENTRY_POINT_INFO(pTestSuspend)
2007  QUICK_ENTRY_POINT_INFO(pDeliverException)
2008  QUICK_ENTRY_POINT_INFO(pThrowArrayBounds)
2009  QUICK_ENTRY_POINT_INFO(pThrowDivZero)
2010  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod)
2011  QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
2012  QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
2013  QUICK_ENTRY_POINT_INFO(pDeoptimize)
2014  QUICK_ENTRY_POINT_INFO(pA64Load)
2015  QUICK_ENTRY_POINT_INFO(pA64Store)
2016#undef QUICK_ENTRY_POINT_INFO
2017
2018  os << offset;
2019}
2020
2021void Thread::QuickDeliverException() {
2022  // Get exception from thread.
2023  mirror::Throwable* exception = GetException();
2024  CHECK(exception != nullptr);
2025  // Don't leave exception visible while we try to find the handler, which may cause class
2026  // resolution.
2027  ClearException();
2028  bool is_deoptimization = (exception == GetDeoptimizationException());
2029  QuickExceptionHandler exception_handler(this, is_deoptimization);
2030  if (is_deoptimization) {
2031    exception_handler.DeoptimizeStack();
2032  } else {
2033    exception_handler.FindCatch(exception);
2034  }
2035  exception_handler.UpdateInstrumentationStack();
2036  exception_handler.DoLongJump();
2037}
2038
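// Returns a context for performing a long jump, reusing the cached tlsPtr_.long_jump_context
// when present and clearing the cache so the context is not shared.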
2039Context* Thread::GetLongJumpContext() {
2040  Context* result = tlsPtr_.long_jump_context;
2041  if (result == nullptr) {
2042    result = Context::Create();
2043  } else {
2044    tlsPtr_.long_jump_context = nullptr;  // Avoid context being shared.
2045    result->Reset();
2046  }
2047  return result;
2048}
2049
2050// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
2051//       so we don't abort in a special situation (thin-locked monitor) when dumping the Java stack.
2052struct CurrentMethodVisitor FINAL : public StackVisitor {
2053  CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
2054      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2055      : StackVisitor(thread, context), this_object_(nullptr), method_(nullptr), dex_pc_(0),
2056        abort_on_error_(abort_on_error) {}
2057  bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2058    mirror::ArtMethod* m = GetMethod();
2059    if (m->IsRuntimeMethod()) {
2060      // Continue if this is a runtime method.
2061      return true;
2062    }
2063    if (context_ != nullptr) {
2064      this_object_ = GetThisObject();
2065    }
2066    method_ = m;
2067    dex_pc_ = GetDexPc(abort_on_error_);
2068    return false;
2069  }
2070  mirror::Object* this_object_;
2071  mirror::ArtMethod* method_;
2072  uint32_t dex_pc_;
2073  const bool abort_on_error_;
2074};
2075
2076mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
2077  CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
2078  visitor.WalkStack(false);
2079  if (dex_pc != nullptr) {
2080    *dex_pc = visitor.dex_pc_;
2081  }
2082  return visitor.method_;
2083}
2084
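// Returns true if this thread's id matches the lock owner id recorded in the object's monitor.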
2085bool Thread::HoldsLock(mirror::Object* object) const {
2086  if (object == nullptr) {
2087    return false;
2088  }
2089  return object->GetLockOwnerThreadId() == GetThreadId();
2090}
2091
2092// RootVisitor parameters are: (mirror::Object** obj, size_t vreg, const StackVisitor* visitor).
2093template <typename RootVisitor>
2094class ReferenceMapVisitor : public StackVisitor {
2095 public:
2096  ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
2097      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2098      : StackVisitor(thread, context), visitor_(visitor) {}
2099
2100  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2101    if (false) {
2102      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
2103                << StringPrintf("@ PC:%04x", GetDexPc());
2104    }
2105    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
2106    if (shadow_frame != nullptr) {
2107      VisitShadowFrame(shadow_frame);
2108    } else {
2109      VisitQuickFrame();
2110    }
2111    return true;
2112  }
2113
2114  void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2115    mirror::ArtMethod** method_addr = shadow_frame->GetMethodAddress();
2116    visitor_(reinterpret_cast<mirror::Object**>(method_addr), 0 /*ignored*/, this);
2117    mirror::ArtMethod* m = *method_addr;
2118    DCHECK(m != nullptr);
2119    size_t num_regs = shadow_frame->NumberOfVRegs();
2120    if (m->IsNative() || shadow_frame->HasReferenceArray()) {
2121      // Handle scope for JNI or references for the interpreter.
2122      for (size_t reg = 0; reg < num_regs; ++reg) {
2123        mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2124        if (ref != nullptr) {
2125          mirror::Object* new_ref = ref;
2126          visitor_(&new_ref, reg, this);
2127          if (new_ref != ref) {
2128            shadow_frame->SetVRegReference(reg, new_ref);
2129          }
2130        }
2131      }
2132    } else {
2133      // Java method.
2134      // The portable path uses DexGcMap and stores it in Method.native_gc_map_.
2135      const uint8_t* gc_map = m->GetNativeGcMap(sizeof(void*));
2136      CHECK(gc_map != nullptr) << PrettyMethod(m);
2137      verifier::DexPcToReferenceMap dex_gc_map(gc_map);
2138      uint32_t dex_pc = shadow_frame->GetDexPC();
2139      const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
2140      DCHECK(reg_bitmap != nullptr);
2141      num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
2142      for (size_t reg = 0; reg < num_regs; ++reg) {
2143        if (TestBitmap(reg, reg_bitmap)) {
2144          mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2145          if (ref != nullptr) {
2146            mirror::Object* new_ref = ref;
2147            visitor_(&new_ref, reg, this);
2148            if (new_ref != ref) {
2149              shadow_frame->SetVRegReference(reg, new_ref);
2150            }
2151          }
2152        }
2153      }
2154    }
2155  }
2156
2157 private:
2158  void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2159    StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame();
2160    mirror::ArtMethod* m = cur_quick_frame->AsMirrorPtr();
2161    mirror::ArtMethod* old_method = m;
2162    visitor_(reinterpret_cast<mirror::Object**>(&m), 0 /*ignored*/, this);
2163    if (m != old_method) {
2164      cur_quick_frame->Assign(m);
2165    }
2166
2167    // Process register map (which native and runtime methods don't have)
2168    if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
2169      if (m->IsOptimized(sizeof(void*))) {
2170        Runtime* runtime = Runtime::Current();
2171        const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
2172        uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
2173        CodeInfo code_info = m->GetOptimizedCodeInfo();
2174        StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
2175        MemoryRegion mask = map.GetStackMask(code_info);
2176        // Visit stack entries that hold pointers.
2177        for (size_t i = 0; i < mask.size_in_bits(); ++i) {
2178          if (mask.LoadBit(i)) {
2179            StackReference<mirror::Object>* ref_addr =
2180                  reinterpret_cast<StackReference<mirror::Object>*>(cur_quick_frame) + i;
2181            mirror::Object* ref = ref_addr->AsMirrorPtr();
2182            if (ref != nullptr) {
2183              mirror::Object* new_ref = ref;
2184              visitor_(&new_ref, -1, this);
2185              if (ref != new_ref) {
2186                ref_addr->Assign(new_ref);
2187              }
2188            }
2189          }
2190        }
2191        // Visit callee-save registers that hold pointers.
2192        uint32_t register_mask = map.GetRegisterMask(code_info);
2193        for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
2194          if (register_mask & (1 << i)) {
2195            mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
2196            if (*ref_addr != nullptr) {
2197              visitor_(ref_addr, -1, this);
2198            }
2199          }
2200        }
2201      } else {
2202        const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
2203        CHECK(native_gc_map != nullptr) << PrettyMethod(m);
2204        const DexFile::CodeItem* code_item = m->GetCodeItem();
2205        // Can't be nullptr or how would we compile its instructions?
2206        DCHECK(code_item != nullptr) << PrettyMethod(m);
2207        NativePcOffsetToReferenceMap map(native_gc_map);
2208        size_t num_regs = std::min(map.RegWidth() * 8,
2209                                   static_cast<size_t>(code_item->registers_size_));
2210        if (num_regs > 0) {
2211          Runtime* runtime = Runtime::Current();
2212          const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
2213          uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
2214          const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
2215          DCHECK(reg_bitmap != nullptr);
2216          const void* code_pointer = mirror::ArtMethod::EntryPointToCodePointer(entry_point);
2217          const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
2218          QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
2219          // For all dex registers in the bitmap
2220          DCHECK(cur_quick_frame != nullptr);
2221          for (size_t reg = 0; reg < num_regs; ++reg) {
2222            // Does this register hold a reference?
2223            if (TestBitmap(reg, reg_bitmap)) {
2224              uint32_t vmap_offset;
2225              if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
2226                int vmap_reg = vmap_table.ComputeRegister(frame_info.CoreSpillMask(), vmap_offset,
2227                                                          kReferenceVReg);
2228                // This is sound as spilled GPRs will be word sized (i.e. 32-bit or 64-bit).
2229                mirror::Object** ref_addr =
2230                    reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
2231                if (*ref_addr != nullptr) {
2232                  visitor_(ref_addr, reg, this);
2233                }
2234              } else {
2235                StackReference<mirror::Object>* ref_addr =
2236                    reinterpret_cast<StackReference<mirror::Object>*>(GetVRegAddrFromQuickCode(
2237                        cur_quick_frame, code_item, frame_info.CoreSpillMask(),
2238                        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), reg));
2239                mirror::Object* ref = ref_addr->AsMirrorPtr();
2240                if (ref != nullptr) {
2241                  mirror::Object* new_ref = ref;
2242                  visitor_(&new_ref, reg, this);
2243                  if (ref != new_ref) {
2244                    ref_addr->Assign(new_ref);
2245                  }
2246                }
2247              }
2248            }
2249          }
2250        }
2251      }
2252    }
2253  }
2254
2255  static bool TestBitmap(size_t reg, const uint8_t* reg_vector) {
2256    return ((reg_vector[reg / kBitsPerByte] >> (reg % kBitsPerByte)) & 0x01) != 0;
2257  }
2258
2259  // Visitor for when we visit a root.
2260  RootVisitor& visitor_;
2261};
2262
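// Adapts a RootVisitor to the (obj, vreg, stack_visitor) functor interface used by
// ReferenceMapVisitor, tagging each root as a Java frame root for this thread.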
2263class RootCallbackVisitor {
2264 public:
2265  RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}
2266
2267  void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
2268      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2269    visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
2270  }
2271
2272 private:
2273  RootVisitor* const visitor_;
2274  const uint32_t tid_;
2275};
2276
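// Visits this thread's roots: the peer, any pending (non-deoptimization) exception, JNI locals
// and monitors, handle scopes, debugger state, deoptimization and under-construction shadow
// frames, the method verifier, the managed stack and the instrumentation stack.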
2277void Thread::VisitRoots(RootVisitor* visitor) {
2278  const uint32_t thread_id = GetThreadId();
2279  visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
2280  if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
2281    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
2282                   RootInfo(kRootNativeStack, thread_id));
2283  }
2284  visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
2285  tlsPtr_.jni_env->locals.VisitRoots(visitor, RootInfo(kRootJNILocal, thread_id));
2286  tlsPtr_.jni_env->monitors.VisitRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
2287  HandleScopeVisitRoots(visitor, thread_id);
2288  if (tlsPtr_.debug_invoke_req != nullptr) {
2289    tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
2290  }
2291  if (tlsPtr_.single_step_control != nullptr) {
2292    tlsPtr_.single_step_control->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
2293  }
2294  if (tlsPtr_.deoptimization_shadow_frame != nullptr) {
2295    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
2296    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
2297    for (ShadowFrame* shadow_frame = tlsPtr_.deoptimization_shadow_frame; shadow_frame != nullptr;
2298        shadow_frame = shadow_frame->GetLink()) {
2299      mapper.VisitShadowFrame(shadow_frame);
2300    }
2301  }
2302  if (tlsPtr_.shadow_frame_under_construction != nullptr) {
2303    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
2304    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
2305    for (ShadowFrame* shadow_frame = tlsPtr_.shadow_frame_under_construction;
2306        shadow_frame != nullptr;
2307        shadow_frame = shadow_frame->GetLink()) {
2308      mapper.VisitShadowFrame(shadow_frame);
2309    }
2310  }
2311  if (tlsPtr_.method_verifier != nullptr) {
2312    tlsPtr_.method_verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id));
2313  }
2314  // Visit roots on this thread's stack
2315  Context* context = GetLongJumpContext();
2316  RootCallbackVisitor visitor_to_callback(visitor, thread_id);
2317  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context, visitor_to_callback);
2318  mapper.WalkStack();
2319  ReleaseLongJumpContext(context);
2320  for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
2321    visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
2322    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&frame.method_),
2323                       RootInfo(kRootVMInternal, thread_id));
2324  }
2325}
2326
2327class VerifyRootVisitor : public SingleRootVisitor {
2328 public:
2329  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
2330      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2331    VerifyObject(root);
2332  }
2333};
2334
2335void Thread::VerifyStackImpl() {
2336  VerifyRootVisitor visitor;
2337  std::unique_ptr<Context> context(Context::Create());
2338  RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId());
2339  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback);
2340  mapper.WalkStack();
2341}
2342
2343// Set the stack end to the value to be used while handling a stack overflow.
2344void Thread::SetStackEndForStackOverflow() {
2345  // During stack overflow we allow use of the full stack.
2346  if (tlsPtr_.stack_end == tlsPtr_.stack_begin) {
2347    // However, we seem to have already extended to use the full stack.
2348    LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
2349               << GetStackOverflowReservedBytes(kRuntimeISA) << ")?";
2350    DumpStack(LOG(ERROR));
2351    LOG(FATAL) << "Recursive stack overflow.";
2352  }
2353
2354  tlsPtr_.stack_end = tlsPtr_.stack_begin;
2355
2356  // Remove the stack overflow protection if it is set up.
2357  bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks();
2358  if (implicit_stack_check) {
2359    if (!UnprotectStack()) {
2360      LOG(ERROR) << "Unable to remove stack protection for stack overflow";
2361    }
2362  }
2363}
2364
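// Installs a thread-local allocation buffer covering [start, end), resetting the allocation
// cursor and the object count.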
2365void Thread::SetTlab(uint8_t* start, uint8_t* end) {
2366  DCHECK_LE(start, end);
2367  tlsPtr_.thread_local_start = start;
2368  tlsPtr_.thread_local_pos  = tlsPtr_.thread_local_start;
2369  tlsPtr_.thread_local_end = end;
2370  tlsPtr_.thread_local_objects = 0;
2371}
2372
2373bool Thread::HasTlab() const {
2374  bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
2375  if (has_tlab) {
2376    DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr);
2377  } else {
2378    DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr);
2379  }
2380  return has_tlab;
2381}
2382
2383std::ostream& operator<<(std::ostream& os, const Thread& thread) {
2384  thread.ShortDump(os);
2385  return os;
2386}
2387
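// Protects (PROT_NONE) the implicit stack overflow guard region just below stack_begin so that
// running into it faults; UnprotectStack restores read/write access.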
2388void Thread::ProtectStack() {
2389  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
2390  VLOG(threads) << "Protecting stack at " << pregion;
2391  if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
2392    LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. "
2393        "Reason: "
2394        << strerror(errno) << " size: " << kStackOverflowProtectedSize;
2395  }
2396}
2397
2398bool Thread::UnprotectStack() {
2399  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
2400  VLOG(threads) << "Unprotecting stack at " << pregion;
2401  return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0;
2402}
2403
2404void Thread::ActivateSingleStepControl(SingleStepControl* ssc) {
2405  CHECK(Dbg::IsDebuggerActive());
2406  CHECK(GetSingleStepControl() == nullptr) << "Single step already active in thread " << *this;
2407  CHECK(ssc != nullptr);
2408  tlsPtr_.single_step_control = ssc;
2409}
2410
2411void Thread::DeactivateSingleStepControl() {
2412  CHECK(Dbg::IsDebuggerActive());
2413  CHECK(GetSingleStepControl() != nullptr) << "Single step not active in thread " << *this;
2414  SingleStepControl* ssc = GetSingleStepControl();
2415  tlsPtr_.single_step_control = nullptr;
2416  delete ssc;
2417}
2418
2419void Thread::SetDebugInvokeReq(DebugInvokeReq* req) {
2420  CHECK(Dbg::IsDebuggerActive());
2421  CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this;
2422  CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself";
2423  CHECK(req != nullptr);
2424  tlsPtr_.debug_invoke_req = req;
2425}
2426
2427void Thread::ClearDebugInvokeReq() {
2428  CHECK(Dbg::IsDebuggerActive());
2429  CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this;
2430  CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself";
2431  // We do not own the DebugInvokeReq*, so we must not delete it; that is the responsibility
2432  // of the owner (the JDWP thread).
2433  tlsPtr_.debug_invoke_req = nullptr;
2434}
2435
2436void Thread::SetVerifier(verifier::MethodVerifier* verifier) {
2437  CHECK(tlsPtr_.method_verifier == nullptr);
2438  tlsPtr_.method_verifier = verifier;
2439}
2440
2441void Thread::ClearVerifier(verifier::MethodVerifier* verifier) {
2442  CHECK_EQ(tlsPtr_.method_verifier, verifier);
2443  tlsPtr_.method_verifier = nullptr;
2444}
2445
2446}  // namespace art
2447