thread.cc revision 471b7cb6c1b8128bb343b49198f2654f9f5b8f86
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define ATRACE_TAG ATRACE_TAG_DALVIK
18
19#include "thread.h"
20
21#include <cutils/trace.h>
22#include <pthread.h>
23#include <signal.h>
24#include <sys/resource.h>
25#include <sys/time.h>
26
27#include <algorithm>
28#include <bitset>
29#include <cerrno>
30#include <iostream>
31#include <list>
32
33#include "arch/context.h"
34#include "base/mutex.h"
35#include "class_linker-inl.h"
36#include "class_linker.h"
37#include "debugger.h"
38#include "dex_file-inl.h"
39#include "entrypoints/entrypoint_utils.h"
40#include "entrypoints/quick/quick_alloc_entrypoints.h"
41#include "gc_map.h"
42#include "gc/accounting/card_table-inl.h"
43#include "gc/allocator/rosalloc.h"
44#include "gc/heap.h"
45#include "gc/space/space.h"
46#include "handle_scope-inl.h"
47#include "handle_scope.h"
48#include "indirect_reference_table-inl.h"
49#include "jni_internal.h"
50#include "mirror/art_field-inl.h"
51#include "mirror/art_method-inl.h"
52#include "mirror/class_loader.h"
53#include "mirror/class-inl.h"
54#include "mirror/object_array-inl.h"
55#include "mirror/stack_trace_element.h"
56#include "monitor.h"
57#include "object_lock.h"
58#include "quick_exception_handler.h"
59#include "quick/quick_method_frame_info.h"
60#include "reflection.h"
61#include "runtime.h"
62#include "scoped_thread_state_change.h"
63#include "ScopedLocalRef.h"
64#include "ScopedUtfChars.h"
65#include "stack.h"
66#include "thread_list.h"
67#include "thread-inl.h"
68#include "utils.h"
69#include "verifier/dex_gc_map.h"
70#include "verify_object-inl.h"
71#include "vmap_table.h"
72#include "well_known_classes.h"
73
74namespace art {
75
76bool Thread::is_started_ = false;
77pthread_key_t Thread::pthread_key_self_;
78ConditionVariable* Thread::resume_cond_ = nullptr;
79const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
80
81static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
82
83void Thread::InitCardTable() {
84  tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
85}
86
87static void UnimplementedEntryPoint() {
88  UNIMPLEMENTED(FATAL);
89}
90
91void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
92                     PortableEntryPoints* ppoints, QuickEntryPoints* qpoints);
93
94void Thread::InitTlsEntryPoints() {
95  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
96  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.interpreter_entrypoints);
97  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) +
98                                                sizeof(tlsPtr_.quick_entrypoints));
99  for (uintptr_t* it = begin; it != end; ++it) {
100    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
101  }
102  InitEntryPoints(&tlsPtr_.interpreter_entrypoints, &tlsPtr_.jni_entrypoints,
103                  &tlsPtr_.portable_entrypoints, &tlsPtr_.quick_entrypoints);
104}
105
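// Re-initializes just the allocation entrypoints for this thread, e.g. after allocation
// instrumentation is toggled or the heap switches allocators.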
106void Thread::ResetQuickAllocEntryPointsForThread() {
107  ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
108}
109
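// The deoptimization shadow frame and return value are stashed in thread-local storage here so
// that the interpreter can pick them up and resume execution of the deoptimized frames.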
110void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
111  tlsPtr_.deoptimization_shadow_frame = sf;
112}
113
114void Thread::SetDeoptimizationReturnValue(const JValue& ret_val) {
115  tls64_.deoptimization_return_value.SetJ(ret_val.GetJ());
116}
117
118ShadowFrame* Thread::GetAndClearDeoptimizationShadowFrame(JValue* ret_val) {
119  ShadowFrame* sf = tlsPtr_.deoptimization_shadow_frame;
120  tlsPtr_.deoptimization_shadow_frame = nullptr;
121  ret_val->SetJ(tls64_.deoptimization_return_value.GetJ());
122  return sf;
123}
124
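// Shadow frames that are still being filled in are chained through their link field and recorded
// here so that the thread's root visiting can still see their references.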
125void Thread::SetShadowFrameUnderConstruction(ShadowFrame* sf) {
126  sf->SetLink(tlsPtr_.shadow_frame_under_construction);
127  tlsPtr_.shadow_frame_under_construction = sf;
128}
129
130void Thread::ClearShadowFrameUnderConstruction() {
131  CHECK_NE(static_cast<ShadowFrame*>(nullptr), tlsPtr_.shadow_frame_under_construction);
132  tlsPtr_.shadow_frame_under_construction = tlsPtr_.shadow_frame_under_construction->GetLink();
133}
134
135void Thread::InitTid() {
136  tls32_.tid = ::art::GetTid();
137}
138
139void Thread::InitAfterFork() {
140  // One thread (us) survived the fork, but we have a new tid so we need to
141  // update the value stashed in this Thread*.
142  InitTid();
143}
144
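// pthread start routine for threads created from managed code (see CreateNativeThread below):
// finishes attaching the thread, runs the peer's run() method, then unregisters the thread.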
145void* Thread::CreateCallback(void* arg) {
146  Thread* self = reinterpret_cast<Thread*>(arg);
147  Runtime* runtime = Runtime::Current();
148  if (runtime == nullptr) {
149    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
150    return nullptr;
151  }
152  {
153    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
154    //       after self->Init().
155    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
156    // Check that if we got here we cannot be shutting down (as shutdown should never have started
157    // while threads are being born).
158    CHECK(!runtime->IsShuttingDownLocked());
159    self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
160    Runtime::Current()->EndThreadBirth();
161  }
162  {
163    ScopedObjectAccess soa(self);
164
165    // Copy peer into self, deleting global reference when done.
166    CHECK(self->tlsPtr_.jpeer != nullptr);
167    self->tlsPtr_.opeer = soa.Decode<mirror::Object*>(self->tlsPtr_.jpeer);
168    self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
169    self->tlsPtr_.jpeer = nullptr;
170    self->SetThreadName(self->GetThreadName(soa)->ToModifiedUtf8().c_str());
171
172    mirror::ArtField* priorityField = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority);
173    self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));
174    Dbg::PostThreadStart(self);
175
176    // Invoke the 'run' method of our java.lang.Thread.
177    mirror::Object* receiver = self->tlsPtr_.opeer;
178    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
179    InvokeVirtualOrInterfaceWithJValues(soa, receiver, mid, nullptr);
180  }
181  // Detach and delete self.
182  Runtime::Current()->GetThreadList()->Unregister(self);
183
184  return nullptr;
185}
186
187Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
188                                  mirror::Object* thread_peer) {
189  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
190  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
191  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
192  // to stop it from going away.
193  if (kIsDebugBuild) {
194    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
195    if (result != nullptr && !result->IsSuspended()) {
196      Locks::thread_list_lock_->AssertHeld(soa.Self());
197    }
198  }
199  return result;
200}
201
202Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
203                                  jobject java_thread) {
204  return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread));
205}
206
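// Computes the native stack size to request: applies the runtime default, a bionic-compatibility
// bump, the PTHREAD_STACK_MIN floor, and room for stack overflow detection, rounded to pages.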
207static size_t FixStackSize(size_t stack_size) {
208  // A stack size of zero means "use the default".
209  if (stack_size == 0) {
210    stack_size = Runtime::Current()->GetDefaultStackSize();
211  }
212
213  // Dalvik used the bionic pthread default stack size for native threads,
214  // so include that here to support apps that expect large native stacks.
215  stack_size += 1 * MB;
216
217  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
218  if (stack_size < PTHREAD_STACK_MIN) {
219    stack_size = PTHREAD_STACK_MIN;
220  }
221
222  if (Runtime::Current()->ExplicitStackOverflowChecks()) {
223    // It's likely that callers are trying to ensure they have at least a certain amount of
224    // stack space, so we should add our reserved space on top of what they requested, rather
225    // than implicitly take it away from them.
226    stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
227  } else {
228    // If we are going to use implicit stack checks, allocate space for the protected
229    // region at the bottom of the stack.
230    stack_size += Thread::kStackOverflowImplicitCheckSize +
231        GetStackOverflowReservedBytes(kRuntimeISA);
232  }
233
234  // Some systems require the stack size to be a multiple of the system page size, so round up.
235  stack_size = RoundUp(stack_size, kPageSize);
236
237  return stack_size;
238}
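// Illustrative example (the numbers here are hypothetical, not taken from any device): with a
// requested size of 0, a 1 MiB runtime default, the 1 MiB bump above, ~16 KiB of overflow
// reserve and 4 KiB pages, the result is RoundUp(1 MiB + 1 MiB + 16 KiB, 4 KiB) = 2 MiB + 16 KiB.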
239
240// Global variable to prevent the compiler optimizing away the page reads for the stack.
241byte dont_optimize_this;
242
243// Install a protected region in the stack.  This is used to trigger a SIGSEGV if a stack
244// overflow is detected.  It is located right below the stack_begin_.
245//
246// There is a little complexity here that deserves a special mention.  On some
247// architectures, the stack is created using the VM_GROWSDOWN flag
248// to prevent memory from being allocated when it's not needed.  This flag makes the
249// kernel allocate memory for the stack only by growing it down in memory.  Because we
250// want to put an mprotected region far away from that, at the stack top, we need
251// to make sure the pages for the stack are mapped in before we call mprotect.  We do
252// this by reading every page from the stack bottom (highest address) to the stack top.
253// We then madvise this away.
254void Thread::InstallImplicitProtection() {
255  byte* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
256  byte* stack_himem = tlsPtr_.stack_end;
257  byte* stack_top = reinterpret_cast<byte*>(reinterpret_cast<uintptr_t>(&stack_himem) &
258      ~(kPageSize - 1));    // Page containing current top of stack.
259
260  // First remove the protection on the protected region as we will want to read and
261  // write it.  This may fail (on the first attempt when the stack is not mapped)
262  // but we ignore that.
263  UnprotectStack();
264
265  // Map in the stack.  This must be done by reading from the
266  // current stack pointer downwards as the stack may be mapped using VM_GROWSDOWN
267  // in the kernel.  Any access more than a page below the current SP might cause
268  // a segv.
269
270  // Read every page from the high address to the low.
271  for (byte* p = stack_top; p >= pregion; p -= kPageSize) {
272    dont_optimize_this = *p;
273  }
274
275  VLOG(threads) << "installing stack protected region at " << std::hex <<
276      static_cast<void*>(pregion) << " to " <<
277      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
278
279  // Protect the bottom of the stack to prevent read/write to it.
280  ProtectStack();
281
282  // Tell the kernel that we won't be needing these pages any more.
283  // NB. madvise(MADV_DONTNEED) discards the pages, so on Linux they read back as zero-filled.
284  uint32_t unwanted_size = stack_top - pregion - kPageSize;
285  madvise(pregion, unwanted_size, MADV_DONTNEED);
286}
287
288void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
289  CHECK(java_peer != nullptr);
290  Thread* self = static_cast<JNIEnvExt*>(env)->self;
291  Runtime* runtime = Runtime::Current();
292
293  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
294  bool thread_start_during_shutdown = false;
295  {
296    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
297    if (runtime->IsShuttingDownLocked()) {
298      thread_start_during_shutdown = true;
299    } else {
300      runtime->StartThreadBirth();
301    }
302  }
303  if (thread_start_during_shutdown) {
304    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
305    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
306    return;
307  }
308
309  Thread* child_thread = new Thread(is_daemon);
310  // Use global JNI ref to hold peer live while child thread starts.
311  child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
312  stack_size = FixStackSize(stack_size);
313
314  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to
315  // assign it.
316  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
317                    reinterpret_cast<jlong>(child_thread));
318
319  pthread_t new_pthread;
320  pthread_attr_t attr;
321  CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
322  CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED");
323  CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
324  int pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, child_thread);
325  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");
326
327  if (pthread_create_result != 0) {
328    // pthread_create(3) failed, so clean up.
329    {
330      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
331      runtime->EndThreadBirth();
332    }
333    // Manually delete the global reference since Thread::Init will not have been run.
334    env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer);
335    child_thread->tlsPtr_.jpeer = nullptr;
336    delete child_thread;
337    child_thread = nullptr;
338    // TODO: remove from thread group?
339    env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
340    {
341      std::string msg(StringPrintf("pthread_create (%s stack) failed: %s",
342                                   PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
343      ScopedObjectAccess soa(env);
344      soa.Self()->ThrowOutOfMemoryError(msg.c_str());
345    }
346  }
347}
348
349void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
350  // This function does all the initialization that must be run by the native thread it applies to.
351  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
352  // we can handshake with the corresponding native thread when it's ready.) Check this native
353  // thread hasn't been through here already...
354  CHECK(Thread::Current() == nullptr);
355  SetUpAlternateSignalStack();
356  InitCpu();
357  InitTlsEntryPoints();
358  RemoveSuspendTrigger();
359  InitCardTable();
360  InitTid();
361  // Set pthread_self_ ahead of pthread_setspecific, which makes Thread::Current() work; this
362  // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
363  tlsPtr_.pthread_self = pthread_self();
364  CHECK(is_started_);
365  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
366  DCHECK_EQ(Thread::Current(), this);
367
368  tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
369  InitStackHwm();
370
371  tlsPtr_.jni_env = new JNIEnvExt(this, java_vm);
372  thread_list->Register(this);
373}
374
375Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
376                       bool create_peer) {
377  Thread* self;
378  Runtime* runtime = Runtime::Current();
379  if (runtime == nullptr) {
380    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
381    return nullptr;
382  }
383  {
384    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
385    if (runtime->IsShuttingDownLocked()) {
386      LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
387      return nullptr;
388    } else {
389      Runtime::Current()->StartThreadBirth();
390      self = new Thread(as_daemon);
391      self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
392      Runtime::Current()->EndThreadBirth();
393    }
394  }
395
396  CHECK_NE(self->GetState(), kRunnable);
397  self->SetState(kNative);
398
399  // If we're the main thread, ClassLinker won't be created until after we're attached,
400  // so that thread needs a two-stage attach. Regular threads don't need this hack.
401  // In the compiler, all threads need this hack, because no-one's going to be getting
402  // a native peer!
403  if (create_peer) {
404    self->CreatePeer(thread_name, as_daemon, thread_group);
405  } else {
406    // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
407    if (thread_name != nullptr) {
408      self->tlsPtr_.name->assign(thread_name);
409      ::art::SetThreadName(thread_name);
410    } else if (self->GetJniEnv()->check_jni) {
411      LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
412    }
413  }
414
415  {
416    ScopedObjectAccess soa(self);
417    Dbg::PostThreadStart(self);
418  }
419
420  return self;
421}
422
423void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
424  Runtime* runtime = Runtime::Current();
425  CHECK(runtime->IsStarted());
426  JNIEnv* env = tlsPtr_.jni_env;
427
428  if (thread_group == nullptr) {
429    thread_group = runtime->GetMainThreadGroup();
430  }
431  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
432  if (name != nullptr && thread_name.get() == nullptr) {
433    CHECK(IsExceptionPending());
434    return;
435  }
436  jint thread_priority = GetNativePriority();
437  jboolean thread_is_daemon = as_daemon;
438
439  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
440  if (peer.get() == nullptr) {
441    CHECK(IsExceptionPending());
442    return;
443  }
444  {
445    ScopedObjectAccess soa(this);
446    tlsPtr_.opeer = soa.Decode<mirror::Object*>(peer.get());
447  }
448  env->CallNonvirtualVoidMethod(peer.get(),
449                                WellKnownClasses::java_lang_Thread,
450                                WellKnownClasses::java_lang_Thread_init,
451                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
452  AssertNoPendingException();
453
454  Thread* self = this;
455  DCHECK_EQ(self, Thread::Current());
456  env->SetLongField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
457                    reinterpret_cast<jlong>(self));
458
459  ScopedObjectAccess soa(self);
460  StackHandleScope<1> hs(self);
461  Handle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName(soa)));
462  if (peer_thread_name.Get() == nullptr) {
463    // The Thread constructor should have set the Thread.name to a
464    // non-null value. However, because we can run without code
465    // available (in the compiler, in tests), we manually assign the
466    // fields the constructor should have set.
467    if (runtime->IsActiveTransaction()) {
468      InitPeer<true>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
469    } else {
470      InitPeer<false>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
471    }
472    peer_thread_name.Assign(GetThreadName(soa));
473  }
474  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
475  if (peer_thread_name.Get() != nullptr) {
476    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
477  }
478}
479
480template<bool kTransactionActive>
481void Thread::InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
482                      jobject thread_name, jint thread_priority) {
483  soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
484      SetBoolean<kTransactionActive>(tlsPtr_.opeer, thread_is_daemon);
485  soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
486      SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object*>(thread_group));
487  soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
488      SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object*>(thread_name));
489  soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
490      SetInt<kTransactionActive>(tlsPtr_.opeer, thread_priority);
491}
492
493void Thread::SetThreadName(const char* name) {
494  tlsPtr_.name->assign(name);
495  ::art::SetThreadName(name);
496  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
497}
498
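// Records the native stack bounds for this thread ("Hwm" = high water mark) and computes
// stack_end_, the limit used by the stack overflow checks.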
499void Thread::InitStackHwm() {
500  void* read_stack_base;
501  size_t read_stack_size;
502  size_t read_guard_size;
503  GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size);
504
505  // This is included in the SIGQUIT output, but it's useful here for thread debugging.
506  VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)",
507                                read_stack_base,
508                                PrettySize(read_stack_size).c_str(),
509                                PrettySize(read_guard_size).c_str());
510
511  tlsPtr_.stack_begin = reinterpret_cast<byte*>(read_stack_base);
512  tlsPtr_.stack_size = read_stack_size;
513
514  // The minimum stack size we can cope with is the overflow reserved bytes (typically
515  // 8K) + the protected region size (4K) + another page (4K).  Typically this will
516  // be 8+4+4 = 16K.  The thread won't be able to do much with this stack: even the GC takes
517  // between 8K and 12K.
518  uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
519    + 4 * KB;
520  if (read_stack_size <= min_stack) {
521    LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << read_stack_size
522        << " bytes)";
523  }
524
525  // TODO: move this into the Linux GetThreadStack implementation.
526#if !defined(__APPLE__)
527  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
528  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
529  // will be broken because we'll die long before we get close to 2GB.
530  bool is_main_thread = (::art::GetTid() == getpid());
531  if (is_main_thread) {
532    rlimit stack_limit;
533    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
534      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
535    }
536    if (stack_limit.rlim_cur == RLIM_INFINITY) {
537      // Find the default stack size for new threads...
538      pthread_attr_t default_attributes;
539      size_t default_stack_size;
540      CHECK_PTHREAD_CALL(pthread_attr_init, (&default_attributes), "default stack size query");
541      CHECK_PTHREAD_CALL(pthread_attr_getstacksize, (&default_attributes, &default_stack_size),
542                         "default stack size query");
543      CHECK_PTHREAD_CALL(pthread_attr_destroy, (&default_attributes), "default stack size query");
544
545      // ...and use that as our limit.
546      size_t old_stack_size = read_stack_size;
547      tlsPtr_.stack_size = default_stack_size;
548      tlsPtr_.stack_begin += (old_stack_size - default_stack_size);
549      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
550                    << " to " << PrettySize(default_stack_size)
551                    << " with base " << reinterpret_cast<void*>(tlsPtr_.stack_begin);
552    }
553  }
554#endif
555
556  // Set stack_end_ to the bottom of the stack, saving space for handling stack overflows.
557
558  Runtime* runtime = Runtime::Current();
559  bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsCompiler();
560  ResetDefaultStackEnd();
561
562  // Install the protected region if we are doing implicit overflow checks.
563  if (implicit_stack_check) {
564    // The thread might have a protected region at the bottom.  We need
565    // to install our own region, so we move the limits
566    // of the stack to make room for it.
567
568    tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
569    tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
570    tlsPtr_.stack_size -= read_guard_size;
571
572    InstallImplicitProtection();
573  }
574
575  // Sanity check.
576  int stack_variable;
577  CHECK_GT(&stack_variable, reinterpret_cast<void*>(tlsPtr_.stack_end));
578}
579
580void Thread::ShortDump(std::ostream& os) const {
581  os << "Thread[";
582  if (GetThreadId() != 0) {
583    // If we're in kStarting, we won't have a thin lock id or tid yet.
584    os << GetThreadId()
585             << ",tid=" << GetTid() << ',';
586  }
587  os << GetState()
588           << ",Thread*=" << this
589           << ",peer=" << tlsPtr_.opeer
590           << ",\"" << *tlsPtr_.name << "\""
591           << "]";
592}
593
594void Thread::Dump(std::ostream& os) const {
595  DumpState(os);
596  DumpStack(os);
597}
598
599mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
600  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
601  return (tlsPtr_.opeer != nullptr) ? reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
602}
603
604void Thread::GetThreadName(std::string& name) const {
605  name.assign(*tlsPtr_.name);
606}
607
608uint64_t Thread::GetCpuMicroTime() const {
609#if defined(HAVE_POSIX_CLOCKS)
610  clockid_t cpu_clock_id;
611  pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
612  timespec now;
613  clock_gettime(cpu_clock_id, &now);
614  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
615#else
616  UNIMPLEMENTED(WARNING);
617  return -1;
618#endif
619}
620
621// Attempt to rectify locks so that we dump thread list with required locks before exiting.
622static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
623  LOG(ERROR) << *thread << " suspend count already zero.";
624  Locks::thread_suspend_count_lock_->Unlock(self);
625  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
626    Locks::mutator_lock_->SharedTryLock(self);
627    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
628      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
629    }
630  }
631  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
632    Locks::thread_list_lock_->TryLock(self);
633    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
634      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
635    }
636  }
637  std::ostringstream ss;
638  Runtime::Current()->GetThreadList()->DumpLocked(ss);
639  LOG(FATAL) << ss.str();
640}
641
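// Adjusts this thread's suspend count (and the debug suspend count for debugger requests).
// Callers must hold thread_suspend_count_lock_; a non-zero count raises kSuspendRequest so the
// thread parks itself at its next suspend check.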
642void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
643  if (kIsDebugBuild) {
644    DCHECK(delta == -1 || delta == +1 || delta == -tls32_.debug_suspend_count)
645          << delta << " " << tls32_.debug_suspend_count << " " << this;
646    DCHECK_GE(tls32_.suspend_count, tls32_.debug_suspend_count) << this;
647    Locks::thread_suspend_count_lock_->AssertHeld(self);
648    if (this != self && !IsSuspended()) {
649      Locks::thread_list_lock_->AssertHeld(self);
650    }
651  }
652  if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
653    UnsafeLogFatalForSuspendCount(self, this);
654    return;
655  }
656
657  tls32_.suspend_count += delta;
658  if (for_debugger) {
659    tls32_.debug_suspend_count += delta;
660  }
661
662  if (tls32_.suspend_count == 0) {
663    AtomicClearFlag(kSuspendRequest);
664  } else {
665    AtomicSetFlag(kSuspendRequest);
666    TriggerSuspend();
667  }
668}
669
670void Thread::RunCheckpointFunction() {
671  Closure *checkpoints[kMaxCheckpoints];
672
673  // Grab the suspend_count lock and copy the current set of
674  // checkpoints.  Then clear the list and the flag.  The RequestCheckpoint
675  // function will also grab this lock so we prevent a race between setting
676  // the kCheckpointRequest flag and clearing it.
677  {
678    MutexLock mu(this, *Locks::thread_suspend_count_lock_);
679    for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
680      checkpoints[i] = tlsPtr_.checkpoint_functions[i];
681      tlsPtr_.checkpoint_functions[i] = nullptr;
682    }
683    AtomicClearFlag(kCheckpointRequest);
684  }
685
686  // Outside the lock, run all the checkpoint functions that
687  // we collected.
688  bool found_checkpoint = false;
689  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
690    if (checkpoints[i] != nullptr) {
691      ATRACE_BEGIN("Checkpoint function");
692      checkpoints[i]->Run(this);
693      ATRACE_END();
694      found_checkpoint = true;
695    }
696  }
697  CHECK(found_checkpoint);
698}
699
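// Installs 'function' to run on the target thread at its next suspend check. The compare-and-swap
// on state_and_flags ensures kCheckpointRequest is only set while the thread is still runnable;
// otherwise the slot is rolled back and false is returned.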
700bool Thread::RequestCheckpoint(Closure* function) {
701  union StateAndFlags old_state_and_flags;
702  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
703  if (old_state_and_flags.as_struct.state != kRunnable) {
704    return false;  // Fail, thread is suspended and so can't run a checkpoint.
705  }
706
707  uint32_t available_checkpoint = kMaxCheckpoints;
708  for (uint32_t i = 0 ; i < kMaxCheckpoints; ++i) {
709    if (tlsPtr_.checkpoint_functions[i] == nullptr) {
710      available_checkpoint = i;
711      break;
712    }
713  }
714  if (available_checkpoint == kMaxCheckpoints) {
715    // No free checkpoint slot is available, so we can't run a checkpoint.
716    return false;
717  }
718  tlsPtr_.checkpoint_functions[available_checkpoint] = function;
719
720  // Checkpoint function installed; now install the flag bit.
721  // We must be runnable to request a checkpoint.
722  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
723  union StateAndFlags new_state_and_flags;
724  new_state_and_flags.as_int = old_state_and_flags.as_int;
725  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
726  bool success =
727      tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(old_state_and_flags.as_int,
728                                                                                       new_state_and_flags.as_int);
729  if (UNLIKELY(!success)) {
730    // The thread changed state before the checkpoint was installed.
731    CHECK_EQ(tlsPtr_.checkpoint_functions[available_checkpoint], function);
732    tlsPtr_.checkpoint_functions[available_checkpoint] = nullptr;
733  } else {
734    CHECK_EQ(ReadFlag(kCheckpointRequest), true);
735    TriggerSuspend();
736  }
737  return success;
738}
739
740void Thread::FullSuspendCheck() {
741  VLOG(threads) << this << " self-suspending";
742  ATRACE_BEGIN("Full suspend check");
743  // Make thread appear suspended to other threads, release mutator_lock_.
744  TransitionFromRunnableToSuspended(kSuspended);
745  // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
746  TransitionFromSuspendedToRunnable();
747  ATRACE_END();
748  VLOG(threads) << this << " self-reviving";
749}
750
751void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
752  std::string group_name;
753  int priority;
754  bool is_daemon = false;
755  Thread* self = Thread::Current();
756
757  // Don't do this if we are aborting since the GC may have all the threads suspended. This will
758  // cause ScopedObjectAccessUnchecked to deadlock.
759  if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
760    ScopedObjectAccessUnchecked soa(self);
761    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)
762        ->GetInt(thread->tlsPtr_.opeer);
763    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)
764        ->GetBoolean(thread->tlsPtr_.opeer);
765
766    mirror::Object* thread_group =
767        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->tlsPtr_.opeer);
768
769    if (thread_group != nullptr) {
770      mirror::ArtField* group_name_field =
771          soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
772      mirror::String* group_name_string =
773          reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
774      group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
775    }
776  } else {
777    priority = GetNativePriority();
778  }
779
780  std::string scheduler_group_name(GetSchedulerGroupName(tid));
781  if (scheduler_group_name.empty()) {
782    scheduler_group_name = "default";
783  }
784
785  if (thread != nullptr) {
786    os << '"' << *thread->tlsPtr_.name << '"';
787    if (is_daemon) {
788      os << " daemon";
789    }
790    os << " prio=" << priority
791       << " tid=" << thread->GetThreadId()
792       << " " << thread->GetState();
793    if (thread->IsStillStarting()) {
794      os << " (still starting up)";
795    }
796    os << "\n";
797  } else {
798    os << '"' << ::art::GetThreadName(tid) << '"'
799       << " prio=" << priority
800       << " (not attached)\n";
801  }
802
803  if (thread != nullptr) {
804    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
805    os << "  | group=\"" << group_name << "\""
806       << " sCount=" << thread->tls32_.suspend_count
807       << " dsCount=" << thread->tls32_.debug_suspend_count
808       << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer)
809       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
810  }
811
812  os << "  | sysTid=" << tid
813     << " nice=" << getpriority(PRIO_PROCESS, tid)
814     << " cgrp=" << scheduler_group_name;
815  if (thread != nullptr) {
816    int policy;
817    sched_param sp;
818    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp),
819                       __FUNCTION__);
820    os << " sched=" << policy << "/" << sp.sched_priority
821       << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self);
822  }
823  os << "\n";
824
825  // Grab the scheduler stats for this thread.
826  std::string scheduler_stats;
827  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
828    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
829  } else {
830    scheduler_stats = "0 0 0";
831  }
832
833  char native_thread_state = '?';
834  int utime = 0;
835  int stime = 0;
836  int task_cpu = 0;
837  GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu);
838
839  os << "  | state=" << native_thread_state
840     << " schedstat=( " << scheduler_stats << " )"
841     << " utm=" << utime
842     << " stm=" << stime
843     << " core=" << task_cpu
844     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
845  if (thread != nullptr) {
846    os << "  | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-"
847        << reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize="
848        << PrettySize(thread->tlsPtr_.stack_size) << "\n";
849    // Dump the held mutexes.
850    os << "  | held mutexes=";
851    for (size_t i = 0; i < kLockLevelCount; ++i) {
852      if (i != kMonitorLock) {
853        BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i));
854        if (mutex != nullptr) {
855          os << " \"" << mutex->GetName() << "\"";
856          if (mutex->IsReaderWriterMutex()) {
857            ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex);
858            if (rw_mutex->GetExclusiveOwnerTid() == static_cast<uint64_t>(tid)) {
859              os << "(exclusive held)";
860            } else {
861              os << "(shared held)";
862            }
863          }
864        }
865      }
866    }
867    os << "\n";
868  }
869}
870
871void Thread::DumpState(std::ostream& os) const {
872  Thread::DumpState(os, this, GetTid());
873}
874
875struct StackDumpVisitor : public StackVisitor {
876  StackDumpVisitor(std::ostream& os, Thread* thread, Context* context, bool can_allocate)
877      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
878      : StackVisitor(thread, context), os(os), thread(thread), can_allocate(can_allocate),
879        last_method(nullptr), last_line_number(0), repetition_count(0), frame_count(0) {
880  }
881
882  virtual ~StackDumpVisitor() {
883    if (frame_count == 0) {
884      os << "  (no managed stack frames)\n";
885    }
886  }
887
888  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
889    mirror::ArtMethod* m = GetMethod();
890    if (m->IsRuntimeMethod()) {
891      return true;
892    }
893    const int kMaxRepetition = 3;
894    mirror::Class* c = m->GetDeclaringClass();
895    mirror::DexCache* dex_cache = c->GetDexCache();
896    int line_number = -1;
897    if (dex_cache != nullptr) {  // be tolerant of bad input
898      const DexFile& dex_file = *dex_cache->GetDexFile();
899      line_number = dex_file.GetLineNumFromPC(m, GetDexPc(false));
900    }
901    if (line_number == last_line_number && last_method == m) {
902      ++repetition_count;
903    } else {
904      if (repetition_count >= kMaxRepetition) {
905        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
906      }
907      repetition_count = 0;
908      last_line_number = line_number;
909      last_method = m;
910    }
911    if (repetition_count < kMaxRepetition) {
912      os << "  at " << PrettyMethod(m, false);
913      if (m->IsNative()) {
914        os << "(Native method)";
915      } else {
916        const char* source_file(m->GetDeclaringClassSourceFile());
917        os << "(" << (source_file != nullptr ? source_file : "unavailable")
918           << ":" << line_number << ")";
919      }
920      os << "\n";
921      if (frame_count == 0) {
922        Monitor::DescribeWait(os, thread);
923      }
924      if (can_allocate) {
925        // Visit locks, but do not abort on errors. This would trigger a nested abort.
926        Monitor::VisitLocks(this, DumpLockedObject, &os, false);
927      }
928    }
929
930    ++frame_count;
931    return true;
932  }
933
934  static void DumpLockedObject(mirror::Object* o, void* context)
935      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
936    std::ostream& os = *reinterpret_cast<std::ostream*>(context);
937    os << "  - locked ";
938    if (o == nullptr) {
939      os << "an unknown object";
940    } else {
941      if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
942          Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
943        // Getting the identity hashcode here would result in lock inflation and suspension of the
944        // current thread, which isn't safe if this is the only runnable thread.
945        os << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)", reinterpret_cast<intptr_t>(o),
946                           PrettyTypeOf(o).c_str());
947      } else {
948        os << StringPrintf("<0x%08x> (a %s)", o->IdentityHashCode(), PrettyTypeOf(o).c_str());
949      }
950    }
951    os << "\n";
952  }
953
954  std::ostream& os;
955  const Thread* thread;
956  const bool can_allocate;
957  mirror::ArtMethod* last_method;
958  int last_line_number;
959  int repetition_count;
960  int frame_count;
961};
962
963static bool ShouldShowNativeStack(const Thread* thread)
964    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
965  ThreadState state = thread->GetState();
966
967  // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
968  if (state > kWaiting && state < kStarting) {
969    return true;
970  }
971
972  // In an Object.wait variant or Thread.sleep? That's not interesting.
973  if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
974    return false;
975  }
976
977  // Threads with no managed stack frames should be shown.
978  const ManagedStack* managed_stack = thread->GetManagedStack();
979  if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
980      managed_stack->GetTopShadowFrame() == nullptr)) {
981    return true;
982  }
983
984  // In some other native method? That's interesting.
985  // We don't just check kNative because native methods will be in state kSuspended if they're
986  // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
987  // thread-startup states if it's early enough in their life cycle (http://b/7432159).
988  mirror::ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
989  return current_method != nullptr && current_method->IsNative();
990}
991
992void Thread::DumpJavaStack(std::ostream& os) const {
993  // Dumping the Java stack involves the verifier for locks. The verifier operates under the
994  // assumption that there is no exception pending on entry. Thus, stash any pending exception.
995  // TODO: Find a way to avoid const_cast.
996  StackHandleScope<3> scope(const_cast<Thread*>(this));
997  Handle<mirror::Throwable> exc;
998  Handle<mirror::Object> throw_location_this_object;
999  Handle<mirror::ArtMethod> throw_location_method;
1000  uint32_t throw_location_dex_pc;
1001  bool have_exception = false;
1002  if (IsExceptionPending()) {
1003    ThrowLocation exc_location;
1004    exc = scope.NewHandle(GetException(&exc_location));
1005    throw_location_this_object = scope.NewHandle(exc_location.GetThis());
1006    throw_location_method = scope.NewHandle(exc_location.GetMethod());
1007    throw_location_dex_pc = exc_location.GetDexPc();
1008    const_cast<Thread*>(this)->ClearException();
1009    have_exception = true;
1010  }
1011
1012  std::unique_ptr<Context> context(Context::Create());
1013  StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
1014                          !tls32_.throwing_OutOfMemoryError);
1015  dumper.WalkStack();
1016
1017  if (have_exception) {
1018    ThrowLocation exc_location(throw_location_this_object.Get(),
1019                               throw_location_method.Get(),
1020                               throw_location_dex_pc);
1021    const_cast<Thread*>(this)->SetException(exc_location, exc.Get());
1022  }
1023}
1024
1025void Thread::DumpStack(std::ostream& os) const {
1026  // TODO: we call this code when dying but may not have suspended the thread ourselves. The
1027  //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
1028  //       the race with the thread_suspend_count_lock_).
1029  bool dump_for_abort = (gAborting > 0);
1030  bool safe_to_dump = (this == Thread::Current() || IsSuspended());
1031  if (!kIsDebugBuild) {
1032    // We always want to dump the stack for an abort, however, there is no point dumping another
1033    // thread's stack in debug builds where we'll hit the not suspended check in the stack walk.
1034    safe_to_dump = (safe_to_dump || dump_for_abort);
1035  }
1036  if (safe_to_dump) {
1037    // If we're currently in native code, dump that stack before dumping the managed stack.
1038    if (dump_for_abort || ShouldShowNativeStack(this)) {
1039#ifndef NO_DUMP_NATIVE_STACKS
1040      DumpKernelStack(os, GetTid(), "  kernel: ", false);
1041      DumpNativeStack(os, GetTid(), "  native: ", GetCurrentMethod(nullptr, !dump_for_abort));
1042#endif
1043    }
1044    DumpJavaStack(os);
1045  } else {
1046    os << "Not able to dump stack of thread that isn't suspended";
1047  }
1048}
1049
1050void Thread::ThreadExitCallback(void* arg) {
1051  Thread* self = reinterpret_cast<Thread*>(arg);
1052  if (self->tls32_.thread_exit_check_count == 0) {
1053    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
1054        "going to use a pthread_key_create destructor?): " << *self;
1055    CHECK(is_started_);
1056    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
1057    self->tls32_.thread_exit_check_count = 1;
1058  } else {
1059    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
1060  }
1061}
1062
1063void Thread::Startup() {
1064  CHECK(!is_started_);
1065  is_started_ = true;
1066  {
1067    // MutexLock to keep annotalysis happy.
1068    //
1069    // Note we use nullptr for the thread because Thread::Current can
1070    // return garbage: is_started_ is already true, but
1071    // Thread::pthread_key_self_ is not yet initialized.
1072    // This was seen on glibc.
1073    MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_);
1074    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
1075                                         *Locks::thread_suspend_count_lock_);
1076  }
1077
1078  // Allocate a TLS slot.
1079  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");
1080
1081  // Double-check the TLS slot allocation.
1082  if (pthread_getspecific(pthread_key_self_) != nullptr) {
1083    LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
1084  }
1085}
1086
1087void Thread::FinishStartup() {
1088  Runtime* runtime = Runtime::Current();
1089  CHECK(runtime->IsStarted());
1090
1091  // Finish attaching the main thread.
1092  ScopedObjectAccess soa(Thread::Current());
1093  Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
1094
1095  Runtime::Current()->GetClassLinker()->RunRootClinits();
1096}
1097
1098void Thread::Shutdown() {
1099  CHECK(is_started_);
1100  is_started_ = false;
1101  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
1102  MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
1103  if (resume_cond_ != nullptr) {
1104    delete resume_cond_;
1105    resume_cond_ = nullptr;
1106  }
1107}
1108
1109Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupted_(false) {
1110  wait_mutex_ = new Mutex("a thread wait mutex");
1111  wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
1112  tlsPtr_.debug_invoke_req = new DebugInvokeReq;
1113  tlsPtr_.single_step_control = new SingleStepControl;
1114  tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
1115  tlsPtr_.name = new std::string(kThreadNameDuringStartup);
1116  tlsPtr_.nested_signal_state = static_cast<jmp_buf*>(malloc(sizeof(jmp_buf)));
1117
1118  CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
1119  tls32_.state_and_flags.as_struct.flags = 0;
1120  tls32_.state_and_flags.as_struct.state = kNative;
1121  memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
1122  std::fill(tlsPtr_.rosalloc_runs,
1123            tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBrackets,
1124            gc::allocator::RosAlloc::GetDedicatedFullRun());
1125  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
1126    tlsPtr_.checkpoint_functions[i] = nullptr;
1127  }
1128}
1129
1130bool Thread::IsStillStarting() const {
1131  // You might think you can check whether the state is kStarting, but for much of thread startup,
1132  // the thread is in kNative; it might also be in kVmWait.
1133  // You might think you can check whether the peer is nullptr, but the peer is actually created and
1134  // assigned fairly early on, and needs to be.
1135  // It turns out that the last thing to change is the thread name; that's a good proxy for "has
1136  // this thread _ever_ entered kRunnable".
1137  return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) ||
1138      (*tlsPtr_.name == kThreadNameDuringStartup);
1139}
1140
1141void Thread::AssertNoPendingException() const {
1142  if (UNLIKELY(IsExceptionPending())) {
1143    ScopedObjectAccess soa(Thread::Current());
1144    mirror::Throwable* exception = GetException(nullptr);
1145    LOG(FATAL) << "No pending exception expected: " << exception->Dump();
1146  }
1147}
1148
1149void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
1150  if (UNLIKELY(IsExceptionPending())) {
1151    ScopedObjectAccess soa(Thread::Current());
1152    mirror::Throwable* exception = GetException(nullptr);
1153    LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
1154        << exception->Dump();
1155  }
1156}
1157
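// Root visitor used on detach: force-exits any monitor that the detaching thread entered through
// JNI MonitorEnter but never exited.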
1158static void MonitorExitVisitor(mirror::Object** object, void* arg, const RootInfo& /*root_info*/)
1159    NO_THREAD_SAFETY_ANALYSIS {
1160  Thread* self = reinterpret_cast<Thread*>(arg);
1161  mirror::Object* entered_monitor = *object;
1162  if (self->HoldsLock(entered_monitor)) {
1163    LOG(WARNING) << "Calling MonitorExit on object "
1164                 << object << " (" << PrettyTypeOf(entered_monitor) << ")"
1165                 << " left locked by native thread "
1166                 << *Thread::Current() << " which is detaching";
1167    entered_monitor->MonitorExit(self);
1168  }
1169}
1170
1171void Thread::Destroy() {
1172  Thread* self = this;
1173  DCHECK_EQ(self, Thread::Current());
1174
1175  if (tlsPtr_.opeer != nullptr) {
1176    ScopedObjectAccess soa(self);
1177    // We may need to call user-supplied managed code, do this before final clean-up.
1178    HandleUncaughtExceptions(soa);
1179    RemoveFromThreadGroup(soa);
1180
1181    // this.nativePeer = 0;
1182    if (Runtime::Current()->IsActiveTransaction()) {
1183      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
1184          ->SetLong<true>(tlsPtr_.opeer, 0);
1185    } else {
1186      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
1187          ->SetLong<false>(tlsPtr_.opeer, 0);
1188    }
1189    Dbg::PostThreadDeath(self);
1190
1191    // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
1192    // who is waiting.
1193    mirror::Object* lock =
1194        soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
1195    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
1196    if (lock != nullptr) {
1197      StackHandleScope<1> hs(self);
1198      Handle<mirror::Object> h_obj(hs.NewHandle(lock));
1199      ObjectLock<mirror::Object> locker(self, h_obj);
1200      locker.NotifyAll();
1201    }
1202  }
1203
1204  // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
1205  if (tlsPtr_.jni_env != nullptr) {
1206    tlsPtr_.jni_env->monitors.VisitRoots(MonitorExitVisitor, self, RootInfo(kRootVMInternal));
1207  }
1208}
1209
1210Thread::~Thread() {
1211  if (tlsPtr_.jni_env != nullptr && tlsPtr_.jpeer != nullptr) {
1212    // If pthread_create fails we don't have a jni env here.
1213    tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
1214    tlsPtr_.jpeer = nullptr;
1215  }
1216  tlsPtr_.opeer = nullptr;
1217
1218  bool initialized = (tlsPtr_.jni_env != nullptr);  // Did Thread::Init run?
1219  if (initialized) {
1220    delete tlsPtr_.jni_env;
1221    tlsPtr_.jni_env = nullptr;
1222  }
1223  CHECK_NE(GetState(), kRunnable);
1224  CHECK_NE(ReadFlag(kCheckpointRequest), true);
1225  CHECK(tlsPtr_.checkpoint_functions[0] == nullptr);
1226  CHECK(tlsPtr_.checkpoint_functions[1] == nullptr);
1227  CHECK(tlsPtr_.checkpoint_functions[2] == nullptr);
1228
1229  // We may be deleting a stillborn thread.
1230  SetStateUnsafe(kTerminated);
1231
1232  delete wait_cond_;
1233  delete wait_mutex_;
1234
1235  if (tlsPtr_.long_jump_context != nullptr) {
1236    delete tlsPtr_.long_jump_context;
1237  }
1238
1239  if (initialized) {
1240    CleanupCpu();
1241  }
1242
1243  delete tlsPtr_.debug_invoke_req;
1244  delete tlsPtr_.single_step_control;
1245  delete tlsPtr_.instrumentation_stack;
1246  delete tlsPtr_.name;
1247  delete tlsPtr_.stack_trace_sample;
1248  free(tlsPtr_.nested_signal_state);
1249
1250  Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
1251
1252  TearDownAlternateSignalStack();
1253}
1254
1255void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
1256  if (!IsExceptionPending()) {
1257    return;
1258  }
1259  ScopedLocalRef<jobject> peer(tlsPtr_.jni_env, soa.AddLocalReference<jobject>(tlsPtr_.opeer));
1260  ScopedThreadStateChange tsc(this, kNative);
1261
1262  // Get and clear the exception.
1263  ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred());
1264  tlsPtr_.jni_env->ExceptionClear();
1265
1266  // If the thread has its own handler, use that.
1267  ScopedLocalRef<jobject> handler(tlsPtr_.jni_env,
1268                                  tlsPtr_.jni_env->GetObjectField(peer.get(),
1269                                      WellKnownClasses::java_lang_Thread_uncaughtHandler));
1270  if (handler.get() == nullptr) {
1271    // Otherwise use the thread group's default handler.
1272    handler.reset(tlsPtr_.jni_env->GetObjectField(peer.get(),
1273                                                  WellKnownClasses::java_lang_Thread_group));
1274  }
1275
1276  // Call the handler.
1277  tlsPtr_.jni_env->CallVoidMethod(handler.get(),
1278      WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException,
1279      peer.get(), exception.get());
1280
1281  // If the handler threw, clear that exception too.
1282  tlsPtr_.jni_env->ExceptionClear();
1283}
1284
1285void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
1286  // this.group.removeThread(this);
1287  // group can be null if we're in the compiler or a test.
1288  mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)
1289      ->GetObject(tlsPtr_.opeer);
1290  if (ogroup != nullptr) {
1291    ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
1292    ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(tlsPtr_.opeer));
1293    ScopedThreadStateChange tsc(soa.Self(), kNative);
1294    tlsPtr_.jni_env->CallVoidMethod(group.get(),
1295                                    WellKnownClasses::java_lang_ThreadGroup_removeThread,
1296                                    peer.get());
1297  }
1298}
1299
1300size_t Thread::NumHandleReferences() {
1301  size_t count = 0;
1302  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
1303    count += cur->NumberOfReferences();
1304  }
1305  return count;
1306}
1307
1308bool Thread::HandleScopeContains(jobject obj) const {
1309  StackReference<mirror::Object>* hs_entry =
1310      reinterpret_cast<StackReference<mirror::Object>*>(obj);
1311  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
1312    if (cur->Contains(hs_entry)) {
1313      return true;
1314    }
1315  }
1316  // JNI code invoked from portable code uses shadow frames rather than the handle scope.
1317  return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
1318}
1319
1320void Thread::HandleScopeVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id) {
1321  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
1322    size_t num_refs = cur->NumberOfReferences();
1323    for (size_t j = 0; j < num_refs; ++j) {
1324      mirror::Object* object = cur->GetReference(j);
1325      if (object != nullptr) {
1326        mirror::Object* old_obj = object;
1327        visitor(&object, arg, RootInfo(kRootNativeStack, thread_id));
1328        if (old_obj != object) {
1329          cur->SetReference(j, object);
1330        }
1331      }
1332    }
1333  }
1334}
1335
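// Translates a JNI reference (local, global, weak global, or a handle-scope/stack reference) back
// into the mirror::Object* it refers to; cleared JNI weak globals decode to nullptr.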
1336mirror::Object* Thread::DecodeJObject(jobject obj) const {
1337  Locks::mutator_lock_->AssertSharedHeld(this);
1338  if (obj == nullptr) {
1339    return nullptr;
1340  }
1341  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
1342  IndirectRefKind kind = GetIndirectRefKind(ref);
1343  mirror::Object* result;
1344  // The "kinds" below are sorted by the frequency we expect to encounter them.
1345  if (kind == kLocal) {
1346    IndirectReferenceTable& locals = tlsPtr_.jni_env->locals;
1347    // Local references do not need a read barrier.
1348    result = locals.Get<kWithoutReadBarrier>(ref);
1349  } else if (kind == kHandleScopeOrInvalid) {
1350    // TODO: make stack indirect reference table lookup more efficient.
1351    // Check if this is a local reference in the handle scope.
1352    if (LIKELY(HandleScopeContains(obj))) {
1353      // Read from handle scope.
1354      result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
1355      VerifyObject(result);
1356    } else {
1357      result = kInvalidIndirectRefObject;
1358    }
1359  } else if (kind == kGlobal) {
1360    JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
1361    result = vm->globals.SynchronizedGet(const_cast<Thread*>(this), &vm->globals_lock, ref);
1362  } else {
1363    DCHECK_EQ(kind, kWeakGlobal);
1364    result = Runtime::Current()->GetJavaVM()->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
1365    if (result == kClearedJniWeakGlobal) {
1366      // This is a special case where it's okay to return nullptr.
1367      return nullptr;
1368    }
1369  }
1370
1371  if (UNLIKELY(result == nullptr)) {
1372    JniAbortF(nullptr, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
1373  }
1374  return result;
1375}
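
// Illustrative round trip (a sketch; "soa", "obj" and "self" stand for a ScopedObjectAccess, a
// mirror::Object* and the current Thread*, and are not names defined above):
//   jobject local = soa.AddLocalReference<jobject>(obj);   // Registered in jni_env->locals (kLocal).
//   mirror::Object* decoded = self->DecodeJObject(local);  // Looks the same object back up.
// Globals are resolved through the JavaVM's globals table, cleared weak globals decode to nullptr,
// and handle scope "references" are raw stack addresses, so they are reinterpreted in place.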
1376
1377// Implements java.lang.Thread.interrupted.
1378bool Thread::Interrupted() {
1379  MutexLock mu(Thread::Current(), *wait_mutex_);
1380  bool interrupted = IsInterruptedLocked();
1381  SetInterruptedLocked(false);
1382  return interrupted;
1383}
1384
1385// Implements java.lang.Thread.isInterrupted.
1386bool Thread::IsInterrupted() {
1387  MutexLock mu(Thread::Current(), *wait_mutex_);
1388  return IsInterruptedLocked();
1389}
1390
1391void Thread::Interrupt(Thread* self) {
1392  MutexLock mu(self, *wait_mutex_);
1393  if (interrupted_) {
1394    return;
1395  }
1396  interrupted_ = true;
1397  NotifyLocked(self);
1398}
1399
1400void Thread::Notify() {
1401  Thread* self = Thread::Current();
1402  MutexLock mu(self, *wait_mutex_);
1403  NotifyLocked(self);
1404}
1405
1406void Thread::NotifyLocked(Thread* self) {
1407  if (wait_monitor_ != nullptr) {
1408    wait_cond_->Signal(self);
1409  }
1410}
1411
1412class CountStackDepthVisitor : public StackVisitor {
1413 public:
1414  explicit CountStackDepthVisitor(Thread* thread)
1415      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1416      : StackVisitor(thread, nullptr),
1417        depth_(0), skip_depth_(0), skipping_(true) {}
1418
1419  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1420    // We want to skip frames up to and including the exception's constructor.
1421    // Note that we also skip the frame if it doesn't have a method (namely the
1422    // callee-save frame).
1423    mirror::ArtMethod* m = GetMethod();
1424    if (skipping_ && !m->IsRuntimeMethod() &&
1425        !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
1426      skipping_ = false;
1427    }
1428    if (!skipping_) {
1429      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
1430        ++depth_;
1431      }
1432    } else {
1433      ++skip_depth_;
1434    }
1435    return true;
1436  }
1437
1438  int GetDepth() const {
1439    return depth_;
1440  }
1441
1442  int GetSkipDepth() const {
1443    return skip_depth_;
1444  }
1445
1446 private:
1447  uint32_t depth_;
1448  uint32_t skip_depth_;
1449  bool skipping_;
1450};
1451
1452template<bool kTransactionActive>
1453class BuildInternalStackTraceVisitor : public StackVisitor {
1454 public:
1455  explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
1456      : StackVisitor(thread, nullptr), self_(self),
1457        skip_depth_(skip_depth), count_(0), dex_pc_trace_(nullptr), method_trace_(nullptr) {}
1458
1459  bool Init(int depth)
1460      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1461    // Allocate method trace with an extra slot that will hold the PC trace
1462    StackHandleScope<1> hs(self_);
1463    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1464    Handle<mirror::ObjectArray<mirror::Object>> method_trace(
1465        hs.NewHandle(class_linker->AllocObjectArray<mirror::Object>(self_, depth + 1)));
1466    if (method_trace.Get() == nullptr) {
1467      return false;
1468    }
1469    mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth);
1470    if (dex_pc_trace == nullptr) {
1471      return false;
1472    }
1473    // Save the PC trace in the last element of the method trace; this also places
1474    // it into the object graph.
1475    // The store below honours this visitor's kTransactionActive template parameter.
1476    method_trace->Set<kTransactionActive>(depth, dex_pc_trace);
1477    // Set the Object*s and assert that no thread suspension is now possible.
1478    const char* last_no_suspend_cause =
1479        self_->StartAssertNoThreadSuspension("Building internal stack trace");
1480    CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
1481    method_trace_ = method_trace.Get();
1482    dex_pc_trace_ = dex_pc_trace;
1483    return true;
1484  }
1485
1486  virtual ~BuildInternalStackTraceVisitor() {
1487    if (method_trace_ != nullptr) {
1488      self_->EndAssertNoThreadSuspension(nullptr);
1489    }
1490  }
1491
1492  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1493    if (method_trace_ == nullptr || dex_pc_trace_ == nullptr) {
1494      return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
1495    }
1496    if (skip_depth_ > 0) {
1497      skip_depth_--;
1498      return true;
1499    }
1500    mirror::ArtMethod* m = GetMethod();
1501    if (m->IsRuntimeMethod()) {
1502      return true;  // Ignore runtime frames (in particular callee save).
1503    }
1504    method_trace_->Set<kTransactionActive>(count_, m);
1505    dex_pc_trace_->Set<kTransactionActive>(count_,
1506        m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc());
1507    ++count_;
1508    return true;
1509  }
1510
1511  mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
1512    return method_trace_;
1513  }
1514
1515 private:
1516  Thread* const self_;
1517  // How many more frames to skip.
1518  int32_t skip_depth_;
1519  // Current position down the stack trace.
1520  uint32_t count_;
1521  // Array of dex PC values.
1522  mirror::IntArray* dex_pc_trace_;
1523  // An array of the methods on the stack, the last entry is a reference to the PC trace.
1524  mirror::ObjectArray<mirror::Object>* method_trace_;
1525};
1526
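// Summary of the two visitors above: CountStackDepthVisitor first walks the stack to compute how
// many frames to record and how many top frames to skip (those belonging to the exception's
// constructors), then BuildInternalStackTraceVisitor fills an ObjectArray<Object> of length
// depth + 1 in which slots [0, depth) hold the ArtMethod* of each frame and the final slot holds
// an IntArray of the corresponding dex PCs.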
1527template<bool kTransactionActive>
1528jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
1529  // Compute depth of stack
1530  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
1531  count_visitor.WalkStack();
1532  int32_t depth = count_visitor.GetDepth();
1533  int32_t skip_depth = count_visitor.GetSkipDepth();
1534
1535  // Build internal stack trace.
1536  BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(),
1537                                                                         const_cast<Thread*>(this),
1538                                                                         skip_depth);
1539  if (!build_trace_visitor.Init(depth)) {
1540    return nullptr;  // Allocation failed.
1541  }
1542  build_trace_visitor.WalkStack();
1543  mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
1544  if (kIsDebugBuild) {
1545    for (int32_t i = 0; i < trace->GetLength(); ++i) {
1546      CHECK(trace->Get(i) != nullptr);
1547    }
1548  }
1549  return soa.AddLocalReference<jobjectArray>(trace);
1550}
1551template jobject Thread::CreateInternalStackTrace<false>(
1552    const ScopedObjectAccessAlreadyRunnable& soa) const;
1553template jobject Thread::CreateInternalStackTrace<true>(
1554    const ScopedObjectAccessAlreadyRunnable& soa) const;
1555
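// Converts the raw trace produced above into java.lang.StackTraceElement objects. The reported
// depth is the internal array length minus one because the last slot holds the dex PC IntArray
// rather than a method; for example, a trace recording 3 frames is stored in a 4-element array.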
1556jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
1557    const ScopedObjectAccessAlreadyRunnable& soa, jobject internal, jobjectArray output_array,
1558    int* stack_depth) {
1559  // Decode the internal stack trace into the depth, method trace and PC trace
1560  int32_t depth = soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal)->GetLength() - 1;
1561
1562  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1563
1564  jobjectArray result;
1565
1566  if (output_array != nullptr) {
1567    // Reuse the array we were given.
1568    result = output_array;
1569    // ...adjusting the number of frames we'll write to not exceed the array length.
1570    const int32_t traces_length =
1571        soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->GetLength();
1572    depth = std::min(depth, traces_length);
1573  } else {
1574    // Create java_trace array and place in local reference table
1575    mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
1576        class_linker->AllocStackTraceElementArray(soa.Self(), depth);
1577    if (java_traces == nullptr) {
1578      return nullptr;
1579    }
1580    result = soa.AddLocalReference<jobjectArray>(java_traces);
1581  }
1582
1583  if (stack_depth != nullptr) {
1584    *stack_depth = depth;
1585  }
1586
1587  for (int32_t i = 0; i < depth; ++i) {
1588    mirror::ObjectArray<mirror::Object>* method_trace =
1589          soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
1590    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
1591    mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i));
1592    int32_t line_number;
1593    StackHandleScope<3> hs(soa.Self());
1594    auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
1595    auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
1596    if (method->IsProxyMethod()) {
1597      line_number = -1;
1598      class_name_object.Assign(method->GetDeclaringClass()->GetName());
1599      // source_name_object intentionally left null for proxy methods
1600    } else {
1601      mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
1602      uint32_t dex_pc = pc_trace->Get(i);
1603      line_number = method->GetLineNumFromDexPC(dex_pc);
1604      // Allocate element, potentially triggering GC
1605      // TODO: reuse class_name_object via Class::name_?
1606      const char* descriptor = method->GetDeclaringClassDescriptor();
1607      CHECK(descriptor != nullptr);
1608      std::string class_name(PrettyDescriptor(descriptor));
1609      class_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
1610      if (class_name_object.Get() == nullptr) {
1611        return nullptr;
1612      }
1613      const char* source_file = method->GetDeclaringClassSourceFile();
1614      if (source_file != nullptr) {
1615        source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
1616        if (source_name_object.Get() == nullptr) {
1617          return nullptr;
1618        }
1619      }
1620    }
1621    const char* method_name = method->GetName();
1622    CHECK(method_name != nullptr);
1623    Handle<mirror::String> method_name_object(
1624        hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
1625    if (method_name_object.Get() == nullptr) {
1626      return nullptr;
1627    }
1628    mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
1629        soa.Self(), class_name_object, method_name_object, source_name_object, line_number);
1630    if (obj == nullptr) {
1631      return nullptr;
1632    }
1633    // We are called from native: use non-transactional mode.
1634    soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->Set<false>(i, obj);
1635  }
1636  return result;
1637}
1638
1639void Thread::ThrowNewExceptionF(const ThrowLocation& throw_location,
1640                                const char* exception_class_descriptor, const char* fmt, ...) {
1641  va_list args;
1642  va_start(args, fmt);
1643  ThrowNewExceptionV(throw_location, exception_class_descriptor,
1644                     fmt, args);
1645  va_end(args);
1646}
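
// Illustrative call (a sketch; the descriptor, message and arguments are hypothetical):
//   self->ThrowNewExceptionF(throw_location, "Ljava/lang/IllegalArgumentException;",
//                            "invalid index: %d", index);
// The printf-style arguments are expanded by ThrowNewExceptionV before the exception is created.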
1647
1648void Thread::ThrowNewExceptionV(const ThrowLocation& throw_location,
1649                                const char* exception_class_descriptor,
1650                                const char* fmt, va_list ap) {
1651  std::string msg;
1652  StringAppendV(&msg, fmt, ap);
1653  ThrowNewException(throw_location, exception_class_descriptor, msg.c_str());
1654}
1655
1656void Thread::ThrowNewException(const ThrowLocation& throw_location,
1657                               const char* exception_class_descriptor, const char* msg) {
1658  // Callers should either clear or call ThrowNewWrappedException.
1659  AssertNoPendingExceptionForNewException(msg);
1660  ThrowNewWrappedException(throw_location, exception_class_descriptor, msg);
1661}
1662
1663void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
1664                                      const char* exception_class_descriptor,
1665                                      const char* msg) {
1666  DCHECK_EQ(this, Thread::Current());
1667  ScopedObjectAccessUnchecked soa(this);
1668  StackHandleScope<5> hs(soa.Self());
1669  // Ensure we don't forget arguments over object allocation.
1670  Handle<mirror::Object> saved_throw_this(hs.NewHandle(throw_location.GetThis()));
1671  Handle<mirror::ArtMethod> saved_throw_method(hs.NewHandle(throw_location.GetMethod()));
1672  // Ignore the cause throw location. TODO: should we report this as a re-throw?
1673  ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException(nullptr)));
1674  bool is_exception_reported = IsExceptionReportedToInstrumentation();
1675  ClearException();
1676  Runtime* runtime = Runtime::Current();
1677
1678  mirror::ClassLoader* cl = nullptr;
1679  if (saved_throw_method.Get() != nullptr) {
1680    cl = saved_throw_method.Get()->GetDeclaringClass()->GetClassLoader();
1681  }
1682  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(cl));
1683  Handle<mirror::Class> exception_class(
1684      hs.NewHandle(runtime->GetClassLinker()->FindClass(this, exception_class_descriptor,
1685                                                        class_loader)));
1686  if (UNLIKELY(exception_class.Get() == nullptr)) {
1687    CHECK(IsExceptionPending());
1688    LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
1689    return;
1690  }
1691
1692  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(exception_class, true, true))) {
1693    DCHECK(IsExceptionPending());
1694    return;
1695  }
1696  DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
1697  Handle<mirror::Throwable> exception(
1698      hs.NewHandle(down_cast<mirror::Throwable*>(exception_class->AllocObject(this))));
1699
1700  // If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
1701  if (exception.Get() == nullptr) {
1702    ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
1703                                         throw_location.GetDexPc());
1704    SetException(gc_safe_throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1705    SetExceptionReportedToInstrumentation(is_exception_reported);
1706    return;
1707  }
1708
1709  // Choose an appropriate constructor and set up the arguments.
1710  const char* signature;
1711  ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr);
1712  if (msg != nullptr) {
1713    // Ensure we remember this and the method over the String allocation.
1714    msg_string.reset(
1715        soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg)));
1716    if (UNLIKELY(msg_string.get() == nullptr)) {
1717      CHECK(IsExceptionPending());  // OOME.
1718      return;
1719    }
1720    if (cause.get() == nullptr) {
1721      signature = "(Ljava/lang/String;)V";
1722    } else {
1723      signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
1724    }
1725  } else {
1726    if (cause.get() == nullptr) {
1727      signature = "()V";
1728    } else {
1729      signature = "(Ljava/lang/Throwable;)V";
1730    }
1731  }
1732  mirror::ArtMethod* exception_init_method =
1733      exception_class->FindDeclaredDirectMethod("<init>", signature);
1734
1735  CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
1736      << PrettyDescriptor(exception_class_descriptor);
1737
1738  if (UNLIKELY(!runtime->IsStarted())) {
1739    // Something is trying to throw an exception without a started runtime, which is the common
1740    // case in the compiler. We won't be able to invoke the constructor of the exception, so set
1741    // the exception fields directly.
1742    if (msg != nullptr) {
1743      exception->SetDetailMessage(down_cast<mirror::String*>(DecodeJObject(msg_string.get())));
1744    }
1745    if (cause.get() != nullptr) {
1746      exception->SetCause(down_cast<mirror::Throwable*>(DecodeJObject(cause.get())));
1747    }
1748    ScopedLocalRef<jobject> trace(GetJniEnv(),
1749                                  Runtime::Current()->IsActiveTransaction()
1750                                      ? CreateInternalStackTrace<true>(soa)
1751                                      : CreateInternalStackTrace<false>(soa));
1752    if (trace.get() != nullptr) {
1753      exception->SetStackState(down_cast<mirror::Throwable*>(DecodeJObject(trace.get())));
1754    }
1755    ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
1756                                         throw_location.GetDexPc());
1757    SetException(gc_safe_throw_location, exception.Get());
1758    SetExceptionReportedToInstrumentation(is_exception_reported);
1759  } else {
1760    jvalue jv_args[2];
1761    size_t i = 0;
1762
1763    if (msg != nullptr) {
1764      jv_args[i].l = msg_string.get();
1765      ++i;
1766    }
1767    if (cause.get() != nullptr) {
1768      jv_args[i].l = cause.get();
1769      ++i;
1770    }
1771    InvokeWithJValues(soa, exception.Get(), soa.EncodeMethod(exception_init_method), jv_args);
1772    if (LIKELY(!IsExceptionPending())) {
1773      ThrowLocation gc_safe_throw_location(saved_throw_this.Get(), saved_throw_method.Get(),
1774                                           throw_location.GetDexPc());
1775      SetException(gc_safe_throw_location, exception.Get());
1776      SetExceptionReportedToInstrumentation(is_exception_reported);
1777    }
1778  }
1779}
1780
1781void Thread::ThrowOutOfMemoryError(const char* msg) {
1782  LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
1783      msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : ""));
1784  ThrowLocation throw_location = GetCurrentLocationForThrow();
1785  if (!tls32_.throwing_OutOfMemoryError) {
1786    tls32_.throwing_OutOfMemoryError = true;
1787    ThrowNewException(throw_location, "Ljava/lang/OutOfMemoryError;", msg);
1788    tls32_.throwing_OutOfMemoryError = false;
1789  } else {
1790    Dump(LOG(ERROR));  // The pre-allocated OOME has no stack, so help out and log one.
1791    SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1792  }
1793}
1794
1795Thread* Thread::CurrentFromGdb() {
1796  return Thread::Current();
1797}
1798
1799void Thread::DumpFromGdb() const {
1800  std::ostringstream ss;
1801  Dump(ss);
1802  std::string str(ss.str());
1803  // log to stderr for debugging command line processes
1804  std::cerr << str;
1805#ifdef HAVE_ANDROID_OS
1806  // log to logcat for debugging frameworks processes
1807  LOG(INFO) << str;
1808#endif
1809}
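
// The two helpers above are meant to be invoked manually from a debugger rather than by the
// runtime, for example (illustrative gdb usage):
//   (gdb) call art::Thread::CurrentFromGdb()->DumpFromGdb()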
1810
1811// Explicitly instantiate 32-bit and 64-bit thread offset dumping support.
1812template void Thread::DumpThreadOffset<4>(std::ostream& os, uint32_t offset);
1813template void Thread::DumpThreadOffset<8>(std::ostream& os, uint32_t offset);
1814
1815template<size_t ptr_size>
1816void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
1817#define DO_THREAD_OFFSET(x, y) \
1818    if (offset == x.Uint32Value()) { \
1819      os << y; \
1820      return; \
1821    }
1822  DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags")
1823  DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table")
1824  DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception")
1825  DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer")
1826  DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env")
1827  DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self")
1828  DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end")
1829  DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id")
1830  DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method")
1831  DO_THREAD_OFFSET(TopOfManagedStackPcOffset<ptr_size>(), "top_quick_frame_pc")
1832  DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame")
1833  DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope")
1834  DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
1835#undef DO_THREAD_OFFSET
1836
1837#define INTERPRETER_ENTRY_POINT_INFO(x) \
1838    if (INTERPRETER_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
1839      os << #x; \
1840      return; \
1841    }
1842  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge)
1843  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge)
1844#undef INTERPRETER_ENTRY_POINT_INFO
1845
1846#define JNI_ENTRY_POINT_INFO(x) \
1847    if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
1848      os << #x; \
1849      return; \
1850    }
1851  JNI_ENTRY_POINT_INFO(pDlsymLookup)
1852#undef JNI_ENTRY_POINT_INFO
1853
1854#define PORTABLE_ENTRY_POINT_INFO(x) \
1855    if (PORTABLE_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
1856      os << #x; \
1857      return; \
1858    }
1859  PORTABLE_ENTRY_POINT_INFO(pPortableImtConflictTrampoline)
1860  PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampoline)
1861  PORTABLE_ENTRY_POINT_INFO(pPortableToInterpreterBridge)
1862#undef PORTABLE_ENTRY_POINT_INFO
1863
1864#define QUICK_ENTRY_POINT_INFO(x) \
1865    if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
1866      os << #x; \
1867      return; \
1868    }
1869  QUICK_ENTRY_POINT_INFO(pAllocArray)
1870  QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
1871  QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck)
1872  QUICK_ENTRY_POINT_INFO(pAllocObject)
1873  QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
1874  QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
1875  QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck)
1876  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray)
1877  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck)
1878  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
1879  QUICK_ENTRY_POINT_INFO(pCheckCast)
1880  QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
1881  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess)
1882  QUICK_ENTRY_POINT_INFO(pInitializeType)
1883  QUICK_ENTRY_POINT_INFO(pResolveString)
1884  QUICK_ENTRY_POINT_INFO(pSet32Instance)
1885  QUICK_ENTRY_POINT_INFO(pSet32Static)
1886  QUICK_ENTRY_POINT_INFO(pSet64Instance)
1887  QUICK_ENTRY_POINT_INFO(pSet64Static)
1888  QUICK_ENTRY_POINT_INFO(pSetObjInstance)
1889  QUICK_ENTRY_POINT_INFO(pSetObjStatic)
1890  QUICK_ENTRY_POINT_INFO(pGet32Instance)
1891  QUICK_ENTRY_POINT_INFO(pGet32Static)
1892  QUICK_ENTRY_POINT_INFO(pGet64Instance)
1893  QUICK_ENTRY_POINT_INFO(pGet64Static)
1894  QUICK_ENTRY_POINT_INFO(pGetObjInstance)
1895  QUICK_ENTRY_POINT_INFO(pGetObjStatic)
1896  QUICK_ENTRY_POINT_INFO(pAputObjectWithNullAndBoundCheck)
1897  QUICK_ENTRY_POINT_INFO(pAputObjectWithBoundCheck)
1898  QUICK_ENTRY_POINT_INFO(pAputObject)
1899  QUICK_ENTRY_POINT_INFO(pHandleFillArrayData)
1900  QUICK_ENTRY_POINT_INFO(pJniMethodStart)
1901  QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized)
1902  QUICK_ENTRY_POINT_INFO(pJniMethodEnd)
1903  QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized)
1904  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference)
1905  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized)
1906  QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline)
1907  QUICK_ENTRY_POINT_INFO(pLockObject)
1908  QUICK_ENTRY_POINT_INFO(pUnlockObject)
1909  QUICK_ENTRY_POINT_INFO(pCmpgDouble)
1910  QUICK_ENTRY_POINT_INFO(pCmpgFloat)
1911  QUICK_ENTRY_POINT_INFO(pCmplDouble)
1912  QUICK_ENTRY_POINT_INFO(pCmplFloat)
1913  QUICK_ENTRY_POINT_INFO(pFmod)
1914  QUICK_ENTRY_POINT_INFO(pL2d)
1915  QUICK_ENTRY_POINT_INFO(pFmodf)
1916  QUICK_ENTRY_POINT_INFO(pL2f)
1917  QUICK_ENTRY_POINT_INFO(pD2iz)
1918  QUICK_ENTRY_POINT_INFO(pF2iz)
1919  QUICK_ENTRY_POINT_INFO(pIdivmod)
1920  QUICK_ENTRY_POINT_INFO(pD2l)
1921  QUICK_ENTRY_POINT_INFO(pF2l)
1922  QUICK_ENTRY_POINT_INFO(pLdiv)
1923  QUICK_ENTRY_POINT_INFO(pLmod)
1924  QUICK_ENTRY_POINT_INFO(pLmul)
1925  QUICK_ENTRY_POINT_INFO(pShlLong)
1926  QUICK_ENTRY_POINT_INFO(pShrLong)
1927  QUICK_ENTRY_POINT_INFO(pUshrLong)
1928  QUICK_ENTRY_POINT_INFO(pIndexOf)
1929  QUICK_ENTRY_POINT_INFO(pStringCompareTo)
1930  QUICK_ENTRY_POINT_INFO(pMemcpy)
1931  QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline)
1932  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline)
1933  QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge)
1934  QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck)
1935  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck)
1936  QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck)
1937  QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck)
1938  QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck)
1939  QUICK_ENTRY_POINT_INFO(pTestSuspend)
1940  QUICK_ENTRY_POINT_INFO(pDeliverException)
1941  QUICK_ENTRY_POINT_INFO(pThrowArrayBounds)
1942  QUICK_ENTRY_POINT_INFO(pThrowDivZero)
1943  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod)
1944  QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
1945  QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
1946  QUICK_ENTRY_POINT_INFO(pA64Load)
1947  QUICK_ENTRY_POINT_INFO(pA64Store)
1948#undef QUICK_ENTRY_POINT_INFO
1949
1950  os << offset;
1951}
1952
1953void Thread::QuickDeliverException() {
1954  // Get exception from thread.
1955  ThrowLocation throw_location;
1956  mirror::Throwable* exception = GetException(&throw_location);
1957  CHECK(exception != nullptr);
1958  // Don't leave exception visible while we try to find the handler, which may cause class
1959  // resolution.
1960  bool is_exception_reported = IsExceptionReportedToInstrumentation();
1961  ClearException();
1962  bool is_deoptimization = (exception == GetDeoptimizationException());
1963  QuickExceptionHandler exception_handler(this, is_deoptimization);
1964  if (is_deoptimization) {
1965    exception_handler.DeoptimizeStack();
1966  } else {
1967    exception_handler.FindCatch(throw_location, exception, is_exception_reported);
1968  }
1969  exception_handler.UpdateInstrumentationStack();
1970  exception_handler.DoLongJump();
1971  LOG(FATAL) << "UNREACHABLE";
1972}
1973
1974Context* Thread::GetLongJumpContext() {
1975  Context* result = tlsPtr_.long_jump_context;
1976  if (result == nullptr) {
1977    result = Context::Create();
1978  } else {
1979    tlsPtr_.long_jump_context = nullptr;  // Avoid context being shared.
1980    result->Reset();
1981  }
1982  return result;
1983}
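
// Note: callers are expected to hand the context back via ReleaseLongJumpContext() when done, as
// GetCurrentLocationForThrow() below does; otherwise a fresh context is allocated on the next call.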
1984
1985// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
1986//       so we don't abort in a special situation (thin-locked monitor) when dumping the Java stack.
1987struct CurrentMethodVisitor FINAL : public StackVisitor {
1988  CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
1989      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1990      : StackVisitor(thread, context), this_object_(nullptr), method_(nullptr), dex_pc_(0),
1991        abort_on_error_(abort_on_error) {}
1992  bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1993    mirror::ArtMethod* m = GetMethod();
1994    if (m->IsRuntimeMethod()) {
1995      // Continue if this is a runtime method.
1996      return true;
1997    }
1998    if (context_ != nullptr) {
1999      this_object_ = GetThisObject();
2000    }
2001    method_ = m;
2002    dex_pc_ = GetDexPc(abort_on_error_);
2003    return false;
2004  }
2005  mirror::Object* this_object_;
2006  mirror::ArtMethod* method_;
2007  uint32_t dex_pc_;
2008  const bool abort_on_error_;
2009};
2010
2011mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
2012  CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
2013  visitor.WalkStack(false);
2014  if (dex_pc != nullptr) {
2015    *dex_pc = visitor.dex_pc_;
2016  }
2017  return visitor.method_;
2018}
2019
2020ThrowLocation Thread::GetCurrentLocationForThrow() {
2021  Context* context = GetLongJumpContext();
2022  CurrentMethodVisitor visitor(this, context, true);
2023  visitor.WalkStack(false);
2024  ReleaseLongJumpContext(context);
2025  return ThrowLocation(visitor.this_object_, visitor.method_, visitor.dex_pc_);
2026}
2027
2028bool Thread::HoldsLock(mirror::Object* object) const {
2029  if (object == nullptr) {
2030    return false;
2031  }
2032  return object->GetLockOwnerThreadId() == GetThreadId();
2033}
2034
2035// RootVisitor parameters are: (mirror::Object** obj, size_t vreg, const StackVisitor* visitor).
2036template <typename RootVisitor>
2037class ReferenceMapVisitor : public StackVisitor {
2038 public:
2039  ReferenceMapVisitor(Thread* thread, Context* context, const RootVisitor& visitor)
2040      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2041      : StackVisitor(thread, context), visitor_(visitor) {}
2042
2043  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2044    if (false) {  // Flip to true for verbose logging while visiting stack roots.
2045      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
2046                << StringPrintf("@ PC:%04x", GetDexPc());
2047    }
2048    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
2049    if (shadow_frame != nullptr) {
2050      VisitShadowFrame(shadow_frame);
2051    } else {
2052      VisitQuickFrame();
2053    }
2054    return true;
2055  }
2056
2057  void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2058    mirror::ArtMethod** method_addr = shadow_frame->GetMethodAddress();
2059    visitor_(reinterpret_cast<mirror::Object**>(method_addr), 0 /*ignored*/, this);
2060    mirror::ArtMethod* m = *method_addr;
2061    DCHECK(m != nullptr);
2062    size_t num_regs = shadow_frame->NumberOfVRegs();
2063    if (m->IsNative() || shadow_frame->HasReferenceArray()) {
2064      // Handle scope for JNI methods or a reference array for the interpreter.
2065      for (size_t reg = 0; reg < num_regs; ++reg) {
2066        mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2067        if (ref != nullptr) {
2068          mirror::Object* new_ref = ref;
2069          visitor_(&new_ref, reg, this);
2070          if (new_ref != ref) {
2071            shadow_frame->SetVRegReference(reg, new_ref);
2072          }
2073        }
2074      }
2075    } else {
2076      // Java method.
2077      // The portable path uses DexGcMap and stores it in Method.native_gc_map_.
2078      const uint8_t* gc_map = m->GetNativeGcMap(sizeof(void*));
2079      CHECK(gc_map != nullptr) << PrettyMethod(m);
2080      verifier::DexPcToReferenceMap dex_gc_map(gc_map);
2081      uint32_t dex_pc = shadow_frame->GetDexPC();
2082      const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
2083      DCHECK(reg_bitmap != nullptr);
2084      num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
2085      for (size_t reg = 0; reg < num_regs; ++reg) {
2086        if (TestBitmap(reg, reg_bitmap)) {
2087          mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2088          if (ref != nullptr) {
2089            mirror::Object* new_ref = ref;
2090            visitor_(&new_ref, reg, this);
2091            if (new_ref != ref) {
2092              shadow_frame->SetVRegReference(reg, new_ref);
2093            }
2094          }
2095        }
2096      }
2097    }
2098  }
2099
2100 private:
2101  void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2102    StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame();
2103    mirror::ArtMethod* m = cur_quick_frame->AsMirrorPtr();
2104    mirror::ArtMethod* old_method = m;
2105    visitor_(reinterpret_cast<mirror::Object**>(&m), 0 /*ignored*/, this);
2106    if (m != old_method) {
2107      cur_quick_frame->Assign(m);
2108    }
2109
2110    // Process register map (which native and runtime methods don't have)
2111    if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
2112      const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
2113      CHECK(native_gc_map != nullptr) << PrettyMethod(m);
2114      const DexFile::CodeItem* code_item = m->GetCodeItem();
2115      DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be nullptr or how would we compile its instructions?
2116      NativePcOffsetToReferenceMap map(native_gc_map);
2117      size_t num_regs = std::min(map.RegWidth() * 8,
2118                                 static_cast<size_t>(code_item->registers_size_));
2119      if (num_regs > 0) {
2120        Runtime* runtime = Runtime::Current();
2121        const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
2122        uintptr_t native_pc_offset = m->NativePcOffset(GetCurrentQuickFramePc(), entry_point);
2123        const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
2124        DCHECK(reg_bitmap != nullptr);
2125        const void* code_pointer = mirror::ArtMethod::EntryPointToCodePointer(entry_point);
2126        const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
2127        QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
2128        // For all dex registers in the bitmap
2129        StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame();
2130        DCHECK(cur_quick_frame != nullptr);
2131        for (size_t reg = 0; reg < num_regs; ++reg) {
2132          // Does this register hold a reference?
2133          if (TestBitmap(reg, reg_bitmap)) {
2134            uint32_t vmap_offset;
2135            if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
2136              int vmap_reg = vmap_table.ComputeRegister(frame_info.CoreSpillMask(), vmap_offset,
2137                                                        kReferenceVReg);
2138              // This is sound as spilled GPRs will be word sized (i.e. 32- or 64-bit).
2139              mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
2140              if (*ref_addr != nullptr) {
2141                visitor_(ref_addr, reg, this);
2142              }
2143            } else {
2144              StackReference<mirror::Object>* ref_addr =
2145                  reinterpret_cast<StackReference<mirror::Object>*>(
2146                      GetVRegAddr(cur_quick_frame, code_item, frame_info.CoreSpillMask(),
2147                                  frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), reg));
2148              mirror::Object* ref = ref_addr->AsMirrorPtr();
2149              if (ref != nullptr) {
2150                mirror::Object* new_ref = ref;
2151                visitor_(&new_ref, reg, this);
2152                if (ref != new_ref) {
2153                  ref_addr->Assign(new_ref);
2154                }
2155              }
2156            }
2157          }
2158        }
2159      }
2160    }
2161  }
2162
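  // For example, register 10 maps to byte 1 (10 / kBitsPerByte) and bit 2 (10 % kBitsPerByte) of
  // the bitmap, assuming kBitsPerByte is 8.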
2163  static bool TestBitmap(size_t reg, const uint8_t* reg_vector) {
2164    return ((reg_vector[reg / kBitsPerByte] >> (reg % kBitsPerByte)) & 0x01) != 0;
2165  }
2166
2167  // Visitor for when we visit a root.
2168  const RootVisitor& visitor_;
2169};
2170
2171class RootCallbackVisitor {
2172 public:
2173  RootCallbackVisitor(RootCallback* callback, void* arg, uint32_t tid)
2174     : callback_(callback), arg_(arg), tid_(tid) {}
2175
2176  void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const {
2177    callback_(obj, arg_, JavaFrameRootInfo(tid_, stack_visitor, vreg));
2178  }
2179
2180 private:
2181  RootCallback* const callback_;
2182  void* const arg_;
2183  const uint32_t tid_;
2184};
2185
2186void Thread::SetClassLoaderOverride(mirror::ClassLoader* class_loader_override) {
2187  VerifyObject(class_loader_override);
2188  tlsPtr_.class_loader_override = class_loader_override;
2189}
2190
2191void Thread::VisitRoots(RootCallback* visitor, void* arg) {
2192  uint32_t thread_id = GetThreadId();
2193  if (tlsPtr_.opeer != nullptr) {
2194    visitor(&tlsPtr_.opeer, arg, RootInfo(kRootThreadObject, thread_id));
2195  }
2196  if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
2197    visitor(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception), arg,
2198            RootInfo(kRootNativeStack, thread_id));
2199  }
2200  tlsPtr_.throw_location.VisitRoots(visitor, arg);
2201  if (tlsPtr_.class_loader_override != nullptr) {
2202    visitor(reinterpret_cast<mirror::Object**>(&tlsPtr_.class_loader_override), arg,
2203            RootInfo(kRootNativeStack, thread_id));
2204  }
2205  if (tlsPtr_.monitor_enter_object != nullptr) {
2206    visitor(&tlsPtr_.monitor_enter_object, arg, RootInfo(kRootNativeStack, thread_id));
2207  }
2208  tlsPtr_.jni_env->locals.VisitRoots(visitor, arg, RootInfo(kRootJNILocal, thread_id));
2209  tlsPtr_.jni_env->monitors.VisitRoots(visitor, arg, RootInfo(kRootJNIMonitor, thread_id));
2210  HandleScopeVisitRoots(visitor, arg, thread_id);
2211  if (tlsPtr_.debug_invoke_req != nullptr) {
2212    tlsPtr_.debug_invoke_req->VisitRoots(visitor, arg, RootInfo(kRootDebugger, thread_id));
2213  }
2214  if (tlsPtr_.single_step_control != nullptr) {
2215    tlsPtr_.single_step_control->VisitRoots(visitor, arg, RootInfo(kRootDebugger, thread_id));
2216  }
2217  if (tlsPtr_.deoptimization_shadow_frame != nullptr) {
2218    RootCallbackVisitor visitorToCallback(visitor, arg, thread_id);
2219    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitorToCallback);
2220    for (ShadowFrame* shadow_frame = tlsPtr_.deoptimization_shadow_frame; shadow_frame != nullptr;
2221        shadow_frame = shadow_frame->GetLink()) {
2222      mapper.VisitShadowFrame(shadow_frame);
2223    }
2224  }
2225  if (tlsPtr_.shadow_frame_under_construction != nullptr) {
2226    RootCallbackVisitor visitor_to_callback(visitor, arg, thread_id);
2227    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
2228    for (ShadowFrame* shadow_frame = tlsPtr_.shadow_frame_under_construction;
2229        shadow_frame != nullptr;
2230        shadow_frame = shadow_frame->GetLink()) {
2231      mapper.VisitShadowFrame(shadow_frame);
2232    }
2233  }
2234  // Visit roots on this thread's stack
2235  Context* context = GetLongJumpContext();
2236  RootCallbackVisitor visitor_to_callback(visitor, arg, thread_id);
2237  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context, visitor_to_callback);
2238  mapper.WalkStack();
2239  ReleaseLongJumpContext(context);
2240  for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
2241    if (frame.this_object_ != nullptr) {
2242      visitor(&frame.this_object_, arg, RootInfo(kRootVMInternal, thread_id));
2243    }
2244    DCHECK(frame.method_ != nullptr);
2245    visitor(reinterpret_cast<mirror::Object**>(&frame.method_), arg,
2246            RootInfo(kRootVMInternal, thread_id));
2247  }
2248}
2249
2250static void VerifyRoot(mirror::Object** root, void* /*arg*/, const RootInfo& /*root_info*/)
2251    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2252  VerifyObject(*root);
2253}
2254
2255void Thread::VerifyStackImpl() {
2256  std::unique_ptr<Context> context(Context::Create());
2257  RootCallbackVisitor visitorToCallback(VerifyRoot, Runtime::Current()->GetHeap(), GetThreadId());
2258  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitorToCallback);
2259  mapper.WalkStack();
2260}
2261
2262// Set the stack end to the value to be used during a stack overflow.
2263void Thread::SetStackEndForStackOverflow() {
2264  // During stack overflow we allow use of the full stack.
2265  if (tlsPtr_.stack_end == tlsPtr_.stack_begin) {
2266    // However, we seem to have already extended to use the full stack.
2267    LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
2268               << GetStackOverflowReservedBytes(kRuntimeISA) << ")?";
2269    DumpStack(LOG(ERROR));
2270    LOG(FATAL) << "Recursive stack overflow.";
2271  }
2272
2273  tlsPtr_.stack_end = tlsPtr_.stack_begin;
2274
2275  // Remove the stack overflow protection if it is set up.
2276  bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks();
2277  if (implicit_stack_check) {
2278    if (!UnprotectStack()) {
2279      LOG(ERROR) << "Unable to remove stack protection for stack overflow";
2280    }
2281  }
2282}
2283
2284void Thread::SetTlab(byte* start, byte* end) {
2285  DCHECK_LE(start, end);
2286  tlsPtr_.thread_local_start = start;
2287  tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start;
2288  tlsPtr_.thread_local_end = end;
2289  tlsPtr_.thread_local_objects = 0;
2290}
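
// A thread-local allocation buffer is consumed by bumping thread_local_pos towards
// thread_local_end; roughly, each allocation advances thread_local_pos by the requested size until
// the buffer is exhausted (see HasTlab() below for the invariants on these fields).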
2291
2292bool Thread::HasTlab() const {
2293  bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
2294  if (has_tlab) {
2295    DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr);
2296  } else {
2297    DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr);
2298  }
2299  return has_tlab;
2300}
2301
2302std::ostream& operator<<(std::ostream& os, const Thread& thread) {
2303  thread.ShortDump(os);
2304  return os;
2305}
2306
2307void Thread::ProtectStack() {
2308  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
2309  VLOG(threads) << "Protecting stack at " << pregion;
2310  if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
2311    LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. "
2312        "Reason: "
2313        << strerror(errno) << " size:  " << kStackOverflowProtectedSize;
2314  }
2315}
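
// Rough picture of the low end of the stack when implicit checks are in use (the stack grows
// downwards, so these are the lowest addresses):
//   [stack_begin - kStackOverflowProtectedSize, stack_begin)  PROT_NONE guard region
//   [stack_begin, stack_end)                                   reserved space, released by
//                                                              SetStackEndForStackOverflow()
//   [stack_end, ...)                                           normally usable stack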
2316
2317bool Thread::UnprotectStack() {
2318  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
2319  VLOG(threads) << "Unprotecting stack at " << pregion;
2320  return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0;
2321}
2322
2323
2324}  // namespace art
2325