thread.cc revision bb87e0f1a52de656bc77cb01cb887e51a0e5198b
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define ATRACE_TAG ATRACE_TAG_DALVIK
18
19#include "thread.h"
20
21#include <cutils/trace.h>
22#include <pthread.h>
23#include <signal.h>
24#include <sys/resource.h>
25#include <sys/time.h>
26
27#include <algorithm>
28#include <bitset>
29#include <cerrno>
30#include <iostream>
31#include <list>
32#include <sstream>
33
34#include "arch/context.h"
35#include "base/mutex.h"
36#include "base/timing_logger.h"
37#include "base/to_str.h"
38#include "class_linker-inl.h"
39#include "class_linker.h"
40#include "debugger.h"
41#include "dex_file-inl.h"
42#include "entrypoints/entrypoint_utils.h"
43#include "entrypoints/quick/quick_alloc_entrypoints.h"
44#include "gc_map.h"
45#include "gc/accounting/card_table-inl.h"
46#include "gc/allocator/rosalloc.h"
47#include "gc/heap.h"
48#include "gc/space/space.h"
49#include "handle_scope-inl.h"
50#include "handle_scope.h"
51#include "indirect_reference_table-inl.h"
52#include "jni_internal.h"
53#include "mirror/art_field-inl.h"
54#include "mirror/art_method-inl.h"
55#include "mirror/class_loader.h"
56#include "mirror/class-inl.h"
57#include "mirror/object_array-inl.h"
58#include "mirror/stack_trace_element.h"
59#include "monitor.h"
60#include "object_lock.h"
61#include "quick_exception_handler.h"
62#include "quick/quick_method_frame_info.h"
63#include "reflection.h"
64#include "runtime.h"
65#include "scoped_thread_state_change.h"
66#include "ScopedLocalRef.h"
67#include "ScopedUtfChars.h"
68#include "stack.h"
69#include "thread_list.h"
70#include "thread-inl.h"
71#include "utils.h"
72#include "verifier/dex_gc_map.h"
73#include "verifier/method_verifier.h"
74#include "verify_object-inl.h"
75#include "vmap_table.h"
76#include "well_known_classes.h"
77
78namespace art {
79
80bool Thread::is_started_ = false;
81pthread_key_t Thread::pthread_key_self_;
82ConditionVariable* Thread::resume_cond_ = nullptr;
83const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
84
85static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
86
87void Thread::InitCardTable() {
88  tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
89}
90
91static void UnimplementedEntryPoint() {
92  UNIMPLEMENTED(FATAL);
93}
94
95void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
96                     QuickEntryPoints* qpoints);
97
98void Thread::InitTlsEntryPoints() {
99  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
100  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.interpreter_entrypoints);
101  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) +
102      sizeof(tlsPtr_.quick_entrypoints));
103  for (uintptr_t* it = begin; it != end; ++it) {
104    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
105  }
106  InitEntryPoints(&tlsPtr_.interpreter_entrypoints, &tlsPtr_.jni_entrypoints,
107                  &tlsPtr_.quick_entrypoints);
108}
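// Note on the fill above: it assumes interpreter_entrypoints, jni_entrypoints and
// quick_entrypoints are laid out contiguously in tlsPtr_ (interpreter first, quick last,
// with jni assumed to sit between them), so stamping UnimplementedEntryPoint over
// [begin, end) covers every table; any entry point InitEntryPoints forgets to set then
// aborts via UNIMPLEMENTED(FATAL) instead of jumping to garbage.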
109
110void Thread::ResetQuickAllocEntryPointsForThread() {
111  ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
112}
113
114void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
115  tlsPtr_.deoptimization_shadow_frame = sf;
116}
117
118void Thread::SetDeoptimizationReturnValue(const JValue& ret_val) {
119  tls64_.deoptimization_return_value.SetJ(ret_val.GetJ());
120}
121
122ShadowFrame* Thread::GetAndClearDeoptimizationShadowFrame(JValue* ret_val) {
123  ShadowFrame* sf = tlsPtr_.deoptimization_shadow_frame;
124  tlsPtr_.deoptimization_shadow_frame = nullptr;
125  ret_val->SetJ(tls64_.deoptimization_return_value.GetJ());
126  return sf;
127}
128
129void Thread::SetShadowFrameUnderConstruction(ShadowFrame* sf) {
130  sf->SetLink(tlsPtr_.shadow_frame_under_construction);
131  tlsPtr_.shadow_frame_under_construction = sf;
132}
133
134void Thread::ClearShadowFrameUnderConstruction() {
135  CHECK_NE(static_cast<ShadowFrame*>(nullptr), tlsPtr_.shadow_frame_under_construction);
136  tlsPtr_.shadow_frame_under_construction = tlsPtr_.shadow_frame_under_construction->GetLink();
137}
138
139void Thread::InitTid() {
140  tls32_.tid = ::art::GetTid();
141}
142
143void Thread::InitAfterFork() {
144  // One thread (us) survived the fork, but we have a new tid so we need to
145  // update the value stashed in this Thread*.
146  InitTid();
147}
148
149void* Thread::CreateCallback(void* arg) {
150  Thread* self = reinterpret_cast<Thread*>(arg);
151  Runtime* runtime = Runtime::Current();
152  if (runtime == nullptr) {
153    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
154    return nullptr;
155  }
156  {
157    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
158    //       after self->Init().
159    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
160    // Check that if we got here we cannot be shutting down (as shutdown should never have started
161    // while threads are being born).
162    CHECK(!runtime->IsShuttingDownLocked());
163    CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM()));
164    Runtime::Current()->EndThreadBirth();
165  }
166  {
167    ScopedObjectAccess soa(self);
168
169    // Copy peer into self, deleting global reference when done.
170    CHECK(self->tlsPtr_.jpeer != nullptr);
171    self->tlsPtr_.opeer = soa.Decode<mirror::Object*>(self->tlsPtr_.jpeer);
172    self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
173    self->tlsPtr_.jpeer = nullptr;
174    self->SetThreadName(self->GetThreadName(soa)->ToModifiedUtf8().c_str());
175
176    mirror::ArtField* priorityField = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority);
177    self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));
178    Dbg::PostThreadStart(self);
179
180    // Invoke the 'run' method of our java.lang.Thread.
181    mirror::Object* receiver = self->tlsPtr_.opeer;
182    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
183    InvokeVirtualOrInterfaceWithJValues(soa, receiver, mid, nullptr);
184  }
185  // Detach and delete self.
186  Runtime::Current()->GetThreadList()->Unregister(self);
187
188  return nullptr;
189}
190
191Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
192                                  mirror::Object* thread_peer) {
193  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
194  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
195  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
196  // to stop it from going away.
197  if (kIsDebugBuild) {
198    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
199    if (result != nullptr && !result->IsSuspended()) {
200      Locks::thread_list_lock_->AssertHeld(soa.Self());
201    }
202  }
203  return result;
204}
205
206Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
207                                  jobject java_thread) {
208  return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread));
209}
210
211static size_t FixStackSize(size_t stack_size) {
212  // A stack size of zero means "use the default".
213  if (stack_size == 0) {
214    stack_size = Runtime::Current()->GetDefaultStackSize();
215  }
216
217  // Dalvik used the bionic pthread default stack size for native threads,
218  // so include that here to support apps that expect large native stacks.
219  stack_size += 1 * MB;
220
221  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
222  if (stack_size < PTHREAD_STACK_MIN) {
223    stack_size = PTHREAD_STACK_MIN;
224  }
225
226  if (Runtime::Current()->ExplicitStackOverflowChecks()) {
227    // It's likely that callers are trying to ensure they have at least a certain amount of
228    // stack space, so we should add our reserved space on top of what they requested, rather
229    // than implicitly take it away from them.
230    stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
231  } else {
232    // If we are going to use implicit stack checks, allocate space for the protected
233    // region at the bottom of the stack.
234    stack_size += Thread::kStackOverflowImplicitCheckSize +
235        GetStackOverflowReservedBytes(kRuntimeISA);
236  }
237
238  // Some systems require the stack size to be a multiple of the system page size, so round up.
239  stack_size = RoundUp(stack_size, kPageSize);
240
241  return stack_size;
242}
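// Rough worked example of the sizing above (figures are illustrative; the reserved-byte
// values are ISA-dependent):
//   stack_size == 0          -> start from the runtime default (say 1 MB)
//   + 1 MB                   -> bionic-compatible headroom for native code
//   + reserved bytes         -> explicit checks add GetStackOverflowReservedBytes only;
//                               implicit checks add kStackOverflowImplicitCheckSize too
//   rounded up to kPageSize  -> final value passed to pthread_attr_setstacksize.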
243
244// Global variable to prevent the compiler optimizing away the page reads for the stack.
245uint8_t dont_optimize_this;
246
247// Install a protected region in the stack.  This is used to trigger a SIGSEGV if a stack
248// overflow is detected.  It is located right below the stack_begin_.
249//
250// There is a little complexity here that deserves a special mention.  On some
251// architectures, the stack is created using a VM_GROWSDOWN flag
252// to prevent memory being allocated when it's not needed.  This flag makes the
253// kernel only allocate memory for the stack by growing down in memory.  Because we
254// want to put an mprotected region far away from that at the stack top, we need
255// to make sure the pages for the stack are mapped in before we call mprotect.  We do
256// this by reading every page from the stack bottom (highest address) to the stack top.
257// We then madvise this away.
258void Thread::InstallImplicitProtection() {
259  uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
260  uint8_t* stack_himem = tlsPtr_.stack_end;
261  uint8_t* stack_top = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(&stack_himem) &
262      ~(kPageSize - 1));    // Page containing current top of stack.
263
264  // First remove the protection on the protected region as we will want to read and
265  // write it.  This may fail (on the first attempt when the stack is not mapped)
266  // but we ignore that.
267  UnprotectStack();
268
269  // Map in the stack.  This must be done by reading from the
270  // current stack pointer downwards as the stack may be mapped using VM_GROWSDOWN
271  // in the kernel.  Any access more than a page below the current SP might cause
272  // a segv.
273
274  // Read every page from the high address to the low.
275  for (uint8_t* p = stack_top; p >= pregion; p -= kPageSize) {
276    dont_optimize_this = *p;
277  }
278
279  VLOG(threads) << "installing stack protected region at " << std::hex <<
280      static_cast<void*>(pregion) << " to " <<
281      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
282
283  // Protect the bottom of the stack to prevent read/write to it.
284  ProtectStack();
285
286  // Tell the kernel that we won't be needing these pages any more.
287  // NB. madvise will probably write zeroes into the memory (on Linux it does).
288  uint32_t unwanted_size = stack_top - pregion - kPageSize;
289  madvise(pregion, unwanted_size, MADV_DONTNEED);
290}
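// Rough sketch of the resulting layout (addresses decrease downwards; not to scale):
//
//   +--------------------------+  <- stack base (highest address; SP starts near here)
//   |  pages in active use     |
//   +--------------------------+  <- stack_end_ (stack_begin_ + reserved bytes)
//   |  overflow reserve        |
//   +--------------------------+  <- stack_begin_
//   |  mprotect'ed region      |     kStackOverflowProtectedSize; any access faults
//   +--------------------------+  <- pregion
//   |  pthread guard region    |     (lowest addresses)
//   +--------------------------+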
291
292void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
293  CHECK(java_peer != nullptr);
294  Thread* self = static_cast<JNIEnvExt*>(env)->self;
295  Runtime* runtime = Runtime::Current();
296
297  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
298  bool thread_start_during_shutdown = false;
299  {
300    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
301    if (runtime->IsShuttingDownLocked()) {
302      thread_start_during_shutdown = true;
303    } else {
304      runtime->StartThreadBirth();
305    }
306  }
307  if (thread_start_during_shutdown) {
308    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
309    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
310    return;
311  }
312
313  Thread* child_thread = new Thread(is_daemon);
314  // Use global JNI ref to hold peer live while child thread starts.
315  child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
316  stack_size = FixStackSize(stack_size);
317
318  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to
319  // assign it.
320  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
321                    reinterpret_cast<jlong>(child_thread));
322
323  pthread_t new_pthread;
324  pthread_attr_t attr;
325  CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
326  CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED");
327  CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
328  int pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, child_thread);
329  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");
330
331  if (pthread_create_result != 0) {
332    // pthread_create(3) failed, so clean up.
333    {
334      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
335      runtime->EndThreadBirth();
336    }
337    // Manually delete the global reference since Thread::Init will not have been run.
338    env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer);
339    child_thread->tlsPtr_.jpeer = nullptr;
340    delete child_thread;
341    child_thread = nullptr;
342    // TODO: remove from thread group?
343    env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
344    {
345      std::string msg(StringPrintf("pthread_create (%s stack) failed: %s",
346                                   PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
347      ScopedObjectAccess soa(env);
348      soa.Self()->ThrowOutOfMemoryError(msg.c_str());
349    }
350  }
351}
352
353bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
354  // This function does all the initialization that must be run by the native thread it applies to.
355  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
356  // we can handshake with the corresponding native thread when it's ready.) Check this native
357  // thread hasn't been through here already...
358  CHECK(Thread::Current() == nullptr);
359
360  // Set pthread_self_ ahead of pthread_setspecific, which makes Thread::Current() work; this
361  // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
362  tlsPtr_.pthread_self = pthread_self();
363  CHECK(is_started_);
364
365  SetUpAlternateSignalStack();
366  if (!InitStackHwm()) {
367    return false;
368  }
369  InitCpu();
370  InitTlsEntryPoints();
371  RemoveSuspendTrigger();
372  InitCardTable();
373  InitTid();
374
375  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
376  DCHECK_EQ(Thread::Current(), this);
377
378  tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
379
380  tlsPtr_.jni_env = new JNIEnvExt(this, java_vm);
381  thread_list->Register(this);
382  return true;
383}
384
385Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
386                       bool create_peer) {
387  Runtime* runtime = Runtime::Current();
388  if (runtime == nullptr) {
389    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
390    return nullptr;
391  }
392  Thread* self;
393  {
394    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
395    if (runtime->IsShuttingDownLocked()) {
396      LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
397      return nullptr;
398    } else {
399      Runtime::Current()->StartThreadBirth();
400      self = new Thread(as_daemon);
401      bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
402      Runtime::Current()->EndThreadBirth();
403      if (!init_success) {
404        delete self;
405        return nullptr;
406      }
407    }
408  }
409
410  CHECK_NE(self->GetState(), kRunnable);
411  self->SetState(kNative);
412
413  // If we're the main thread, ClassLinker won't be created until after we're attached,
414  // so that thread needs a two-stage attach. Regular threads don't need this hack.
415  // In the compiler, all threads need this hack, because no-one's going to be getting
416  // a native peer!
417  if (create_peer) {
418    self->CreatePeer(thread_name, as_daemon, thread_group);
419  } else {
420    // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
421    if (thread_name != nullptr) {
422      self->tlsPtr_.name->assign(thread_name);
423      ::art::SetThreadName(thread_name);
424    } else if (self->GetJniEnv()->check_jni) {
425      LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
426    }
427  }
428
429  {
430    ScopedObjectAccess soa(self);
431    Dbg::PostThreadStart(self);
432  }
433
434  return self;
435}
436
437void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
438  Runtime* runtime = Runtime::Current();
439  CHECK(runtime->IsStarted());
440  JNIEnv* env = tlsPtr_.jni_env;
441
442  if (thread_group == nullptr) {
443    thread_group = runtime->GetMainThreadGroup();
444  }
445  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
446  // thread_name can be null here if the NewStringUTF allocation failed due to OOM (b/18297817).
447  if (name != nullptr && thread_name.get() == nullptr) {
448    CHECK(IsExceptionPending());
449    return;
450  }
451  jint thread_priority = GetNativePriority();
452  jboolean thread_is_daemon = as_daemon;
453
454  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
455  if (peer.get() == nullptr) {
456    CHECK(IsExceptionPending());
457    return;
458  }
459  {
460    ScopedObjectAccess soa(this);
461    tlsPtr_.opeer = soa.Decode<mirror::Object*>(peer.get());
462  }
463  env->CallNonvirtualVoidMethod(peer.get(),
464                                WellKnownClasses::java_lang_Thread,
465                                WellKnownClasses::java_lang_Thread_init,
466                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
467  AssertNoPendingException();
468
469  Thread* self = this;
470  DCHECK_EQ(self, Thread::Current());
471  env->SetLongField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
472                    reinterpret_cast<jlong>(self));
473
474  ScopedObjectAccess soa(self);
475  StackHandleScope<1> hs(self);
476  MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName(soa)));
477  if (peer_thread_name.Get() == nullptr) {
478    // The Thread constructor should have set the Thread.name to a
479    // non-null value. However, because we can run without code
480    // available (in the compiler, in tests), we manually assign the
481    // fields the constructor should have set.
482    if (runtime->IsActiveTransaction()) {
483      InitPeer<true>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
484    } else {
485      InitPeer<false>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
486    }
487    peer_thread_name.Assign(GetThreadName(soa));
488  }
489  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
490  if (peer_thread_name.Get() != nullptr) {
491    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
492  }
493}
494
495template<bool kTransactionActive>
496void Thread::InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
497                      jobject thread_name, jint thread_priority) {
498  soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
499      SetBoolean<kTransactionActive>(tlsPtr_.opeer, thread_is_daemon);
500  soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
501      SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object*>(thread_group));
502  soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
503      SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object*>(thread_name));
504  soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
505      SetInt<kTransactionActive>(tlsPtr_.opeer, thread_priority);
506}
507
508void Thread::SetThreadName(const char* name) {
509  tlsPtr_.name->assign(name);
510  ::art::SetThreadName(name);
511  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
512}
513
514bool Thread::InitStackHwm() {
515  void* read_stack_base;
516  size_t read_stack_size;
517  size_t read_guard_size;
518  GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size);
519
520  tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base);
521  tlsPtr_.stack_size = read_stack_size;
522
523  // The minimum stack size we can cope with is the overflow reserved bytes (typically
524  // 8K) + the protected region size (4K) + another page (4K).  Typically this will
525  // be 8+4+4 = 16K.  The thread won't be able to do much with this stack: even the GC takes
526  // between 8K and 12K.
527  uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
528    + 4 * KB;
529  if (read_stack_size <= min_stack) {
530    // Note, as we know the stack is small, avoid operations that could use a lot of stack.
531    LogMessage::LogLineLowStack(__PRETTY_FUNCTION__, __LINE__, ERROR,
532                                "Attempt to attach a thread with a too-small stack");
533    return false;
534  }
535
536  // This is included in the SIGQUIT output, but it's useful here for thread debugging.
537  VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)",
538                                read_stack_base,
539                                PrettySize(read_stack_size).c_str(),
540                                PrettySize(read_guard_size).c_str());
541
542  // Set stack_end_ to the bottom of the stack, saving space for stack overflow handling.
543
544  Runtime* runtime = Runtime::Current();
545  bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
546  ResetDefaultStackEnd();
547
548  // Install the protected region if we are doing implicit overflow checks.
549  if (implicit_stack_check) {
550    // The thread might already have a protected region at the bottom.  We need
551    // to install our own region, so move the limits of the stack to make
552    // room for it.
553
554    tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
555    tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
556    tlsPtr_.stack_size -= read_guard_size;
557
558    InstallImplicitProtection();
559  }
560
561  // Sanity check.
562  int stack_variable;
563  CHECK_GT(&stack_variable, reinterpret_cast<void*>(tlsPtr_.stack_end));
564
565  return true;
566}
567
568void Thread::ShortDump(std::ostream& os) const {
569  os << "Thread[";
570  if (GetThreadId() != 0) {
571    // If we're in kStarting, we won't have a thin lock id or tid yet.
572    os << GetThreadId()
573             << ",tid=" << GetTid() << ',';
574  }
575  os << GetState()
576           << ",Thread*=" << this
577           << ",peer=" << tlsPtr_.opeer
578           << ",\"" << *tlsPtr_.name << "\""
579           << "]";
580}
581
582void Thread::Dump(std::ostream& os) const {
583  DumpState(os);
584  DumpStack(os);
585}
586
587mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
588  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
589  return (tlsPtr_.opeer != nullptr) ? reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
590}
591
592void Thread::GetThreadName(std::string& name) const {
593  name.assign(*tlsPtr_.name);
594}
595
596uint64_t Thread::GetCpuMicroTime() const {
597#if defined(__linux__)
598  clockid_t cpu_clock_id;
599  pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
600  timespec now;
601  clock_gettime(cpu_clock_id, &now);
602  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
603#else  // __APPLE__
604  UNIMPLEMENTED(WARNING);
605  return -1;
606#endif
607}
608
609// Attempt to rectify locks so that we dump the thread list with the required locks held before exiting.
610static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
611  LOG(ERROR) << *thread << " suspend count already zero.";
612  Locks::thread_suspend_count_lock_->Unlock(self);
613  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
614    Locks::mutator_lock_->SharedTryLock(self);
615    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
616      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
617    }
618  }
619  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
620    Locks::thread_list_lock_->TryLock(self);
621    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
622      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
623    }
624  }
625  std::ostringstream ss;
626  Runtime::Current()->GetThreadList()->Dump(ss);
627  LOG(FATAL) << ss.str();
628}
629
630void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
631  if (kIsDebugBuild) {
632    DCHECK(delta == -1 || delta == +1 || delta == -tls32_.debug_suspend_count)
633          << delta << " " << tls32_.debug_suspend_count << " " << this;
634    DCHECK_GE(tls32_.suspend_count, tls32_.debug_suspend_count) << this;
635    Locks::thread_suspend_count_lock_->AssertHeld(self);
636    if (this != self && !IsSuspended()) {
637      Locks::thread_list_lock_->AssertHeld(self);
638    }
639  }
640  if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
641    UnsafeLogFatalForSuspendCount(self, this);
642    return;
643  }
644
645  tls32_.suspend_count += delta;
646  if (for_debugger) {
647    tls32_.debug_suspend_count += delta;
648  }
649
650  if (tls32_.suspend_count == 0) {
651    AtomicClearFlag(kSuspendRequest);
652  } else {
653    AtomicSetFlag(kSuspendRequest);
654    TriggerSuspend();
655  }
656}
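// Typical caller pattern, simplified for illustration (the real callers live in
// ThreadList and hold thread_suspend_count_lock_ as asserted above):
//   thread->ModifySuspendCount(self, +1, false);   // ask the thread to suspend
//   ...wait until the thread observes kSuspendRequest and parks itself...
//   thread->ModifySuspendCount(self, -1, false);   // let it run again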
657
658void Thread::RunCheckpointFunction() {
659  Closure *checkpoints[kMaxCheckpoints];
660
661  // Grab the suspend_count lock and copy the current set of
662  // checkpoints.  Then clear the list and the flag.  The RequestCheckpoint
663  // function will also grab this lock so we prevent a race between setting
664  // the kCheckpointRequest flag and clearing it.
665  {
666    MutexLock mu(this, *Locks::thread_suspend_count_lock_);
667    for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
668      checkpoints[i] = tlsPtr_.checkpoint_functions[i];
669      tlsPtr_.checkpoint_functions[i] = nullptr;
670    }
671    AtomicClearFlag(kCheckpointRequest);
672  }
673
674  // Outside the lock, run all the checkpoint functions that
675  // we collected.
676  bool found_checkpoint = false;
677  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
678    if (checkpoints[i] != nullptr) {
679      ATRACE_BEGIN("Checkpoint function");
680      checkpoints[i]->Run(this);
681      ATRACE_END();
682      found_checkpoint = true;
683    }
684  }
685  CHECK(found_checkpoint);
686}
687
688bool Thread::RequestCheckpoint(Closure* function) {
689  union StateAndFlags old_state_and_flags;
690  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
691  if (old_state_and_flags.as_struct.state != kRunnable) {
692    return false;  // Fail, thread is suspended and so can't run a checkpoint.
693  }
694
695  uint32_t available_checkpoint = kMaxCheckpoints;
696  for (uint32_t i = 0 ; i < kMaxCheckpoints; ++i) {
697    if (tlsPtr_.checkpoint_functions[i] == nullptr) {
698      available_checkpoint = i;
699      break;
700    }
701  }
702  if (available_checkpoint == kMaxCheckpoints) {
703    // No checkpoint function slots are available; we can't run a checkpoint.
704    return false;
705  }
706  tlsPtr_.checkpoint_functions[available_checkpoint] = function;
707
708  // Checkpoint function installed; now install the flag bit.
709  // We must be runnable to request a checkpoint.
710  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
711  union StateAndFlags new_state_and_flags;
712  new_state_and_flags.as_int = old_state_and_flags.as_int;
713  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
714  bool success =
715      tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(old_state_and_flags.as_int,
716                                                                                       new_state_and_flags.as_int);
717  if (UNLIKELY(!success)) {
718    // The thread changed state before the checkpoint was installed.
719    CHECK_EQ(tlsPtr_.checkpoint_functions[available_checkpoint], function);
720    tlsPtr_.checkpoint_functions[available_checkpoint] = nullptr;
721  } else {
722    CHECK_EQ(ReadFlag(kCheckpointRequest), true);
723    TriggerSuspend();
724  }
725  return success;
726}
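// Illustrative caller sketch (the closure type is hypothetical): a checkpoint only runs
// on a thread that stays runnable, so callers must handle the false return themselves:
//   SomeClosure closure;
//   if (!target->RequestCheckpoint(&closure)) {
//     // target was not runnable - it is suspended (or suspending), so the caller can
//     // inspect it directly instead of waiting for the checkpoint to execute.
//   }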
727
728Closure* Thread::GetFlipFunction() {
729  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
730  Closure* func;
731  do {
732    func = atomic_func->LoadRelaxed();
733    if (func == nullptr) {
734      return nullptr;
735    }
736  } while (!atomic_func->CompareExchangeWeakSequentiallyConsistent(func, nullptr));
737  DCHECK(func != nullptr);
738  return func;
739}
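// Note: the weak compare-and-swap loop above means that when the flipping thread and the
// target race to claim the closure, at most one of them sees a non-null value and runs it.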
740
741void Thread::SetFlipFunction(Closure* function) {
742  CHECK(function != nullptr);
743  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
744  atomic_func->StoreSequentiallyConsistent(function);
745}
746
747void Thread::FullSuspendCheck() {
748  VLOG(threads) << this << " self-suspending";
749  ATRACE_BEGIN("Full suspend check");
750  // Make thread appear suspended to other threads, release mutator_lock_.
751  tls32_.suspended_at_suspend_check = true;
752  TransitionFromRunnableToSuspended(kSuspended);
753  // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
754  TransitionFromSuspendedToRunnable();
755  tls32_.suspended_at_suspend_check = false;
756  ATRACE_END();
757  VLOG(threads) << this << " self-reviving";
758}
759
760void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
761  std::string group_name;
762  int priority;
763  bool is_daemon = false;
764  Thread* self = Thread::Current();
765
766  // If flip_function is not null, it means a checkpoint ran before this thread woke up
767  // to execute the flip function, and the thread's roots haven't been forwarded yet.
768  // So the following access to the roots (opeer or methods in the frames) would be
769  // bad. Run it here.
770  // TODO: clean up.
771  if (thread != nullptr) {
772    ScopedObjectAccessUnchecked soa(self);
773    Thread* this_thread = const_cast<Thread*>(thread);
774    Closure* flip_func = this_thread->GetFlipFunction();
775    if (flip_func != nullptr) {
776      flip_func->Run(this_thread);
777    }
778  }
779
780  // Don't do this if we are aborting since the GC may have all the threads suspended. This will
781  // cause ScopedObjectAccessUnchecked to deadlock.
782  if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
783    ScopedObjectAccessUnchecked soa(self);
784    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)
785        ->GetInt(thread->tlsPtr_.opeer);
786    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)
787        ->GetBoolean(thread->tlsPtr_.opeer);
788
789    mirror::Object* thread_group =
790        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->tlsPtr_.opeer);
791
792    if (thread_group != nullptr) {
793      mirror::ArtField* group_name_field =
794          soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
795      mirror::String* group_name_string =
796          reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
797      group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
798    }
799  } else {
800    priority = GetNativePriority();
801  }
802
803  std::string scheduler_group_name(GetSchedulerGroupName(tid));
804  if (scheduler_group_name.empty()) {
805    scheduler_group_name = "default";
806  }
807
808  if (thread != nullptr) {
809    os << '"' << *thread->tlsPtr_.name << '"';
810    if (is_daemon) {
811      os << " daemon";
812    }
813    os << " prio=" << priority
814       << " tid=" << thread->GetThreadId()
815       << " " << thread->GetState();
816    if (thread->IsStillStarting()) {
817      os << " (still starting up)";
818    }
819    os << "\n";
820  } else {
821    os << '"' << ::art::GetThreadName(tid) << '"'
822       << " prio=" << priority
823       << " (not attached)\n";
824  }
825
826  if (thread != nullptr) {
827    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
828    os << "  | group=\"" << group_name << "\""
829       << " sCount=" << thread->tls32_.suspend_count
830       << " dsCount=" << thread->tls32_.debug_suspend_count
831       << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer)
832       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
833  }
834
835  os << "  | sysTid=" << tid
836     << " nice=" << getpriority(PRIO_PROCESS, tid)
837     << " cgrp=" << scheduler_group_name;
838  if (thread != nullptr) {
839    int policy;
840    sched_param sp;
841    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp),
842                       __FUNCTION__);
843    os << " sched=" << policy << "/" << sp.sched_priority
844       << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self);
845  }
846  os << "\n";
847
848  // Grab the scheduler stats for this thread.
849  std::string scheduler_stats;
850  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
851    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
852  } else {
853    scheduler_stats = "0 0 0";
854  }
855
856  char native_thread_state = '?';
857  int utime = 0;
858  int stime = 0;
859  int task_cpu = 0;
860  GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu);
861
862  os << "  | state=" << native_thread_state
863     << " schedstat=( " << scheduler_stats << " )"
864     << " utm=" << utime
865     << " stm=" << stime
866     << " core=" << task_cpu
867     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
868  if (thread != nullptr) {
869    os << "  | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-"
870        << reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize="
871        << PrettySize(thread->tlsPtr_.stack_size) << "\n";
872    // Dump the held mutexes.
873    os << "  | held mutexes=";
874    for (size_t i = 0; i < kLockLevelCount; ++i) {
875      if (i != kMonitorLock) {
876        BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i));
877        if (mutex != nullptr) {
878          os << " \"" << mutex->GetName() << "\"";
879          if (mutex->IsReaderWriterMutex()) {
880            ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex);
881            if (rw_mutex->GetExclusiveOwnerTid() == static_cast<uint64_t>(tid)) {
882              os << "(exclusive held)";
883            } else {
884              os << "(shared held)";
885            }
886          }
887        }
888      }
889    }
890    os << "\n";
891  }
892}
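// Example of the header block this produces (all values are made up for illustration):
//   "main" prio=5 tid=1 Runnable
//     | group="main" sCount=0 dsCount=0 obj=0x12c00000 self=0x7f8e4c4000
//     | sysTid=1234 nice=0 cgrp=default sched=0/0 handle=0x7f8e4c9000
//     | state=R schedstat=( 0 0 0 ) utm=10 stm=3 core=1 HZ=100
//     | stack=0x7f8e3c2000-0x7f8e4c2000 stackSize=1037KB
//     | held mutexes=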
893
894void Thread::DumpState(std::ostream& os) const {
895  Thread::DumpState(os, this, GetTid());
896}
897
898struct StackDumpVisitor : public StackVisitor {
899  StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
900      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
901      : StackVisitor(thread_in, context), os(os_in), thread(thread_in),
902        can_allocate(can_allocate_in), last_method(nullptr), last_line_number(0),
903        repetition_count(0), frame_count(0) {
904  }
905
906  virtual ~StackDumpVisitor() {
907    if (frame_count == 0) {
908      os << "  (no managed stack frames)\n";
909    }
910  }
911
912  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
913    mirror::ArtMethod* m = GetMethod();
914    if (m->IsRuntimeMethod()) {
915      return true;
916    }
917    const int kMaxRepetition = 3;
918    mirror::Class* c = m->GetDeclaringClass();
919    mirror::DexCache* dex_cache = c->GetDexCache();
920    int line_number = -1;
921    if (dex_cache != nullptr) {  // be tolerant of bad input
922      const DexFile& dex_file = *dex_cache->GetDexFile();
923      line_number = dex_file.GetLineNumFromPC(m, GetDexPc(false));
924    }
925    if (line_number == last_line_number && last_method == m) {
926      ++repetition_count;
927    } else {
928      if (repetition_count >= kMaxRepetition) {
929        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
930      }
931      repetition_count = 0;
932      last_line_number = line_number;
933      last_method = m;
934    }
935    if (repetition_count < kMaxRepetition) {
936      os << "  at " << PrettyMethod(m, false);
937      if (m->IsNative()) {
938        os << "(Native method)";
939      } else {
940        const char* source_file(m->GetDeclaringClassSourceFile());
941        os << "(" << (source_file != nullptr ? source_file : "unavailable")
942           << ":" << line_number << ")";
943      }
944      os << "\n";
945      if (frame_count == 0) {
946        Monitor::DescribeWait(os, thread);
947      }
948      if (can_allocate) {
949        // Visit locks, but do not abort on errors; aborting here would trigger a nested abort.
950        Monitor::VisitLocks(this, DumpLockedObject, &os, false);
951      }
952    }
953
954    ++frame_count;
955    return true;
956  }
957
958  static void DumpLockedObject(mirror::Object* o, void* context)
959      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
960    std::ostream& os = *reinterpret_cast<std::ostream*>(context);
961    os << "  - locked ";
962    if (o == nullptr) {
963      os << "an unknown object";
964    } else {
965      if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
966          Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
967        // Getting the identity hashcode here would result in lock inflation and suspension of the
968        // current thread, which isn't safe if this is the only runnable thread.
969        os << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)", reinterpret_cast<intptr_t>(o),
970                           PrettyTypeOf(o).c_str());
971      } else {
972        // IdentityHashCode can cause thread suspension, which would invalidate o if it moved. So
973        // we get the pretty type before we call IdentityHashCode.
974        const std::string pretty_type(PrettyTypeOf(o));
975        os << StringPrintf("<0x%08x> (a %s)", o->IdentityHashCode(), pretty_type.c_str());
976      }
977    }
978    os << "\n";
979  }
980
981  std::ostream& os;
982  const Thread* thread;
983  const bool can_allocate;
984  mirror::ArtMethod* last_method;
985  int last_line_number;
986  int repetition_count;
987  int frame_count;
988};
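// Example of the frame lines this visitor emits ("  at ..." from VisitFrame, "  - locked ..."
// from DumpLockedObject; the class name, line number and hash are made up):
//   at com.example.Worker.run(Worker.java:42)
//   - locked <0x0a1b2c3e> (a java.lang.Object)
//   at java.lang.Thread.run(Thread.java:818)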
989
990static bool ShouldShowNativeStack(const Thread* thread)
991    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
992  ThreadState state = thread->GetState();
993
994  // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
995  if (state > kWaiting && state < kStarting) {
996    return true;
997  }
998
999  // In an Object.wait variant or Thread.sleep? That's not interesting.
1000  if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
1001    return false;
1002  }
1003
1004  // Threads with no managed stack frames should be shown.
1005  const ManagedStack* managed_stack = thread->GetManagedStack();
1006  if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
1007      managed_stack->GetTopShadowFrame() == nullptr)) {
1008    return true;
1009  }
1010
1011  // In some other native method? That's interesting.
1012  // We don't just check kNative because native methods will be in state kSuspended if they're
1013  // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
1014  // thread-startup states if it's early enough in their life cycle (http://b/7432159).
1015  mirror::ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
1016  return current_method != nullptr && current_method->IsNative();
1017}
1018
1019void Thread::DumpJavaStack(std::ostream& os) const {
1020  // If flip_function is not null, it means a checkpoint ran before this thread woke up
1021  // to execute the flip function, and the thread's roots haven't been forwarded yet.
1022  // So the following access to the roots (locks or methods in the frames) would be
1023  // bad. Run it here.
1024  // TODO: clean up.
1025  {
1026    Thread* this_thread = const_cast<Thread*>(this);
1027    Closure* flip_func = this_thread->GetFlipFunction();
1028    if (flip_func != nullptr) {
1029      flip_func->Run(this_thread);
1030    }
1031  }
1032
1033  // Dumping the Java stack involves the verifier for locks. The verifier operates under the
1034  // assumption that there is no exception pending on entry. Thus, stash any pending exception.
1035  // Thread::Current() instead of this in case a thread is dumping the stack of another suspended
1036  // thread.
1037  StackHandleScope<1> scope(Thread::Current());
1038  Handle<mirror::Throwable> exc;
1039  bool have_exception = false;
1040  if (IsExceptionPending()) {
1041    exc = scope.NewHandle(GetException());
1042    const_cast<Thread*>(this)->ClearException();
1043    have_exception = true;
1044  }
1045
1046  std::unique_ptr<Context> context(Context::Create());
1047  StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
1048                          !tls32_.throwing_OutOfMemoryError);
1049  dumper.WalkStack();
1050
1051  if (have_exception) {
1052    const_cast<Thread*>(this)->SetException(exc.Get());
1053  }
1054}
1055
1056void Thread::DumpStack(std::ostream& os) const {
1057  // TODO: we call this code when dying but may not have suspended the thread ourselves. The
1058  //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
1059  //       the race with the thread_suspend_count_lock_).
1060  bool dump_for_abort = (gAborting > 0);
1061  bool safe_to_dump = (this == Thread::Current() || IsSuspended());
1062  if (!kIsDebugBuild) {
1063    // We always want to dump the stack for an abort; however, there is no point dumping another
1064    // thread's stack in debug builds where we'll hit the not-suspended check in the stack walk.
1065    safe_to_dump = (safe_to_dump || dump_for_abort);
1066  }
1067  if (safe_to_dump) {
1068    // If we're currently in native code, dump that stack before dumping the managed stack.
1069    if (dump_for_abort || ShouldShowNativeStack(this)) {
1070      DumpKernelStack(os, GetTid(), "  kernel: ", false);
1071      // b/20040863. Temporary workaround for x86 libunwind issue.
1072#if defined(__i386__) && defined(HAVE_ANDROID_OS)
1073      os << "Cannot dump native stack. b/20040863.\n";
1074#else
1075      DumpNativeStack(os, GetTid(), "  native: ", GetCurrentMethod(nullptr, !dump_for_abort));
1076#endif
1077    }
1078    DumpJavaStack(os);
1079  } else {
1080    os << "Not able to dump stack of thread that isn't suspended";
1081  }
1082}
1083
1084void Thread::ThreadExitCallback(void* arg) {
1085  Thread* self = reinterpret_cast<Thread*>(arg);
1086  if (self->tls32_.thread_exit_check_count == 0) {
1087    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
1088        "going to use a pthread_key_create destructor?): " << *self;
1089    CHECK(is_started_);
1090    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
1091    self->tls32_.thread_exit_check_count = 1;
1092  } else {
1093    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
1094  }
1095}
1096
1097void Thread::Startup() {
1098  CHECK(!is_started_);
1099  is_started_ = true;
1100  {
1101    // MutexLock to keep annotalysis happy.
1102    //
1103    // Note we use nullptr for the thread because Thread::Current can
1104    // return garbage: is_started_ is already true, but
1105    // Thread::pthread_key_self_ is not yet initialized.
1106    // This was seen on glibc.
1107    MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_);
1108    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
1109                                         *Locks::thread_suspend_count_lock_);
1110  }
1111
1112  // Allocate a TLS slot.
1113  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback),
1114                     "self key");
1115
1116  // Double-check the TLS slot allocation.
1117  if (pthread_getspecific(pthread_key_self_) != nullptr) {
1118    LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
1119  }
1120}
1121
1122void Thread::FinishStartup() {
1123  Runtime* runtime = Runtime::Current();
1124  CHECK(runtime->IsStarted());
1125
1126  // Finish attaching the main thread.
1127  ScopedObjectAccess soa(Thread::Current());
1128  Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
1129
1130  Runtime::Current()->GetClassLinker()->RunRootClinits();
1131}
1132
1133void Thread::Shutdown() {
1134  CHECK(is_started_);
1135  is_started_ = false;
1136  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
1137  MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
1138  if (resume_cond_ != nullptr) {
1139    delete resume_cond_;
1140    resume_cond_ = nullptr;
1141  }
1142}
1143
1144Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupted_(false) {
1145  wait_mutex_ = new Mutex("a thread wait mutex");
1146  wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
1147  tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
1148  tlsPtr_.name = new std::string(kThreadNameDuringStartup);
1149  tlsPtr_.nested_signal_state = static_cast<jmp_buf*>(malloc(sizeof(jmp_buf)));
1150
1151  CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
1152  tls32_.state_and_flags.as_struct.flags = 0;
1153  tls32_.state_and_flags.as_struct.state = kNative;
1154  memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
1155  std::fill(tlsPtr_.rosalloc_runs,
1156            tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBrackets,
1157            gc::allocator::RosAlloc::GetDedicatedFullRun());
1158  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
1159    tlsPtr_.checkpoint_functions[i] = nullptr;
1160  }
1161  tlsPtr_.flip_function = nullptr;
1162  tls32_.suspended_at_suspend_check = false;
1163}
1164
1165bool Thread::IsStillStarting() const {
1166  // You might think you can check whether the state is kStarting, but for much of thread startup,
1167  // the thread is in kNative; it might also be in kVmWait.
1168  // You might think you can check whether the peer is nullptr, but the peer is actually created and
1169  // assigned fairly early on, and needs to be.
1170  // It turns out that the last thing to change is the thread name; that's a good proxy for "has
1171  // this thread _ever_ entered kRunnable".
1172  return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) ||
1173      (*tlsPtr_.name == kThreadNameDuringStartup);
1174}
1175
1176void Thread::AssertPendingException() const {
1177  if (UNLIKELY(!IsExceptionPending())) {
1178    LOG(FATAL) << "Pending exception expected.";
1179  }
1180}
1181
1182void Thread::AssertNoPendingException() const {
1183  if (UNLIKELY(IsExceptionPending())) {
1184    ScopedObjectAccess soa(Thread::Current());
1185    mirror::Throwable* exception = GetException();
1186    LOG(FATAL) << "No pending exception expected: " << exception->Dump();
1187  }
1188}
1189
1190void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
1191  if (UNLIKELY(IsExceptionPending())) {
1192    ScopedObjectAccess soa(Thread::Current());
1193    mirror::Throwable* exception = GetException();
1194    LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
1195        << exception->Dump();
1196  }
1197}
1198
1199class MonitorExitVisitor : public SingleRootVisitor {
1200 public:
1201  explicit MonitorExitVisitor(Thread* self) : self_(self) { }
1202
1203  // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
1204  void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
1205      OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1206    if (self_->HoldsLock(entered_monitor)) {
1207      LOG(WARNING) << "Calling MonitorExit on object "
1208                   << entered_monitor << " (" << PrettyTypeOf(entered_monitor) << ")"
1209                   << " left locked by native thread "
1210                   << *Thread::Current() << " which is detaching";
1211      entered_monitor->MonitorExit(self_);
1212    }
1213  }
1214
1215 private:
1216  Thread* const self_;
1217};
1218
1219void Thread::Destroy() {
1220  Thread* self = this;
1221  DCHECK_EQ(self, Thread::Current());
1222
1223  if (tlsPtr_.jni_env != nullptr) {
1224    {
1225      ScopedObjectAccess soa(self);
1226      MonitorExitVisitor visitor(self);
1227      // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
1228      tlsPtr_.jni_env->monitors.VisitRoots(&visitor, RootInfo(kRootVMInternal));
1229    }
1230    // Release locally held global references; releasing them may require the mutator lock.
1231    if (tlsPtr_.jpeer != nullptr) {
1232      // If pthread_create fails we don't have a jni env here.
1233      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
1234      tlsPtr_.jpeer = nullptr;
1235    }
1236    if (tlsPtr_.class_loader_override != nullptr) {
1237      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override);
1238      tlsPtr_.class_loader_override = nullptr;
1239    }
1240  }
1241
1242  if (tlsPtr_.opeer != nullptr) {
1243    ScopedObjectAccess soa(self);
1244    // We may need to call user-supplied managed code, do this before final clean-up.
1245    HandleUncaughtExceptions(soa);
1246    RemoveFromThreadGroup(soa);
1247
1248    // this.nativePeer = 0;
1249    if (Runtime::Current()->IsActiveTransaction()) {
1250      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
1251          ->SetLong<true>(tlsPtr_.opeer, 0);
1252    } else {
1253      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
1254          ->SetLong<false>(tlsPtr_.opeer, 0);
1255    }
1256    Dbg::PostThreadDeath(self);
1257
1258    // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
1259    // who is waiting.
1260    mirror::Object* lock =
1261        soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
1262    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
1263    if (lock != nullptr) {
1264      StackHandleScope<1> hs(self);
1265      Handle<mirror::Object> h_obj(hs.NewHandle(lock));
1266      ObjectLock<mirror::Object> locker(self, h_obj);
1267      locker.NotifyAll();
1268    }
1269    tlsPtr_.opeer = nullptr;
1270  }
1271
1272  {
1273    ScopedObjectAccess soa(self);
1274    Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
1275  }
1276}
1277
1278Thread::~Thread() {
1279  CHECK(tlsPtr_.class_loader_override == nullptr);
1280  CHECK(tlsPtr_.jpeer == nullptr);
1281  CHECK(tlsPtr_.opeer == nullptr);
1282  bool initialized = (tlsPtr_.jni_env != nullptr);  // Did Thread::Init run?
1283  if (initialized) {
1284    delete tlsPtr_.jni_env;
1285    tlsPtr_.jni_env = nullptr;
1286  }
1287  CHECK_NE(GetState(), kRunnable);
1288  CHECK_NE(ReadFlag(kCheckpointRequest), true);
1289  CHECK(tlsPtr_.checkpoint_functions[0] == nullptr);
1290  CHECK(tlsPtr_.checkpoint_functions[1] == nullptr);
1291  CHECK(tlsPtr_.checkpoint_functions[2] == nullptr);
1292  CHECK(tlsPtr_.flip_function == nullptr);
1293  CHECK_EQ(tls32_.suspended_at_suspend_check, false);
1294
1295  // We may be deleting a stillborn thread.
1296  SetStateUnsafe(kTerminated);
1297
1298  delete wait_cond_;
1299  delete wait_mutex_;
1300
1301  if (tlsPtr_.long_jump_context != nullptr) {
1302    delete tlsPtr_.long_jump_context;
1303  }
1304
1305  if (initialized) {
1306    CleanupCpu();
1307  }
1308
1309  if (tlsPtr_.single_step_control != nullptr) {
1310    delete tlsPtr_.single_step_control;
1311  }
1312  delete tlsPtr_.instrumentation_stack;
1313  delete tlsPtr_.name;
1314  delete tlsPtr_.stack_trace_sample;
1315  free(tlsPtr_.nested_signal_state);
1316
1317  Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
1318
1319  TearDownAlternateSignalStack();
1320}
1321
1322void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
1323  if (!IsExceptionPending()) {
1324    return;
1325  }
1326  ScopedLocalRef<jobject> peer(tlsPtr_.jni_env, soa.AddLocalReference<jobject>(tlsPtr_.opeer));
1327  ScopedThreadStateChange tsc(this, kNative);
1328
1329  // Get and clear the exception.
1330  ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred());
1331  tlsPtr_.jni_env->ExceptionClear();
1332
1333  // If the thread has its own handler, use that.
1334  ScopedLocalRef<jobject> handler(tlsPtr_.jni_env,
1335                                  tlsPtr_.jni_env->GetObjectField(peer.get(),
1336                                      WellKnownClasses::java_lang_Thread_uncaughtHandler));
1337  if (handler.get() == nullptr) {
1338    // Otherwise use the thread group's default handler.
1339    handler.reset(tlsPtr_.jni_env->GetObjectField(peer.get(),
1340                                                  WellKnownClasses::java_lang_Thread_group));
1341  }
1342
1343  // Call the handler.
1344  tlsPtr_.jni_env->CallVoidMethod(handler.get(),
1345      WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException,
1346      peer.get(), exception.get());
1347
1348  // If the handler threw, clear that exception too.
1349  tlsPtr_.jni_env->ExceptionClear();
1350}
1351
1352void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
1353  // this.group.removeThread(this);
1354  // group can be null if we're in the compiler or a test.
1355  mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)
1356      ->GetObject(tlsPtr_.opeer);
1357  if (ogroup != nullptr) {
1358    ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
1359    ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(tlsPtr_.opeer));
1360    ScopedThreadStateChange tsc(soa.Self(), kNative);
1361    tlsPtr_.jni_env->CallVoidMethod(group.get(),
1362                                    WellKnownClasses::java_lang_ThreadGroup_removeThread,
1363                                    peer.get());
1364  }
1365}
1366
1367size_t Thread::NumHandleReferences() {
1368  size_t count = 0;
1369  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
1370    count += cur->NumberOfReferences();
1371  }
1372  return count;
1373}
1374
1375bool Thread::HandleScopeContains(jobject obj) const {
1376  StackReference<mirror::Object>* hs_entry =
1377      reinterpret_cast<StackReference<mirror::Object>*>(obj);
1378  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur!= nullptr; cur = cur->GetLink()) {
1379    if (cur->Contains(hs_entry)) {
1380      return true;
1381    }
1382  }
1383  // JNI code invoked from portable code uses shadow frames rather than the handle scope.
1384  return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
1385}
1386
1387void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
1388  BufferedRootVisitor<128> buffered_visitor(visitor, RootInfo(kRootNativeStack, thread_id));
1389  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
1390    for (size_t j = 0, count = cur->NumberOfReferences(); j < count; ++j) {
1391      buffered_visitor.VisitRootIfNonNull(cur->GetHandle(j).GetReference());
1392    }
1393  }
1394}
1395
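// Decodes a jobject into the mirror::Object* it refers to, dispatching on the indirect reference
// kind: local references are looked up in the JNIEnv's local table, handle scope entries are read
// directly off the stack, and globals/weak globals are resolved through the VM. A stale or
// otherwise invalid reference triggers JniAbortF.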
1396mirror::Object* Thread::DecodeJObject(jobject obj) const {
1397  if (obj == nullptr) {
1398    return nullptr;
1399  }
1400  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
1401  IndirectRefKind kind = GetIndirectRefKind(ref);
1402  mirror::Object* result;
1403  bool expect_null = false;
1404  // The "kinds" below are sorted by the frequency we expect to encounter them.
1405  if (kind == kLocal) {
1406    IndirectReferenceTable& locals = tlsPtr_.jni_env->locals;
1407    // Local references do not need a read barrier.
1408    result = locals.Get<kWithoutReadBarrier>(ref);
1409  } else if (kind == kHandleScopeOrInvalid) {
1410    // TODO: make stack indirect reference table lookup more efficient.
1411    // Check if this is a local reference in the handle scope.
1412    if (LIKELY(HandleScopeContains(obj))) {
1413      // Read from handle scope.
1414      result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
1415      VerifyObject(result);
1416    } else {
1417      tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of invalid jobject %p", obj);
1418      expect_null = true;
1419      result = nullptr;
1420    }
1421  } else if (kind == kGlobal) {
1422    result = tlsPtr_.jni_env->vm->DecodeGlobal(const_cast<Thread*>(this), ref);
1423  } else {
1424    DCHECK_EQ(kind, kWeakGlobal);
1425    result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
1426    if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
1427      // This is a special case where it's okay to return nullptr.
1428      expect_null = true;
1429      result = nullptr;
1430    }
1431  }
1432
1433  if (UNLIKELY(!expect_null && result == nullptr)) {
1434    tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of deleted %s %p",
1435                                   ToStr<IndirectRefKind>(kind).c_str(), obj);
1436  }
1437  return result;
1438}
1439
1440// Implements java.lang.Thread.interrupted.
1441bool Thread::Interrupted() {
1442  MutexLock mu(Thread::Current(), *wait_mutex_);
1443  bool interrupted = IsInterruptedLocked();
1444  SetInterruptedLocked(false);
1445  return interrupted;
1446}
1447
1448// Implements java.lang.Thread.isInterrupted.
1449bool Thread::IsInterrupted() {
1450  MutexLock mu(Thread::Current(), *wait_mutex_);
1451  return IsInterruptedLocked();
1452}
1453
1454void Thread::Interrupt(Thread* self) {
1455  MutexLock mu(self, *wait_mutex_);
1456  if (interrupted_) {
1457    return;
1458  }
1459  interrupted_ = true;
1460  NotifyLocked(self);
1461}
1462
1463void Thread::Notify() {
1464  Thread* self = Thread::Current();
1465  MutexLock mu(self, *wait_mutex_);
1466  NotifyLocked(self);
1467}
1468
1469void Thread::NotifyLocked(Thread* self) {
1470  if (wait_monitor_ != nullptr) {
1471    wait_cond_->Signal(self);
1472  }
1473}
1474
1475void Thread::SetClassLoaderOverride(jobject class_loader_override) {
1476  if (tlsPtr_.class_loader_override != nullptr) {
1477    GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override);
1478  }
1479  tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
1480}
1481
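// Counts the Java frames on the stack for building a stack trace, skipping runtime (callee save)
// frames as well as the frames up to and including the Throwable constructors still in flight.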
1482class CountStackDepthVisitor : public StackVisitor {
1483 public:
1484  explicit CountStackDepthVisitor(Thread* thread)
1485      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1486      : StackVisitor(thread, nullptr),
1487        depth_(0), skip_depth_(0), skipping_(true) {}
1488
1489  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1490    // We want to skip frames up to and including the exception's constructor.
1491    // Note that we also skip a frame if it doesn't have a method (namely the callee
1492    // save frame).
1493    mirror::ArtMethod* m = GetMethod();
1494    if (skipping_ && !m->IsRuntimeMethod() &&
1495        !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
1496      skipping_ = false;
1497    }
1498    if (!skipping_) {
1499      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
1500        ++depth_;
1501      }
1502    } else {
1503      ++skip_depth_;
1504    }
1505    return true;
1506  }
1507
1508  int GetDepth() const {
1509    return depth_;
1510  }
1511
1512  int GetSkipDepth() const {
1513    return skip_depth_;
1514  }
1515
1516 private:
1517  uint32_t depth_;
1518  uint32_t skip_depth_;
1519  bool skipping_;
1520};
1521
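// Builds the internal stack trace representation: an ObjectArray<Object> whose first 'depth'
// entries are the ArtMethods on the stack and whose final slot holds an IntArray of the
// corresponding dex PCs.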
1522template<bool kTransactionActive>
1523class BuildInternalStackTraceVisitor : public StackVisitor {
1524 public:
1525  explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
1526      : StackVisitor(thread, nullptr), self_(self),
1527        skip_depth_(skip_depth), count_(0), dex_pc_trace_(nullptr), method_trace_(nullptr) {}
1528
1529  bool Init(int depth)
1530      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1531    // Allocate method trace with an extra slot that will hold the PC trace
1532    StackHandleScope<1> hs(self_);
1533    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1534    Handle<mirror::ObjectArray<mirror::Object>> method_trace(
1535        hs.NewHandle(class_linker->AllocObjectArray<mirror::Object>(self_, depth + 1)));
1536    if (method_trace.Get() == nullptr) {
1537      return false;
1538    }
1539    mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth);
1540    if (dex_pc_trace == nullptr) {
1541      return false;
1542    }
1543    // Save the PC trace in the last element of the method trace; this also places it into
1544    // the object graph. Whether the store is transactional follows the kTransactionActive
1545    // template parameter.
1546    method_trace->Set<kTransactionActive>(depth, dex_pc_trace);
1547    // Set the Object*s and assert that no thread suspension is now possible.
1548    const char* last_no_suspend_cause =
1549        self_->StartAssertNoThreadSuspension("Building internal stack trace");
1550    CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
1551    method_trace_ = method_trace.Get();
1552    dex_pc_trace_ = dex_pc_trace;
1553    return true;
1554  }
1555
1556  virtual ~BuildInternalStackTraceVisitor() {
1557    if (method_trace_ != nullptr) {
1558      self_->EndAssertNoThreadSuspension(nullptr);
1559    }
1560  }
1561
1562  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1563    if (method_trace_ == nullptr || dex_pc_trace_ == nullptr) {
1564      return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
1565    }
1566    if (skip_depth_ > 0) {
1567      skip_depth_--;
1568      return true;
1569    }
1570    mirror::ArtMethod* m = GetMethod();
1571    if (m->IsRuntimeMethod()) {
1572      return true;  // Ignore runtime frames (in particular callee save).
1573    }
1574    method_trace_->Set<kTransactionActive>(count_, m);
1575    dex_pc_trace_->Set<kTransactionActive>(count_,
1576        m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc());
1577    ++count_;
1578    return true;
1579  }
1580
1581  mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
1582    return method_trace_;
1583  }
1584
1585 private:
1586  Thread* const self_;
1587  // How many more frames to skip.
1588  int32_t skip_depth_;
1589  // Current position down stack trace.
1590  uint32_t count_;
1591  // Array of dex PC values.
1592  mirror::IntArray* dex_pc_trace_;
1593  // An array of the methods on the stack, the last entry is a reference to the PC trace.
1594  mirror::ObjectArray<mirror::Object>* method_trace_;
1595};
1596
1597template<bool kTransactionActive>
1598jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
1599  // Compute depth of stack
1600  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
1601  count_visitor.WalkStack();
1602  int32_t depth = count_visitor.GetDepth();
1603  int32_t skip_depth = count_visitor.GetSkipDepth();
1604
1605  // Build internal stack trace.
1606  BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(),
1607                                                                         const_cast<Thread*>(this),
1608                                                                         skip_depth);
1609  if (!build_trace_visitor.Init(depth)) {
1610    return nullptr;  // Allocation failed.
1611  }
1612  build_trace_visitor.WalkStack();
1613  mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
1614  if (kIsDebugBuild) {
1615    for (int32_t i = 0; i < trace->GetLength(); ++i) {
1616      CHECK(trace->Get(i) != nullptr);
1617    }
1618  }
1619  return soa.AddLocalReference<jobjectArray>(trace);
1620}
1621template jobject Thread::CreateInternalStackTrace<false>(
1622    const ScopedObjectAccessAlreadyRunnable& soa) const;
1623template jobject Thread::CreateInternalStackTrace<true>(
1624    const ScopedObjectAccessAlreadyRunnable& soa) const;
1625
1626bool Thread::IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const {
1627  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
1628  count_visitor.WalkStack();
1629  return count_visitor.GetDepth() == exception->GetStackDepth();
1630}
1631
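// Converts an internal stack trace (method array plus trailing dex PC array) into an array of
// java.lang.StackTraceElement objects, either reusing the caller-supplied output array or
// allocating a new one, and reports the number of frames written via stack_depth.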
1632jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
1633    const ScopedObjectAccessAlreadyRunnable& soa, jobject internal, jobjectArray output_array,
1634    int* stack_depth) {
1635  // Decode the internal stack trace into the depth, method trace and PC trace
1636  int32_t depth = soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal)->GetLength() - 1;
1637
1638  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1639
1640  jobjectArray result;
1641
1642  if (output_array != nullptr) {
1643    // Reuse the array we were given.
1644    result = output_array;
1645    // ...adjusting the number of frames we'll write to not exceed the array length.
1646    const int32_t traces_length =
1647        soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->GetLength();
1648    depth = std::min(depth, traces_length);
1649  } else {
1650    // Create java_trace array and place in local reference table
1651    mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
1652        class_linker->AllocStackTraceElementArray(soa.Self(), depth);
1653    if (java_traces == nullptr) {
1654      return nullptr;
1655    }
1656    result = soa.AddLocalReference<jobjectArray>(java_traces);
1657  }
1658
1659  if (stack_depth != nullptr) {
1660    *stack_depth = depth;
1661  }
1662
1663  for (int32_t i = 0; i < depth; ++i) {
1664    mirror::ObjectArray<mirror::Object>* method_trace =
1665          soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
1666    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
1667    mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i));
1668    int32_t line_number;
1669    StackHandleScope<3> hs(soa.Self());
1670    auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
1671    auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
1672    if (method->IsProxyMethod()) {
1673      line_number = -1;
1674      class_name_object.Assign(method->GetDeclaringClass()->GetName());
1675      // source_name_object intentionally left null for proxy methods
1676    } else {
1677      mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
1678      uint32_t dex_pc = pc_trace->Get(i);
1679      line_number = method->GetLineNumFromDexPC(dex_pc);
1680      // Allocate element, potentially triggering GC
1681      // TODO: reuse class_name_object via Class::name_?
1682      const char* descriptor = method->GetDeclaringClassDescriptor();
1683      CHECK(descriptor != nullptr);
1684      std::string class_name(PrettyDescriptor(descriptor));
1685      class_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
1686      if (class_name_object.Get() == nullptr) {
1687        return nullptr;
1688      }
1689      const char* source_file = method->GetDeclaringClassSourceFile();
1690      if (source_file != nullptr) {
1691        source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
1692        if (source_name_object.Get() == nullptr) {
1693          return nullptr;
1694        }
1695      }
1696    }
1697    const char* method_name = method->GetName();
1698    CHECK(method_name != nullptr);
1699    Handle<mirror::String> method_name_object(
1700        hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
1701    if (method_name_object.Get() == nullptr) {
1702      return nullptr;
1703    }
1704    mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
1705        soa.Self(), class_name_object, method_name_object, source_name_object, line_number);
1706    if (obj == nullptr) {
1707      return nullptr;
1708    }
1709    // We are called from native: use non-transactional mode.
1710    soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->Set<false>(i, obj);
1711  }
1712  return result;
1713}
1714
1715void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
1716  va_list args;
1717  va_start(args, fmt);
1718  ThrowNewExceptionV(exception_class_descriptor, fmt, args);
1719  va_end(args);
1720}
1721
1722void Thread::ThrowNewExceptionV(const char* exception_class_descriptor,
1723                                const char* fmt, va_list ap) {
1724  std::string msg;
1725  StringAppendV(&msg, fmt, ap);
1726  ThrowNewException(exception_class_descriptor, msg.c_str());
1727}
1728
1729void Thread::ThrowNewException(const char* exception_class_descriptor,
1730                               const char* msg) {
1731  // Callers should either clear any pending exception or call ThrowNewWrappedException instead.
1732  AssertNoPendingExceptionForNewException(msg);
1733  ThrowNewWrappedException(exception_class_descriptor, msg);
1734}
1735
1736static mirror::ClassLoader* GetCurrentClassLoader(Thread* self)
1737    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1738  mirror::ArtMethod* method = self->GetCurrentMethod(nullptr);
1739  return method != nullptr
1740      ? method->GetDeclaringClass()->GetClassLoader()
1741      : nullptr;
1742}
1743
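// Allocates and throws an instance of the named exception class, wrapping any currently pending
// exception as its cause. If the runtime has not been started (e.g. in the compiler), the detail
// message, cause and stack state are set directly instead of invoking the constructor.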
1744void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
1745                                      const char* msg) {
1746  DCHECK_EQ(this, Thread::Current());
1747  ScopedObjectAccessUnchecked soa(this);
1748  StackHandleScope<3> hs(soa.Self());
1749  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self())));
1750  ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException()));
1751  ClearException();
1752  Runtime* runtime = Runtime::Current();
1753  Handle<mirror::Class> exception_class(
1754      hs.NewHandle(runtime->GetClassLinker()->FindClass(this, exception_class_descriptor,
1755                                                        class_loader)));
1756  if (UNLIKELY(exception_class.Get() == nullptr)) {
1757    CHECK(IsExceptionPending());
1758    LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
1759    return;
1760  }
1761
1762  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true,
1763                                                             true))) {
1764    DCHECK(IsExceptionPending());
1765    return;
1766  }
1767  DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
1768  Handle<mirror::Throwable> exception(
1769      hs.NewHandle(down_cast<mirror::Throwable*>(exception_class->AllocObject(this))));
1770
1771  // If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
1772  if (exception.Get() == nullptr) {
1773    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1774    return;
1775  }
1776
1777  // Choose an appropriate constructor and set up the arguments.
1778  const char* signature;
1779  ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr);
1780  if (msg != nullptr) {
1781    // Ensure we remember this and the method over the String allocation.
1782    msg_string.reset(
1783        soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg)));
1784    if (UNLIKELY(msg_string.get() == nullptr)) {
1785      CHECK(IsExceptionPending());  // OOME.
1786      return;
1787    }
1788    if (cause.get() == nullptr) {
1789      signature = "(Ljava/lang/String;)V";
1790    } else {
1791      signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
1792    }
1793  } else {
1794    if (cause.get() == nullptr) {
1795      signature = "()V";
1796    } else {
1797      signature = "(Ljava/lang/Throwable;)V";
1798    }
1799  }
1800  mirror::ArtMethod* exception_init_method =
1801      exception_class->FindDeclaredDirectMethod("<init>", signature);
1802
1803  CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
1804      << PrettyDescriptor(exception_class_descriptor);
1805
1806  if (UNLIKELY(!runtime->IsStarted())) {
1807    // Something is trying to throw an exception without a started runtime, which is the common
1808    // case in the compiler. We won't be able to invoke the constructor of the exception, so set
1809    // the exception fields directly.
1810    if (msg != nullptr) {
1811      exception->SetDetailMessage(down_cast<mirror::String*>(DecodeJObject(msg_string.get())));
1812    }
1813    if (cause.get() != nullptr) {
1814      exception->SetCause(down_cast<mirror::Throwable*>(DecodeJObject(cause.get())));
1815    }
1816    ScopedLocalRef<jobject> trace(GetJniEnv(),
1817                                  Runtime::Current()->IsActiveTransaction()
1818                                      ? CreateInternalStackTrace<true>(soa)
1819                                      : CreateInternalStackTrace<false>(soa));
1820    if (trace.get() != nullptr) {
1821      exception->SetStackState(down_cast<mirror::Throwable*>(DecodeJObject(trace.get())));
1822    }
1823    SetException(exception.Get());
1824  } else {
1825    jvalue jv_args[2];
1826    size_t i = 0;
1827
1828    if (msg != nullptr) {
1829      jv_args[i].l = msg_string.get();
1830      ++i;
1831    }
1832    if (cause.get() != nullptr) {
1833      jv_args[i].l = cause.get();
1834      ++i;
1835    }
1836    InvokeWithJValues(soa, exception.Get(), soa.EncodeMethod(exception_init_method), jv_args);
1837    if (LIKELY(!IsExceptionPending())) {
1838      SetException(exception.Get());
1839    }
1840  }
1841}
1842
1843void Thread::ThrowOutOfMemoryError(const char* msg) {
1844  LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
1845      msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : ""));
1846  if (!tls32_.throwing_OutOfMemoryError) {
1847    tls32_.throwing_OutOfMemoryError = true;
1848    ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
1849    tls32_.throwing_OutOfMemoryError = false;
1850  } else {
1851    Dump(LOG(WARNING));  // The pre-allocated OOME has no stack, so help out and log one.
1852    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1853  }
1854}
1855
1856Thread* Thread::CurrentFromGdb() {
1857  return Thread::Current();
1858}
1859
1860void Thread::DumpFromGdb() const {
1861  std::ostringstream ss;
1862  Dump(ss);
1863  std::string str(ss.str());
1864  // log to stderr for debugging command line processes
1865  std::cerr << str;
1866#ifdef HAVE_ANDROID_OS
1867  // log to logcat for debugging frameworks processes
1868  LOG(INFO) << str;
1869#endif
1870}
1871
1872// Explicitly instantiate 32-bit and 64-bit thread offset dumping support.
1873template void Thread::DumpThreadOffset<4>(std::ostream& os, uint32_t offset);
1874template void Thread::DumpThreadOffset<8>(std::ostream& os, uint32_t offset);
1875
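// Maps a thread offset to a human readable name for dumping, checking the Thread fields first
// and then the interpreter, JNI and quick entry point tables; unknown offsets are printed as
// raw numbers.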
1876template<size_t ptr_size>
1877void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
1878#define DO_THREAD_OFFSET(x, y) \
1879    if (offset == x.Uint32Value()) { \
1880      os << y; \
1881      return; \
1882    }
1883  DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags")
1884  DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table")
1885  DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception")
1886  DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer")
1887  DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env")
1888  DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self")
1889  DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end")
1890  DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id")
1891  DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method")
1892  DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame")
1893  DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope")
1894  DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
1895#undef DO_THREAD_OFFSET
1896
1897#define INTERPRETER_ENTRY_POINT_INFO(x) \
1898    if (INTERPRETER_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
1899      os << #x; \
1900      return; \
1901    }
1902  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge)
1903  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge)
1904#undef INTERPRETER_ENTRY_POINT_INFO
1905
1906#define JNI_ENTRY_POINT_INFO(x) \
1907    if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
1908      os << #x; \
1909      return; \
1910    }
1911  JNI_ENTRY_POINT_INFO(pDlsymLookup)
1912#undef JNI_ENTRY_POINT_INFO
1913
1914#define QUICK_ENTRY_POINT_INFO(x) \
1915    if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
1916      os << #x; \
1917      return; \
1918    }
1919  QUICK_ENTRY_POINT_INFO(pAllocArray)
1920  QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
1921  QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck)
1922  QUICK_ENTRY_POINT_INFO(pAllocObject)
1923  QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
1924  QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
1925  QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck)
1926  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray)
1927  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck)
1928  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
1929  QUICK_ENTRY_POINT_INFO(pCheckCast)
1930  QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
1931  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess)
1932  QUICK_ENTRY_POINT_INFO(pInitializeType)
1933  QUICK_ENTRY_POINT_INFO(pResolveString)
1934  QUICK_ENTRY_POINT_INFO(pSet8Instance)
1935  QUICK_ENTRY_POINT_INFO(pSet8Static)
1936  QUICK_ENTRY_POINT_INFO(pSet16Instance)
1937  QUICK_ENTRY_POINT_INFO(pSet16Static)
1938  QUICK_ENTRY_POINT_INFO(pSet32Instance)
1939  QUICK_ENTRY_POINT_INFO(pSet32Static)
1940  QUICK_ENTRY_POINT_INFO(pSet64Instance)
1941  QUICK_ENTRY_POINT_INFO(pSet64Static)
1942  QUICK_ENTRY_POINT_INFO(pSetObjInstance)
1943  QUICK_ENTRY_POINT_INFO(pSetObjStatic)
1944  QUICK_ENTRY_POINT_INFO(pGetByteInstance)
1945  QUICK_ENTRY_POINT_INFO(pGetBooleanInstance)
1946  QUICK_ENTRY_POINT_INFO(pGetByteStatic)
1947  QUICK_ENTRY_POINT_INFO(pGetBooleanStatic)
1948  QUICK_ENTRY_POINT_INFO(pGetShortInstance)
1949  QUICK_ENTRY_POINT_INFO(pGetCharInstance)
1950  QUICK_ENTRY_POINT_INFO(pGetShortStatic)
1951  QUICK_ENTRY_POINT_INFO(pGetCharStatic)
1952  QUICK_ENTRY_POINT_INFO(pGet32Instance)
1953  QUICK_ENTRY_POINT_INFO(pGet32Static)
1954  QUICK_ENTRY_POINT_INFO(pGet64Instance)
1955  QUICK_ENTRY_POINT_INFO(pGet64Static)
1956  QUICK_ENTRY_POINT_INFO(pGetObjInstance)
1957  QUICK_ENTRY_POINT_INFO(pGetObjStatic)
1958  QUICK_ENTRY_POINT_INFO(pAputObjectWithNullAndBoundCheck)
1959  QUICK_ENTRY_POINT_INFO(pAputObjectWithBoundCheck)
1960  QUICK_ENTRY_POINT_INFO(pAputObject)
1961  QUICK_ENTRY_POINT_INFO(pHandleFillArrayData)
1962  QUICK_ENTRY_POINT_INFO(pJniMethodStart)
1963  QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized)
1964  QUICK_ENTRY_POINT_INFO(pJniMethodEnd)
1965  QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized)
1966  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference)
1967  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized)
1968  QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline)
1969  QUICK_ENTRY_POINT_INFO(pLockObject)
1970  QUICK_ENTRY_POINT_INFO(pUnlockObject)
1971  QUICK_ENTRY_POINT_INFO(pCmpgDouble)
1972  QUICK_ENTRY_POINT_INFO(pCmpgFloat)
1973  QUICK_ENTRY_POINT_INFO(pCmplDouble)
1974  QUICK_ENTRY_POINT_INFO(pCmplFloat)
1975  QUICK_ENTRY_POINT_INFO(pFmod)
1976  QUICK_ENTRY_POINT_INFO(pL2d)
1977  QUICK_ENTRY_POINT_INFO(pFmodf)
1978  QUICK_ENTRY_POINT_INFO(pL2f)
1979  QUICK_ENTRY_POINT_INFO(pD2iz)
1980  QUICK_ENTRY_POINT_INFO(pF2iz)
1981  QUICK_ENTRY_POINT_INFO(pIdivmod)
1982  QUICK_ENTRY_POINT_INFO(pD2l)
1983  QUICK_ENTRY_POINT_INFO(pF2l)
1984  QUICK_ENTRY_POINT_INFO(pLdiv)
1985  QUICK_ENTRY_POINT_INFO(pLmod)
1986  QUICK_ENTRY_POINT_INFO(pLmul)
1987  QUICK_ENTRY_POINT_INFO(pShlLong)
1988  QUICK_ENTRY_POINT_INFO(pShrLong)
1989  QUICK_ENTRY_POINT_INFO(pUshrLong)
1990  QUICK_ENTRY_POINT_INFO(pIndexOf)
1991  QUICK_ENTRY_POINT_INFO(pStringCompareTo)
1992  QUICK_ENTRY_POINT_INFO(pMemcpy)
1993  QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline)
1994  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline)
1995  QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge)
1996  QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck)
1997  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck)
1998  QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck)
1999  QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck)
2000  QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck)
2001  QUICK_ENTRY_POINT_INFO(pTestSuspend)
2002  QUICK_ENTRY_POINT_INFO(pDeliverException)
2003  QUICK_ENTRY_POINT_INFO(pThrowArrayBounds)
2004  QUICK_ENTRY_POINT_INFO(pThrowDivZero)
2005  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod)
2006  QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
2007  QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
2008  QUICK_ENTRY_POINT_INFO(pDeoptimize)
2009  QUICK_ENTRY_POINT_INFO(pA64Load)
2010  QUICK_ENTRY_POINT_INFO(pA64Store)
2011#undef QUICK_ENTRY_POINT_INFO
2012
2013  os << offset;
2014}
2015
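// Delivers the pending exception: either deoptimizes the stack (for the special deoptimization
// exception) or finds the catch handler, then updates the instrumentation stack and long jumps
// to the handler, so this call does not return.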
2016void Thread::QuickDeliverException() {
2017  // Get exception from thread.
2018  mirror::Throwable* exception = GetException();
2019  CHECK(exception != nullptr);
2020  // Don't leave exception visible while we try to find the handler, which may cause class
2021  // resolution.
2022  ClearException();
2023  bool is_deoptimization = (exception == GetDeoptimizationException());
2024  QuickExceptionHandler exception_handler(this, is_deoptimization);
2025  if (is_deoptimization) {
2026    exception_handler.DeoptimizeStack();
2027  } else {
2028    exception_handler.FindCatch(exception);
2029  }
2030  exception_handler.UpdateInstrumentationStack();
2031  exception_handler.DoLongJump();
2032}
2033
2034Context* Thread::GetLongJumpContext() {
2035  Context* result = tlsPtr_.long_jump_context;
2036  if (result == nullptr) {
2037    result = Context::Create();
2038  } else {
2039    tlsPtr_.long_jump_context = nullptr;  // Avoid context being shared.
2040    result->Reset();
2041  }
2042  return result;
2043}
2044
2045// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
2046//       so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
2047struct CurrentMethodVisitor FINAL : public StackVisitor {
2048  CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
2049      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2050      : StackVisitor(thread, context), this_object_(nullptr), method_(nullptr), dex_pc_(0),
2051        abort_on_error_(abort_on_error) {}
2052  bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2053    mirror::ArtMethod* m = GetMethod();
2054    if (m->IsRuntimeMethod()) {
2055      // Continue if this is a runtime method.
2056      return true;
2057    }
2058    if (context_ != nullptr) {
2059      this_object_ = GetThisObject();
2060    }
2061    method_ = m;
2062    dex_pc_ = GetDexPc(abort_on_error_);
2063    return false;
2064  }
2065  mirror::Object* this_object_;
2066  mirror::ArtMethod* method_;
2067  uint32_t dex_pc_;
2068  const bool abort_on_error_;
2069};
2070
2071mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
2072  CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
2073  visitor.WalkStack(false);
2074  if (dex_pc != nullptr) {
2075    *dex_pc = visitor.dex_pc_;
2076  }
2077  return visitor.method_;
2078}
2079
2080bool Thread::HoldsLock(mirror::Object* object) const {
2081  if (object == nullptr) {
2082    return false;
2083  }
2084  return object->GetLockOwnerThreadId() == GetThreadId();
2085}
2086
2087// RootVisitor parameters are: (mirror::Object** obj, size_t vreg, const StackVisitor* visitor).
2088template <typename RootVisitor>
2089class ReferenceMapVisitor : public StackVisitor {
2090 public:
2091  ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
2092      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2093      : StackVisitor(thread, context), visitor_(visitor) {}
2094
2095  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2096    if (false) {
2097      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
2098                << StringPrintf("@ PC:%04x", GetDexPc());
2099    }
2100    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
2101    if (shadow_frame != nullptr) {
2102      VisitShadowFrame(shadow_frame);
2103    } else {
2104      VisitQuickFrame();
2105    }
2106    return true;
2107  }
2108
2109  void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2110    mirror::ArtMethod** method_addr = shadow_frame->GetMethodAddress();
2111    visitor_(reinterpret_cast<mirror::Object**>(method_addr), 0 /*ignored*/, this);
2112    mirror::ArtMethod* m = *method_addr;
2113    DCHECK(m != nullptr);
2114    size_t num_regs = shadow_frame->NumberOfVRegs();
2115    if (m->IsNative() || shadow_frame->HasReferenceArray()) {
2116      // Handle scope for JNI or references for the interpreter.
2117      for (size_t reg = 0; reg < num_regs; ++reg) {
2118        mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2119        if (ref != nullptr) {
2120          mirror::Object* new_ref = ref;
2121          visitor_(&new_ref, reg, this);
2122          if (new_ref != ref) {
2123            shadow_frame->SetVRegReference(reg, new_ref);
2124          }
2125        }
2126      }
2127    } else {
2128      // Java method.
2129      // The portable path uses the DexGcMap stored in Method.native_gc_map_.
2130      const uint8_t* gc_map = m->GetNativeGcMap(sizeof(void*));
2131      CHECK(gc_map != nullptr) << PrettyMethod(m);
2132      verifier::DexPcToReferenceMap dex_gc_map(gc_map);
2133      uint32_t dex_pc = shadow_frame->GetDexPC();
2134      const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
2135      DCHECK(reg_bitmap != nullptr);
2136      num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
2137      for (size_t reg = 0; reg < num_regs; ++reg) {
2138        if (TestBitmap(reg, reg_bitmap)) {
2139          mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2140          if (ref != nullptr) {
2141            mirror::Object* new_ref = ref;
2142            visitor_(&new_ref, reg, this);
2143            if (new_ref != ref) {
2144              shadow_frame->SetVRegReference(reg, new_ref);
2145            }
2146          }
2147        }
2148      }
2149    }
2150  }
2151
2152 private:
2153  void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2154    StackReference<mirror::ArtMethod>* cur_quick_frame = GetCurrentQuickFrame();
2155    mirror::ArtMethod* m = cur_quick_frame->AsMirrorPtr();
2156    mirror::ArtMethod* old_method = m;
2157    visitor_(reinterpret_cast<mirror::Object**>(&m), 0 /*ignored*/, this);
2158    if (m != old_method) {
2159      cur_quick_frame->Assign(m);
2160    }
2161
2162    // Process register map (which native and runtime methods don't have)
2163    if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
2164      if (m->IsOptimized(sizeof(void*))) {
2165        Runtime* runtime = Runtime::Current();
2166        const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
2167        uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
2168        CodeInfo code_info = m->GetOptimizedCodeInfo();
2169        StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset);
2170        MemoryRegion mask = map.GetStackMask(code_info);
2171        // Visit stack entries that hold pointers.
2172        for (size_t i = 0; i < mask.size_in_bits(); ++i) {
2173          if (mask.LoadBit(i)) {
2174            StackReference<mirror::Object>* ref_addr =
2175                  reinterpret_cast<StackReference<mirror::Object>*>(cur_quick_frame) + i;
2176            mirror::Object* ref = ref_addr->AsMirrorPtr();
2177            if (ref != nullptr) {
2178              mirror::Object* new_ref = ref;
2179              visitor_(&new_ref, -1, this);
2180              if (ref != new_ref) {
2181                ref_addr->Assign(new_ref);
2182              }
2183            }
2184          }
2185        }
2186        // Visit callee-save registers that hold pointers.
2187        uint32_t register_mask = map.GetRegisterMask(code_info);
2188        for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
2189          if (register_mask & (1 << i)) {
2190            mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
2191            if (*ref_addr != nullptr) {
2192              visitor_(ref_addr, -1, this);
2193            }
2194          }
2195        }
2196      } else {
2197        const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
2198        CHECK(native_gc_map != nullptr) << PrettyMethod(m);
2199        const DexFile::CodeItem* code_item = m->GetCodeItem();
2200        // Can't be nullptr or how would we compile its instructions?
2201        DCHECK(code_item != nullptr) << PrettyMethod(m);
2202        NativePcOffsetToReferenceMap map(native_gc_map);
2203        size_t num_regs = std::min(map.RegWidth() * 8,
2204                                   static_cast<size_t>(code_item->registers_size_));
2205        if (num_regs > 0) {
2206          Runtime* runtime = Runtime::Current();
2207          const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
2208          uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
2209          const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
2210          DCHECK(reg_bitmap != nullptr);
2211          const void* code_pointer = mirror::ArtMethod::EntryPointToCodePointer(entry_point);
2212          const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
2213          QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
2214          // For all dex registers in the bitmap
2215          DCHECK(cur_quick_frame != nullptr);
2216          for (size_t reg = 0; reg < num_regs; ++reg) {
2217            // Does this register hold a reference?
2218            if (TestBitmap(reg, reg_bitmap)) {
2219              uint32_t vmap_offset;
2220              if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
2221                int vmap_reg = vmap_table.ComputeRegister(frame_info.CoreSpillMask(), vmap_offset,
2222                                                          kReferenceVReg);
2223                // This is sound as spilled GPRs will be word sized (i.e. 32 or 64 bits).
2224                mirror::Object** ref_addr =
2225                    reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
2226                if (*ref_addr != nullptr) {
2227                  visitor_(ref_addr, reg, this);
2228                }
2229              } else {
2230                StackReference<mirror::Object>* ref_addr =
2231                    reinterpret_cast<StackReference<mirror::Object>*>(GetVRegAddrFromQuickCode(
2232                        cur_quick_frame, code_item, frame_info.CoreSpillMask(),
2233                        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), reg));
2234                mirror::Object* ref = ref_addr->AsMirrorPtr();
2235                if (ref != nullptr) {
2236                  mirror::Object* new_ref = ref;
2237                  visitor_(&new_ref, reg, this);
2238                  if (ref != new_ref) {
2239                    ref_addr->Assign(new_ref);
2240                  }
2241                }
2242              }
2243            }
2244          }
2245        }
2246      }
2247    }
2248  }
2249
2250  static bool TestBitmap(size_t reg, const uint8_t* reg_vector) {
2251    return ((reg_vector[reg / kBitsPerByte] >> (reg % kBitsPerByte)) & 0x01) != 0;
2252  }
2253
2254  // Visitor for when we visit a root.
2255  RootVisitor& visitor_;
2256};
2257
2258class RootCallbackVisitor {
2259 public:
2260  RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}
2261
2262  void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
2263      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2264    visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
2265  }
2266
2267 private:
2268  RootVisitor* const visitor_;
2269  const uint32_t tid_;
2270};
2271
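// Visits all GC roots held by this thread: the Java peer, the pending exception, JNI locals and
// monitors, handle scopes, debugger state, deoptimization and under-construction shadow frames,
// the managed stack frames, and the instrumentation stack entries.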
2272void Thread::VisitRoots(RootVisitor* visitor) {
2273  const uint32_t thread_id = GetThreadId();
2274  visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
2275  if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
2276    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
2277                   RootInfo(kRootNativeStack, thread_id));
2278  }
2279  visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
2280  tlsPtr_.jni_env->locals.VisitRoots(visitor, RootInfo(kRootJNILocal, thread_id));
2281  tlsPtr_.jni_env->monitors.VisitRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
2282  HandleScopeVisitRoots(visitor, thread_id);
2283  if (tlsPtr_.debug_invoke_req != nullptr) {
2284    tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
2285  }
2286  if (tlsPtr_.single_step_control != nullptr) {
2287    tlsPtr_.single_step_control->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
2288  }
2289  if (tlsPtr_.deoptimization_shadow_frame != nullptr) {
2290    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
2291    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
2292    for (ShadowFrame* shadow_frame = tlsPtr_.deoptimization_shadow_frame; shadow_frame != nullptr;
2293        shadow_frame = shadow_frame->GetLink()) {
2294      mapper.VisitShadowFrame(shadow_frame);
2295    }
2296  }
2297  if (tlsPtr_.shadow_frame_under_construction != nullptr) {
2298    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
2299    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
2300    for (ShadowFrame* shadow_frame = tlsPtr_.shadow_frame_under_construction;
2301        shadow_frame != nullptr;
2302        shadow_frame = shadow_frame->GetLink()) {
2303      mapper.VisitShadowFrame(shadow_frame);
2304    }
2305  }
2306  if (tlsPtr_.method_verifier != nullptr) {
2307    tlsPtr_.method_verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id));
2308  }
2309  // Visit roots on this thread's stack
2310  Context* context = GetLongJumpContext();
2311  RootCallbackVisitor visitor_to_callback(visitor, thread_id);
2312  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context, visitor_to_callback);
2313  mapper.WalkStack();
2314  ReleaseLongJumpContext(context);
2315  for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
2316    visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
2317    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&frame.method_),
2318                       RootInfo(kRootVMInternal, thread_id));
2319  }
2320}
2321
2322class VerifyRootVisitor : public SingleRootVisitor {
2323 public:
2324  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
2325      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2326    VerifyObject(root);
2327  }
2328};
2329
2330void Thread::VerifyStackImpl() {
2331  VerifyRootVisitor visitor;
2332  std::unique_ptr<Context> context(Context::Create());
2333  RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId());
2334  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback);
2335  mapper.WalkStack();
2336}
2337
2338// Set the stack end to the value to be used during a stack overflow.
2339void Thread::SetStackEndForStackOverflow() {
2340  // During stack overflow we allow use of the full stack.
2341  if (tlsPtr_.stack_end == tlsPtr_.stack_begin) {
2342    // However, we seem to have already extended to use the full stack.
2343    LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
2344               << GetStackOverflowReservedBytes(kRuntimeISA) << ")?";
2345    DumpStack(LOG(ERROR));
2346    LOG(FATAL) << "Recursive stack overflow.";
2347  }
2348
2349  tlsPtr_.stack_end = tlsPtr_.stack_begin;
2350
2351  // Remove the stack overflow protection if it is set up.
2352  bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks();
2353  if (implicit_stack_check) {
2354    if (!UnprotectStack()) {
2355      LOG(ERROR) << "Unable to remove stack protection for stack overflow";
2356    }
2357  }
2358}
2359
2360void Thread::SetTlab(uint8_t* start, uint8_t* end) {
2361  DCHECK_LE(start, end);
2362  tlsPtr_.thread_local_start = start;
2363  tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start;
2364  tlsPtr_.thread_local_end = end;
2365  tlsPtr_.thread_local_objects = 0;
2366}
2367
2368bool Thread::HasTlab() const {
2369  bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
2370  if (has_tlab) {
2371    DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr);
2372  } else {
2373    DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr);
2374  }
2375  return has_tlab;
2376}
2377
2378std::ostream& operator<<(std::ostream& os, const Thread& thread) {
2379  thread.ShortDump(os);
2380  return os;
2381}
2382
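// mprotects the guard region below the stack begin as PROT_NONE so that the implicit stack
// overflow check faults when the stack grows into it.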
2383void Thread::ProtectStack() {
2384  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
2385  VLOG(threads) << "Protecting stack at " << pregion;
2386  if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
2387    LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. "
2388        "Reason: "
2389        << strerror(errno) << " size:  " << kStackOverflowProtectedSize;
2390  }
2391}
2392
2393bool Thread::UnprotectStack() {
2394  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
2395  VLOG(threads) << "Unprotecting stack at " << pregion;
2396  return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0;
2397}
2398
2399void Thread::ActivateSingleStepControl(SingleStepControl* ssc) {
2400  CHECK(Dbg::IsDebuggerActive());
2401  CHECK(GetSingleStepControl() == nullptr) << "Single step already active in thread " << *this;
2402  CHECK(ssc != nullptr);
2403  tlsPtr_.single_step_control = ssc;
2404}
2405
2406void Thread::DeactivateSingleStepControl() {
2407  CHECK(Dbg::IsDebuggerActive());
2408  CHECK(GetSingleStepControl() != nullptr) << "Single step not active in thread " << *this;
2409  SingleStepControl* ssc = GetSingleStepControl();
2410  tlsPtr_.single_step_control = nullptr;
2411  delete ssc;
2412}
2413
2414void Thread::SetDebugInvokeReq(DebugInvokeReq* req) {
2415  CHECK(Dbg::IsDebuggerActive());
2416  CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this;
2417  CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself";
2418  CHECK(req != nullptr);
2419  tlsPtr_.debug_invoke_req = req;
2420}
2421
2422void Thread::ClearDebugInvokeReq() {
2423  CHECK(Dbg::IsDebuggerActive());
2424  CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this;
2425  CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself";
2426  // We do not own the DebugInvokeReq* so we must not delete it, it is the responsibility of
2427  // the owner (the JDWP thread).
2428  tlsPtr_.debug_invoke_req = nullptr;
2429}
2430
2431void Thread::SetVerifier(verifier::MethodVerifier* verifier) {
2432  CHECK(tlsPtr_.method_verifier == nullptr);
2433  tlsPtr_.method_verifier = verifier;
2434}
2435
2436void Thread::ClearVerifier(verifier::MethodVerifier* verifier) {
2437  CHECK_EQ(tlsPtr_.method_verifier, verifier);
2438  tlsPtr_.method_verifier = nullptr;
2439}
2440
2441}  // namespace art
2442