thread.cc revision 41f9cc28f2c9edd3903ba6ca1c75b022445552ad
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define ATRACE_TAG ATRACE_TAG_DALVIK
18
19#include "thread.h"
20
21#include <cutils/trace.h>
22#include <pthread.h>
23#include <signal.h>
24#include <sys/resource.h>
25#include <sys/time.h>
26
27#include <algorithm>
28#include <bitset>
29#include <cerrno>
30#include <iostream>
31#include <list>
32#include <sstream>
33
34#include "arch/context.h"
35#include "art_field-inl.h"
36#include "art_method-inl.h"
37#include "base/bit_utils.h"
38#include "base/mutex.h"
39#include "base/timing_logger.h"
40#include "base/to_str.h"
41#include "class_linker-inl.h"
42#include "debugger.h"
43#include "dex_file-inl.h"
44#include "entrypoints/entrypoint_utils.h"
45#include "entrypoints/quick/quick_alloc_entrypoints.h"
46#include "gc_map.h"
47#include "gc/accounting/card_table-inl.h"
48#include "gc/allocator/rosalloc.h"
49#include "gc/heap.h"
50#include "gc/space/space.h"
51#include "handle_scope-inl.h"
52#include "indirect_reference_table-inl.h"
53#include "jni_internal.h"
54#include "mirror/class_loader.h"
55#include "mirror/class-inl.h"
56#include "mirror/object_array-inl.h"
57#include "mirror/stack_trace_element.h"
58#include "monitor.h"
59#include "object_lock.h"
60#include "quick_exception_handler.h"
61#include "quick/quick_method_frame_info.h"
62#include "reflection.h"
63#include "runtime.h"
64#include "scoped_thread_state_change.h"
65#include "ScopedLocalRef.h"
66#include "ScopedUtfChars.h"
67#include "stack.h"
68#include "thread_list.h"
69#include "thread-inl.h"
70#include "utils.h"
71#include "verifier/dex_gc_map.h"
72#include "verifier/method_verifier.h"
73#include "verify_object-inl.h"
74#include "vmap_table.h"
75#include "well_known_classes.h"
76
77namespace art {
78
79bool Thread::is_started_ = false;
80pthread_key_t Thread::pthread_key_self_;
81ConditionVariable* Thread::resume_cond_ = nullptr;
82const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
83
84static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
85
86void Thread::InitCardTable() {
87  tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
88}
89
90static void UnimplementedEntryPoint() {
91  UNIMPLEMENTED(FATAL);
92}
93
94void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
95                     QuickEntryPoints* qpoints);
96
97void Thread::InitTlsEntryPoints() {
98  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
99  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.interpreter_entrypoints);
100  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) +
101      sizeof(tlsPtr_.quick_entrypoints));
102  for (uintptr_t* it = begin; it != end; ++it) {
103    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
104  }
105  InitEntryPoints(&tlsPtr_.interpreter_entrypoints, &tlsPtr_.jni_entrypoints,
106                  &tlsPtr_.quick_entrypoints);
107}
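
// Illustrative, standalone sketch (not ART code): the loop above fills every pointer-sized
// slot of the contiguous entry-point tables with UnimplementedEntryPoint, so calling a slot
// that InitEntryPoints forgot to set fails loudly instead of jumping into garbage. The
// DemoEntryPoints/Unimplemented names below are hypothetical stand-ins for ART's tables.
#include <cstdint>
#include <cstdio>
#include <cstdlib>

namespace entrypoint_placeholder_sketch {

struct DemoEntryPoints {
  void (*pAllocObject)();
  void (*pResolveString)();
  void (*pThrowException)();
};

static void Unimplemented() {
  std::fprintf(stderr, "unimplemented entry point called\n");
  std::abort();
}

// Overwrite every pointer-sized slot in the table with the trap function.
static void FillWithPlaceholder(DemoEntryPoints* table) {
  uintptr_t* begin = reinterpret_cast<uintptr_t*>(table);
  uintptr_t* end = reinterpret_cast<uintptr_t*>(
      reinterpret_cast<char*>(table) + sizeof(*table));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(Unimplemented);
  }
}

}  // namespace entrypoint_placeholder_sketch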
108
109void Thread::InitStringEntryPoints() {
110  ScopedObjectAccess soa(this);
111  QuickEntryPoints* qpoints = &tlsPtr_.quick_entrypoints;
112  qpoints->pNewEmptyString = reinterpret_cast<void(*)()>(
113      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newEmptyString));
114  qpoints->pNewStringFromBytes_B = reinterpret_cast<void(*)()>(
115      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_B));
116  qpoints->pNewStringFromBytes_BI = reinterpret_cast<void(*)()>(
117      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BI));
118  qpoints->pNewStringFromBytes_BII = reinterpret_cast<void(*)()>(
119      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BII));
120  qpoints->pNewStringFromBytes_BIII = reinterpret_cast<void(*)()>(
121      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIII));
122  qpoints->pNewStringFromBytes_BIIString = reinterpret_cast<void(*)()>(
123      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIIString));
124  qpoints->pNewStringFromBytes_BString = reinterpret_cast<void(*)()>(
125      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BString));
126  qpoints->pNewStringFromBytes_BIICharset = reinterpret_cast<void(*)()>(
127      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIICharset));
128  qpoints->pNewStringFromBytes_BCharset = reinterpret_cast<void(*)()>(
129      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BCharset));
130  qpoints->pNewStringFromChars_C = reinterpret_cast<void(*)()>(
131      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_C));
132  qpoints->pNewStringFromChars_CII = reinterpret_cast<void(*)()>(
133      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_CII));
134  qpoints->pNewStringFromChars_IIC = reinterpret_cast<void(*)()>(
135      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_IIC));
136  qpoints->pNewStringFromCodePoints = reinterpret_cast<void(*)()>(
137      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromCodePoints));
138  qpoints->pNewStringFromString = reinterpret_cast<void(*)()>(
139      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromString));
140  qpoints->pNewStringFromStringBuffer = reinterpret_cast<void(*)()>(
141      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromStringBuffer));
142  qpoints->pNewStringFromStringBuilder = reinterpret_cast<void(*)()>(
143      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromStringBuilder));
144}
145
146void Thread::ResetQuickAllocEntryPointsForThread() {
147  ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
148}
149
150class DeoptimizationReturnValueRecord {
151 public:
152  DeoptimizationReturnValueRecord(const JValue& ret_val,
153                                  bool is_reference,
154                                  DeoptimizationReturnValueRecord* link)
155      : ret_val_(ret_val), is_reference_(is_reference), link_(link) {}
156
157  JValue GetReturnValue() const { return ret_val_; }
158  bool IsReference() const { return is_reference_; }
159  DeoptimizationReturnValueRecord* GetLink() const { return link_; }
160  mirror::Object** GetGCRoot() {
161    DCHECK(is_reference_);
162    return ret_val_.GetGCRoot();
163  }
164
165 private:
166  JValue ret_val_;
167  const bool is_reference_;
168  DeoptimizationReturnValueRecord* const link_;
169
170  DISALLOW_COPY_AND_ASSIGN(DeoptimizationReturnValueRecord);
171};
172
173class StackedShadowFrameRecord {
174 public:
175  StackedShadowFrameRecord(ShadowFrame* shadow_frame,
176                           StackedShadowFrameType type,
177                           StackedShadowFrameRecord* link)
178      : shadow_frame_(shadow_frame),
179        type_(type),
180        link_(link) {}
181
182  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
183  StackedShadowFrameType GetType() const { return type_; }
184  StackedShadowFrameRecord* GetLink() const { return link_; }
185
186 private:
187  ShadowFrame* const shadow_frame_;
188  const StackedShadowFrameType type_;
189  StackedShadowFrameRecord* const link_;
190
191  DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
192};
193
194void Thread::PushAndClearDeoptimizationReturnValue() {
195  DeoptimizationReturnValueRecord* record = new DeoptimizationReturnValueRecord(
196      tls64_.deoptimization_return_value,
197      tls32_.deoptimization_return_value_is_reference,
198      tlsPtr_.deoptimization_return_value_stack);
199  tlsPtr_.deoptimization_return_value_stack = record;
200  ClearDeoptimizationReturnValue();
201}
202
203JValue Thread::PopDeoptimizationReturnValue() {
204  DeoptimizationReturnValueRecord* record = tlsPtr_.deoptimization_return_value_stack;
205  DCHECK(record != nullptr);
206  tlsPtr_.deoptimization_return_value_stack = record->GetLink();
207  JValue ret_val(record->GetReturnValue());
208  delete record;
209  return ret_val;
210}
211
212void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
213  StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
214      sf, type, tlsPtr_.stacked_shadow_frame_record);
215  tlsPtr_.stacked_shadow_frame_record = record;
216}
217
218ShadowFrame* Thread::PopStackedShadowFrame(StackedShadowFrameType type) {
219  StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
220  DCHECK(record != nullptr);
221  DCHECK_EQ(record->GetType(), type);
222  tlsPtr_.stacked_shadow_frame_record = record->GetLink();
223  ShadowFrame* shadow_frame = record->GetShadowFrame();
224  delete record;
225  return shadow_frame;
226}
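
// Illustrative, standalone sketch (not ART code): DeoptimizationReturnValueRecord and
// StackedShadowFrameRecord above both use the same pattern, a per-thread stack kept as an
// intrusive singly linked list whose head lives in thread-local storage; push allocates a
// node that links to the old head, pop unlinks and deletes the head. A generic sketch of
// that pattern with hypothetical names:
#include <cassert>
#include <utility>

namespace record_stack_sketch {

template <typename T>
class RecordStack {
 public:
  void Push(T value) {
    head_ = new Node{std::move(value), head_};
  }

  T Pop() {
    assert(head_ != nullptr);
    Node* node = head_;
    head_ = node->link;
    T value = std::move(node->value);
    delete node;
    return value;
  }

  bool Empty() const { return head_ == nullptr; }

 private:
  struct Node {
    T value;
    Node* link;
  };
  Node* head_ = nullptr;  // Would live in Thread::tlsPtr_ in the real code.
};

}  // namespace record_stack_sketch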
227
228void Thread::InitTid() {
229  tls32_.tid = ::art::GetTid();
230}
231
232void Thread::InitAfterFork() {
233  // One thread (us) survived the fork, but we have a new tid so we need to
234  // update the value stashed in this Thread*.
235  InitTid();
236}
237
238void* Thread::CreateCallback(void* arg) {
239  Thread* self = reinterpret_cast<Thread*>(arg);
240  Runtime* runtime = Runtime::Current();
241  if (runtime == nullptr) {
242    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
243    return nullptr;
244  }
245  {
246    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
247    //       after self->Init().
248    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
249    // Check that if we got here we cannot be shutting down (as shutdown should never have started
250    // while threads are being born).
251    CHECK(!runtime->IsShuttingDownLocked());
252    // Note: given that the JNIEnv is created in the parent thread, the only failure point here is
253    //       a mess in InitStackHwm. We do not have a reasonable way to recover from that, so abort
254    //       the runtime in such a case. In case this ever changes, we need to make sure here to
255    //       delete the tmp_jni_env, as we own it at this point.
256    CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM(), self->tlsPtr_.tmp_jni_env));
257    self->tlsPtr_.tmp_jni_env = nullptr;
258    Runtime::Current()->EndThreadBirth();
259  }
260  {
261    ScopedObjectAccess soa(self);
262    self->InitStringEntryPoints();
263
264    // Copy peer into self, deleting global reference when done.
265    CHECK(self->tlsPtr_.jpeer != nullptr);
266    self->tlsPtr_.opeer = soa.Decode<mirror::Object*>(self->tlsPtr_.jpeer);
267    self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
268    self->tlsPtr_.jpeer = nullptr;
269    self->SetThreadName(self->GetThreadName(soa)->ToModifiedUtf8().c_str());
270
271    ArtField* priorityField = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority);
272    self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));
273    Dbg::PostThreadStart(self);
274
275    // Invoke the 'run' method of our java.lang.Thread.
276    mirror::Object* receiver = self->tlsPtr_.opeer;
277    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
278    ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
279    InvokeVirtualOrInterfaceWithJValues(soa, ref.get(), mid, nullptr);
280  }
281  // Detach and delete self.
282  Runtime::Current()->GetThreadList()->Unregister(self);
283
284  return nullptr;
285}
286
287Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
288                                  mirror::Object* thread_peer) {
289  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
290  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
291  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
292  // to stop it from going away.
293  if (kIsDebugBuild) {
294    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
295    if (result != nullptr && !result->IsSuspended()) {
296      Locks::thread_list_lock_->AssertHeld(soa.Self());
297    }
298  }
299  return result;
300}
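
// Illustrative, standalone sketch (not ART code): java.lang.Thread.nativePeer is a Java
// long holding the address of the native Thread*, so mapping a managed peer back to the
// native object is just a long -> uintptr_t -> pointer round trip, as in the GetLong()
// cast above. The NativeThread type below is a hypothetical stand-in.
#include <cstdint>

namespace native_peer_sketch {

struct NativeThread { int tid; };

// Store a native pointer in a Java-style 64-bit signed field.
inline int64_t EncodePeer(NativeThread* thread) {
  return static_cast<int64_t>(reinterpret_cast<uintptr_t>(thread));
}

// Recover the pointer; only valid while the native object is still alive.
inline NativeThread* DecodePeer(int64_t native_peer_field) {
  return reinterpret_cast<NativeThread*>(static_cast<uintptr_t>(native_peer_field));
}

}  // namespace native_peer_sketch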
301
302Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
303                                  jobject java_thread) {
304  return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread));
305}
306
307static size_t FixStackSize(size_t stack_size) {
308  // A stack size of zero means "use the default".
309  if (stack_size == 0) {
310    stack_size = Runtime::Current()->GetDefaultStackSize();
311  }
312
313  // Dalvik used the bionic pthread default stack size for native threads,
314  // so include that here to support apps that expect large native stacks.
315  stack_size += 1 * MB;
316
317  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
318  if (stack_size < PTHREAD_STACK_MIN) {
319    stack_size = PTHREAD_STACK_MIN;
320  }
321
322  if (Runtime::Current()->ExplicitStackOverflowChecks()) {
323    // It's likely that callers are trying to ensure they have at least a certain amount of
324    // stack space, so we should add our reserved space on top of what they requested, rather
325    // than implicitly take it away from them.
326    stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
327  } else {
328    // If we are going to use implicit stack checks, allocate space for the protected
329    // region at the bottom of the stack.
330    stack_size += Thread::kStackOverflowImplicitCheckSize +
331        GetStackOverflowReservedBytes(kRuntimeISA);
332  }
333
334  // Some systems require the stack size to be a multiple of the system page size, so round up.
335  stack_size = RoundUp(stack_size, kPageSize);
336
337  return stack_size;
338}
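
// Illustrative, standalone sketch (not ART code): the arithmetic in FixStackSize boils
// down to "requested (or default) size + 1 MB bionic-compatibility pad, clamped to
// PTHREAD_STACK_MIN, plus the reserved overflow region (plus the implicit-check region
// when explicit checks are off), rounded up to a page multiple". The constants below are
// hypothetical stand-ins for the runtime-provided values.
#include <algorithm>
#include <cstddef>
#include <limits.h>
#include <pthread.h>  // PTHREAD_STACK_MIN

namespace stack_size_sketch {

constexpr size_t kDemoPageSize = 4096;
constexpr size_t kDemoDefaultStackSize = 1u * 1024 * 1024;  // Stand-in for GetDefaultStackSize().
constexpr size_t kDemoReservedBytes = 8 * 1024;             // Stand-in for GetStackOverflowReservedBytes().
constexpr size_t kDemoImplicitCheckBytes = 8 * 1024;        // Stand-in for kStackOverflowImplicitCheckSize.

inline size_t RoundUpToPage(size_t n) {
  return (n + kDemoPageSize - 1) & ~(kDemoPageSize - 1);
}

inline size_t DemoFixStackSize(size_t requested, bool explicit_checks) {
  size_t stack_size = (requested == 0) ? kDemoDefaultStackSize : requested;
  stack_size += 1 * 1024 * 1024;                                  // Bionic-compat pad.
  stack_size = std::max<size_t>(stack_size, PTHREAD_STACK_MIN);   // Respect the system minimum.
  stack_size += kDemoReservedBytes;
  if (!explicit_checks) {
    stack_size += kDemoImplicitCheckBytes;                        // Room for the protected region.
  }
  return RoundUpToPage(stack_size);
}

}  // namespace stack_size_sketch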
339
340// Global variable to prevent the compiler optimizing away the page reads for the stack.
341uint8_t dont_optimize_this;
342
343// Install a protected region in the stack.  This is used to trigger a SIGSEGV if a stack
344// overflow is detected.  It is located right below the stack_begin_.
345//
346// There is a little complexity here that deserves a special mention.  On some
347// architectures, the stack is created using a VM_GROWSDOWN flag
348// to prevent memory from being allocated when it's not needed.  This flag makes the
349// kernel only allocate memory for the stack by growing down in memory.  Because we
350// want to put an mprotected region far away from that at the stack top, we need
351// to make sure the pages for the stack are mapped in before we call mprotect.  We do
352// this by reading every page from the stack bottom (highest address) to the stack top.
353// We then madvise this away.
354void Thread::InstallImplicitProtection() {
355  uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
356  uint8_t* stack_himem = tlsPtr_.stack_end;
357  uint8_t* stack_top = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(&stack_himem) &
358      ~(kPageSize - 1));    // Page containing current top of stack.
359
360  // First remove the protection on the protected region as we will want to read and
361  // write it.  This may fail (on the first attempt when the stack is not mapped)
362  // but we ignore that.
363  UnprotectStack();
364
365  // Map in the stack.  This must be done by reading from the
366  // current stack pointer downwards as the stack may be mapped using VM_GROWSDOWN
367  // in the kernel.  Any access more than a page below the current SP might cause
368  // a segv.
369
370  // Read every page from the high address to the low.
371  for (uint8_t* p = stack_top; p >= pregion; p -= kPageSize) {
372    dont_optimize_this = *p;
373  }
374
375  VLOG(threads) << "installing stack protected region at " << std::hex <<
376      static_cast<void*>(pregion) << " to " <<
377      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
378
379  // Protect the bottom of the stack to prevent read/write to it.
380  ProtectStack();
381
382  // Tell the kernel that we won't be needing these pages any more.
383  // NB. after madvise(MADV_DONTNEED), subsequent reads of these pages return zeroes on Linux.
384  uint32_t unwanted_size = stack_top - pregion - kPageSize;
385  madvise(pregion, unwanted_size, MADV_DONTNEED);
386}
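
// Illustrative, standalone sketch (not ART code): the core of the implicit stack-overflow
// check is a no-access (PROT_NONE) region near the stack limit; running off the stack
// faults in that region and the SIGSEGV handler converts the fault into a
// StackOverflowError. The sketch below applies the same mechanism to a plain anonymous
// mapping rather than a real thread stack, assuming POSIX mmap/mprotect.
#include <sys/mman.h>
#include <cstddef>

namespace guard_region_sketch {

constexpr size_t kDemoPageSize = 4096;
constexpr size_t kDemoRegionSize = 16 * kDemoPageSize;

// Returns the base of a region whose lowest page is a no-access guard page, or nullptr.
inline void* MapRegionWithGuard() {
  void* base = mmap(nullptr, kDemoRegionSize, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (base == MAP_FAILED) {
    return nullptr;
  }
  // Touch one byte in every page from the top down before protecting. This mirrors the
  // read loop above; it matters for VM_GROWSDOWN stacks, less so for a plain mapping.
  unsigned char* bytes = static_cast<unsigned char*>(base);
  volatile unsigned char sink = 0;
  for (size_t offset = kDemoRegionSize; offset >= kDemoPageSize; offset -= kDemoPageSize) {
    sink = bytes[offset - 1];
  }
  (void)sink;
  // Protect the lowest page: any access to it now raises SIGSEGV.
  if (mprotect(base, kDemoPageSize, PROT_NONE) != 0) {
    munmap(base, kDemoRegionSize);
    return nullptr;
  }
  return base;
}

}  // namespace guard_region_sketch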
387
388void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
389  CHECK(java_peer != nullptr);
390  Thread* self = static_cast<JNIEnvExt*>(env)->self;
391  Runtime* runtime = Runtime::Current();
392
393  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
394  bool thread_start_during_shutdown = false;
395  {
396    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
397    if (runtime->IsShuttingDownLocked()) {
398      thread_start_during_shutdown = true;
399    } else {
400      runtime->StartThreadBirth();
401    }
402  }
403  if (thread_start_during_shutdown) {
404    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
405    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
406    return;
407  }
408
409  Thread* child_thread = new Thread(is_daemon);
410  // Use global JNI ref to hold peer live while child thread starts.
411  child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
412  stack_size = FixStackSize(stack_size);
413
414  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to
415  // assign it.
416  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
417                    reinterpret_cast<jlong>(child_thread));
418
419  // Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and
420  // do not have a good way to report this on the child's side.
421  std::unique_ptr<JNIEnvExt> child_jni_env_ext(
422      JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM()));
423
424  int pthread_create_result = 0;
425  if (child_jni_env_ext.get() != nullptr) {
426    pthread_t new_pthread;
427    pthread_attr_t attr;
428    child_thread->tlsPtr_.tmp_jni_env = child_jni_env_ext.get();
429    CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
430    CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
431                       "PTHREAD_CREATE_DETACHED");
432    CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
433    pthread_create_result = pthread_create(&new_pthread,
434                                           &attr,
435                                           Thread::CreateCallback,
436                                           child_thread);
437    CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");
438
439    if (pthread_create_result == 0) {
440      // pthread_create started the new thread. The child is now responsible for managing the
441      // JNIEnvExt we created.
442      // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
443      //       between the threads.
444      child_jni_env_ext.release();
445      return;
446    }
447  }
448
449  // Either JNIEnvExt::Create or pthread_create(3) failed, so clean up.
450  {
451    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
452    runtime->EndThreadBirth();
453  }
454  // Manually delete the global reference since Thread::Init will not have been run.
455  env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer);
456  child_thread->tlsPtr_.jpeer = nullptr;
457  delete child_thread;
458  child_thread = nullptr;
459  // TODO: remove from thread group?
460  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
461  {
462    std::string msg(child_jni_env_ext.get() == nullptr ?
463        "Could not allocate JNI Env" :
464        StringPrintf("pthread_create (%s stack) failed: %s",
465                                 PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
466    ScopedObjectAccess soa(env);
467    soa.Self()->ThrowOutOfMemoryError(msg.c_str());
468  }
469}
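
// Illustrative, standalone sketch (not ART code): the pthread usage above follows a common
// shape, configure a pthread_attr_t (detached, explicit stack size), create the thread,
// always destroy the attributes, and only on pthread_create() == 0 hand ownership of
// per-thread resources to the child (cf. child_jni_env_ext.release() above). A minimal
// helper with that shape:
#include <pthread.h>
#include <cstddef>

namespace detached_thread_sketch {

// Returns 0 on success or the pthread error code on failure. On failure the caller keeps
// ownership of anything it planned to hand to the child thread.
inline int StartDetachedThread(void* (*entry)(void*), void* arg, size_t stack_size) {
  pthread_attr_t attr;
  int rc = pthread_attr_init(&attr);
  if (rc != 0) {
    return rc;
  }
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  pthread_attr_setstacksize(&attr, stack_size);  // stack_size must be >= PTHREAD_STACK_MIN.
  pthread_t thread;
  rc = pthread_create(&thread, &attr, entry, arg);
  pthread_attr_destroy(&attr);
  return rc;
}

}  // namespace detached_thread_sketch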
470
471bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_env_ext) {
472  // This function does all the initialization that must be run by the native thread it applies to.
473  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
474  // we can handshake with the corresponding native thread when it's ready.) Check this native
475  // thread hasn't been through here already...
476  CHECK(Thread::Current() == nullptr);
477
478  // Set pthread_self_ before the pthread_setspecific call that makes Thread::Current() work;
479  // this avoids pthread_self_ ever being invalid when observed via Thread::Current().
480  tlsPtr_.pthread_self = pthread_self();
481  CHECK(is_started_);
482
483  SetUpAlternateSignalStack();
484  if (!InitStackHwm()) {
485    return false;
486  }
487  InitCpu();
488  InitTlsEntryPoints();
489  RemoveSuspendTrigger();
490  InitCardTable();
491  InitTid();
492
493  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
494  DCHECK_EQ(Thread::Current(), this);
495
496  tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
497
498  if (jni_env_ext != nullptr) {
499    DCHECK_EQ(jni_env_ext->vm, java_vm);
500    DCHECK_EQ(jni_env_ext->self, this);
501    tlsPtr_.jni_env = jni_env_ext;
502  } else {
503    tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm);
504    if (tlsPtr_.jni_env == nullptr) {
505      return false;
506    }
507  }
508
509  thread_list->Register(this);
510  return true;
511}
512
513Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
514                       bool create_peer) {
515  Runtime* runtime = Runtime::Current();
516  if (runtime == nullptr) {
517    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
518    return nullptr;
519  }
520  Thread* self;
521  {
522    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
523    if (runtime->IsShuttingDownLocked()) {
524      LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
525      return nullptr;
526    } else {
527      Runtime::Current()->StartThreadBirth();
528      self = new Thread(as_daemon);
529      bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
530      Runtime::Current()->EndThreadBirth();
531      if (!init_success) {
532        delete self;
533        return nullptr;
534      }
535    }
536  }
537
538  self->InitStringEntryPoints();
539
540  CHECK_NE(self->GetState(), kRunnable);
541  self->SetState(kNative);
542
543  // If we're the main thread, ClassLinker won't be created until after we're attached,
544  // so that thread needs a two-stage attach. Regular threads don't need this hack.
545  // In the compiler, all threads need this hack, because no-one's going to be getting
546  // a native peer!
547  if (create_peer) {
548    self->CreatePeer(thread_name, as_daemon, thread_group);
549  } else {
550    // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
551    if (thread_name != nullptr) {
552      self->tlsPtr_.name->assign(thread_name);
553      ::art::SetThreadName(thread_name);
554    } else if (self->GetJniEnv()->check_jni) {
555      LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
556    }
557  }
558
559  {
560    ScopedObjectAccess soa(self);
561    Dbg::PostThreadStart(self);
562  }
563
564  return self;
565}
566
567void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
568  Runtime* runtime = Runtime::Current();
569  CHECK(runtime->IsStarted());
570  JNIEnv* env = tlsPtr_.jni_env;
571
572  if (thread_group == nullptr) {
573    thread_group = runtime->GetMainThreadGroup();
574  }
575  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
576  // Add missing null check in case of OOM b/18297817
577  if (name != nullptr && thread_name.get() == nullptr) {
578    CHECK(IsExceptionPending());
579    return;
580  }
581  jint thread_priority = GetNativePriority();
582  jboolean thread_is_daemon = as_daemon;
583
584  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
585  if (peer.get() == nullptr) {
586    CHECK(IsExceptionPending());
587    return;
588  }
589  {
590    ScopedObjectAccess soa(this);
591    tlsPtr_.opeer = soa.Decode<mirror::Object*>(peer.get());
592  }
593  env->CallNonvirtualVoidMethod(peer.get(),
594                                WellKnownClasses::java_lang_Thread,
595                                WellKnownClasses::java_lang_Thread_init,
596                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
597  AssertNoPendingException();
598
599  Thread* self = this;
600  DCHECK_EQ(self, Thread::Current());
601  env->SetLongField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
602                    reinterpret_cast<jlong>(self));
603
604  ScopedObjectAccess soa(self);
605  StackHandleScope<1> hs(self);
606  MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName(soa)));
607  if (peer_thread_name.Get() == nullptr) {
608    // The Thread constructor should have set the Thread.name to a
609    // non-null value. However, because we can run without code
610    // available (in the compiler, in tests), we manually assign the
611    // fields the constructor should have set.
612    if (runtime->IsActiveTransaction()) {
613      InitPeer<true>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
614    } else {
615      InitPeer<false>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
616    }
617    peer_thread_name.Assign(GetThreadName(soa));
618  }
619  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
620  if (peer_thread_name.Get() != nullptr) {
621    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
622  }
623}
624
625template<bool kTransactionActive>
626void Thread::InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
627                      jobject thread_name, jint thread_priority) {
628  soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
629      SetBoolean<kTransactionActive>(tlsPtr_.opeer, thread_is_daemon);
630  soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
631      SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object*>(thread_group));
632  soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
633      SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object*>(thread_name));
634  soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
635      SetInt<kTransactionActive>(tlsPtr_.opeer, thread_priority);
636}
637
638void Thread::SetThreadName(const char* name) {
639  tlsPtr_.name->assign(name);
640  ::art::SetThreadName(name);
641  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
642}
643
644bool Thread::InitStackHwm() {
645  void* read_stack_base;
646  size_t read_stack_size;
647  size_t read_guard_size;
648  GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size);
649
650  tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base);
651  tlsPtr_.stack_size = read_stack_size;
652
653  // The minimum stack size we can cope with is the overflow reserved bytes (typically
654  // 8K) + the protected region size (4K) + another page (4K).  Typically this will
655  // be 8+4+4 = 16K.  The thread won't be able to do much with this stack: even the GC takes
656  // between 8K and 12K.
657  uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
658    + 4 * KB;
659  if (read_stack_size <= min_stack) {
660    // Note, as we know the stack is small, avoid operations that could use a lot of stack.
661    LogMessage::LogLineLowStack(__PRETTY_FUNCTION__, __LINE__, ERROR,
662                                "Attempt to attach a thread with a too-small stack");
663    return false;
664  }
665
666  // This is included in the SIGQUIT output, but it's useful here for thread debugging.
667  VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)",
668                                read_stack_base,
669                                PrettySize(read_stack_size).c_str(),
670                                PrettySize(read_guard_size).c_str());
671
672  // Set stack_end_ to the bottom of the stack, saving space for handling stack overflows.
673
674  Runtime* runtime = Runtime::Current();
675  bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
676  ResetDefaultStackEnd();
677
678  // Install the protected region if we are doing implicit overflow checks.
679  if (implicit_stack_check) {
680    // The thread might have a protected region at the bottom.  We need
681    // to install our own region, so we move the limits
682    // of the stack to make room for it.
683
684    tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
685    tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
686    tlsPtr_.stack_size -= read_guard_size;
687
688    InstallImplicitProtection();
689  }
690
691  // Sanity check.
692  int stack_variable;
693  CHECK_GT(&stack_variable, reinterpret_cast<void*>(tlsPtr_.stack_end));
694
695  return true;
696}
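
// Illustrative, standalone sketch (not ART code): GetThreadStack() (defined elsewhere in
// ART) ultimately asks the pthread implementation where the current stack lives. On
// Linux/bionic the same information can be read with pthread_getattr_np ("_np" =
// non-portable; glibc may require _GNU_SOURCE). A minimal version:
#include <pthread.h>
#include <cstddef>

namespace stack_bounds_sketch {

struct StackBounds {
  void* base = nullptr;    // Lowest address of the stack mapping.
  size_t size = 0;         // Usable size reported by the pthread library.
  size_t guard_size = 0;   // Size of the pthread guard region below the stack.
};

// Returns true on success. Linux-specific.
inline bool ReadCurrentStackBounds(StackBounds* out) {
  pthread_attr_t attr;
  if (pthread_getattr_np(pthread_self(), &attr) != 0) {
    return false;
  }
  bool ok = pthread_attr_getstack(&attr, &out->base, &out->size) == 0 &&
            pthread_attr_getguardsize(&attr, &out->guard_size) == 0;
  pthread_attr_destroy(&attr);
  return ok;
}

}  // namespace stack_bounds_sketch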
697
698void Thread::ShortDump(std::ostream& os) const {
699  os << "Thread[";
700  if (GetThreadId() != 0) {
701    // If we're in kStarting, we won't have a thin lock id or tid yet.
702    os << GetThreadId()
703       << ",tid=" << GetTid() << ',';
704  }
705  os << GetState()
706     << ",Thread*=" << this
707     << ",peer=" << tlsPtr_.opeer
708     << ",\"" << (tlsPtr_.name != nullptr ? *tlsPtr_.name : "null") << "\""
709     << "]";
710}
711
712void Thread::Dump(std::ostream& os) const {
713  DumpState(os);
714  DumpStack(os);
715}
716
717mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
718  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
719  return (tlsPtr_.opeer != nullptr) ?
720      reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
721}
722
723void Thread::GetThreadName(std::string& name) const {
724  name.assign(*tlsPtr_.name);
725}
726
727uint64_t Thread::GetCpuMicroTime() const {
728#if defined(__linux__)
729  clockid_t cpu_clock_id;
730  pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
731  timespec now;
732  clock_gettime(cpu_clock_id, &now);
733  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
734#else  // __APPLE__
735  UNIMPLEMENTED(WARNING);
736  return -1;
737#endif
738}
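
// Illustrative, standalone sketch (not ART code): the pattern above, a per-thread CPU
// clock id from pthread_getcpuclockid fed to clock_gettime, is also handy for measuring
// how much CPU time a block of code used on the calling thread, independent of wall-clock
// time. Standalone example:
#include <pthread.h>
#include <time.h>
#include <cstdint>

namespace cpu_time_sketch {

inline uint64_t ThreadCpuMicros() {
  clockid_t cpu_clock_id;
  if (pthread_getcpuclockid(pthread_self(), &cpu_clock_id) != 0) {
    return 0;  // Not supported on this platform.
  }
  timespec now;
  if (clock_gettime(cpu_clock_id, &now) != 0) {
    return 0;
  }
  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) +
         static_cast<uint64_t>(now.tv_nsec) / UINT64_C(1000);
}

// Usage: uint64_t before = ThreadCpuMicros(); DoWork(); uint64_t spent = ThreadCpuMicros() - before;

}  // namespace cpu_time_sketch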
739
740// Attempt to rectify lock state so that we dump the thread list with the required locks held before exiting.
741static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
742  LOG(ERROR) << *thread << " suspend count already zero.";
743  Locks::thread_suspend_count_lock_->Unlock(self);
744  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
745    Locks::mutator_lock_->SharedTryLock(self);
746    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
747      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
748    }
749  }
750  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
751    Locks::thread_list_lock_->TryLock(self);
752    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
753      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
754    }
755  }
756  std::ostringstream ss;
757  Runtime::Current()->GetThreadList()->Dump(ss);
758  LOG(FATAL) << ss.str();
759}
760
761void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
762  if (kIsDebugBuild) {
763    DCHECK(delta == -1 || delta == +1 || delta == -tls32_.debug_suspend_count)
764          << delta << " " << tls32_.debug_suspend_count << " " << this;
765    DCHECK_GE(tls32_.suspend_count, tls32_.debug_suspend_count) << this;
766    Locks::thread_suspend_count_lock_->AssertHeld(self);
767    if (this != self && !IsSuspended()) {
768      Locks::thread_list_lock_->AssertHeld(self);
769    }
770  }
771  if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
772    UnsafeLogFatalForSuspendCount(self, this);
773    return;
774  }
775
776  tls32_.suspend_count += delta;
777  if (for_debugger) {
778    tls32_.debug_suspend_count += delta;
779  }
780
781  if (tls32_.suspend_count == 0) {
782    AtomicClearFlag(kSuspendRequest);
783  } else {
784    AtomicSetFlag(kSuspendRequest);
785    TriggerSuspend();
786  }
787}
788
789void Thread::RunCheckpointFunction() {
790  Closure *checkpoints[kMaxCheckpoints];
791
792  // Grab the suspend_count lock and copy the current set of
793  // checkpoints.  Then clear the list and the flag.  The RequestCheckpoint
794  // function will also grab this lock so we prevent a race between setting
795  // the kCheckpointRequest flag and clearing it.
796  {
797    MutexLock mu(this, *Locks::thread_suspend_count_lock_);
798    for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
799      checkpoints[i] = tlsPtr_.checkpoint_functions[i];
800      tlsPtr_.checkpoint_functions[i] = nullptr;
801    }
802    AtomicClearFlag(kCheckpointRequest);
803  }
804
805  // Outside the lock, run all the checkpoint functions that
806  // we collected.
807  bool found_checkpoint = false;
808  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
809    if (checkpoints[i] != nullptr) {
810      ATRACE_BEGIN("Checkpoint function");
811      checkpoints[i]->Run(this);
812      ATRACE_END();
813      found_checkpoint = true;
814    }
815  }
816  CHECK(found_checkpoint);
817}
818
819bool Thread::RequestCheckpoint(Closure* function) {
820  union StateAndFlags old_state_and_flags;
821  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
822  if (old_state_and_flags.as_struct.state != kRunnable) {
823    return false;  // Fail, thread is suspended and so can't run a checkpoint.
824  }
825
826  uint32_t available_checkpoint = kMaxCheckpoints;
827  for (uint32_t i = 0 ; i < kMaxCheckpoints; ++i) {
828    if (tlsPtr_.checkpoint_functions[i] == nullptr) {
829      available_checkpoint = i;
830      break;
831    }
832  }
833  if (available_checkpoint == kMaxCheckpoints) {
834    // No checkpoint function slots are available; we can't run a checkpoint.
835    return false;
836  }
837  tlsPtr_.checkpoint_functions[available_checkpoint] = function;
838
839  // Checkpoint function installed; now install the flag bit.
840  // We must be runnable to request a checkpoint.
841  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
842  union StateAndFlags new_state_and_flags;
843  new_state_and_flags.as_int = old_state_and_flags.as_int;
844  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
845  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
846      old_state_and_flags.as_int, new_state_and_flags.as_int);
847  if (UNLIKELY(!success)) {
848    // The thread changed state before the checkpoint was installed.
849    CHECK_EQ(tlsPtr_.checkpoint_functions[available_checkpoint], function);
850    tlsPtr_.checkpoint_functions[available_checkpoint] = nullptr;
851  } else {
852    CHECK_EQ(ReadFlag(kCheckpointRequest), true);
853    TriggerSuspend();
854  }
855  return success;
856}
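
// Illustrative, standalone sketch (not ART code): RequestCheckpoint relies on the 32-bit
// state_and_flags word packing the thread state and the request flags together, so a
// single compare-and-swap can both verify the thread is still runnable and publish the
// kCheckpointRequest bit. The bit layout below is hypothetical, not ART's actual encoding.
#include <atomic>
#include <cstdint>

namespace packed_state_sketch {

constexpr uint32_t kStateMask = 0xffffu;            // Low 16 bits: state (hypothetical layout).
constexpr uint32_t kDemoRunnable = 1u;
constexpr uint32_t kDemoCheckpointFlag = 1u << 16;  // High bits: flags.

// Atomically set the checkpoint flag, but only if the state is still "runnable".
// Returns false if the thread changed state first (caller must undo its bookkeeping).
inline bool RequestCheckpointFlag(std::atomic<uint32_t>* state_and_flags) {
  uint32_t old_value = state_and_flags->load(std::memory_order_relaxed);
  if ((old_value & kStateMask) != kDemoRunnable) {
    return false;
  }
  uint32_t new_value = old_value | kDemoCheckpointFlag;
  return state_and_flags->compare_exchange_strong(old_value, new_value,
                                                  std::memory_order_seq_cst);
}

}  // namespace packed_state_sketch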
857
858Closure* Thread::GetFlipFunction() {
859  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
860  Closure* func;
861  do {
862    func = atomic_func->LoadRelaxed();
863    if (func == nullptr) {
864      return nullptr;
865    }
866  } while (!atomic_func->CompareExchangeWeakSequentiallyConsistent(func, nullptr));
867  DCHECK(func != nullptr);
868  return func;
869}
870
871void Thread::SetFlipFunction(Closure* function) {
872  CHECK(function != nullptr);
873  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
874  atomic_func->StoreSequentiallyConsistent(function);
875}
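
// Illustrative, standalone sketch (not ART code): GetFlipFunction implements a "claim
// once" hand-off; several threads may race to run the flip function, but the CAS to null
// guarantees exactly one of them gets a non-null pointer back. The same idea with
// std::atomic and std::function:
#include <atomic>
#include <functional>

namespace one_shot_sketch {

using Task = std::function<void()>;

// At most one caller of Claim() receives the stored task; everyone else gets nullptr.
class OneShotTask {
 public:
  void Set(Task* task) { slot_.store(task, std::memory_order_seq_cst); }

  Task* Claim() {
    Task* task;
    do {
      task = slot_.load(std::memory_order_relaxed);
      if (task == nullptr) {
        return nullptr;  // Nothing published, or someone else already claimed it.
      }
    } while (!slot_.compare_exchange_weak(task, nullptr, std::memory_order_seq_cst));
    return task;
  }

 private:
  std::atomic<Task*> slot_{nullptr};
};

}  // namespace one_shot_sketch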
876
877void Thread::FullSuspendCheck() {
878  VLOG(threads) << this << " self-suspending";
879  ATRACE_BEGIN("Full suspend check");
880  // Make thread appear suspended to other threads, release mutator_lock_.
881  tls32_.suspended_at_suspend_check = true;
882  TransitionFromRunnableToSuspended(kSuspended);
883  // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
884  TransitionFromSuspendedToRunnable();
885  tls32_.suspended_at_suspend_check = false;
886  ATRACE_END();
887  VLOG(threads) << this << " self-reviving";
888}
889
890void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
891  std::string group_name;
892  int priority;
893  bool is_daemon = false;
894  Thread* self = Thread::Current();
895
896  // If flip_function is not null, it means we have run a checkpoint
897  // before the thread wakes up to execute the flip function and the
898  // thread roots haven't been forwarded.  So the following access to
899  // the roots (opeer or methods in the frames) would be bad. Run it
900  // here. TODO: clean up.
901  if (thread != nullptr) {
902    ScopedObjectAccessUnchecked soa(self);
903    Thread* this_thread = const_cast<Thread*>(thread);
904    Closure* flip_func = this_thread->GetFlipFunction();
905    if (flip_func != nullptr) {
906      flip_func->Run(this_thread);
907    }
908  }
909
910  // Don't do this if we are aborting since the GC may have all the threads suspended. This will
911  // cause ScopedObjectAccessUnchecked to deadlock.
912  if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
913    ScopedObjectAccessUnchecked soa(self);
914    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)
915        ->GetInt(thread->tlsPtr_.opeer);
916    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)
917        ->GetBoolean(thread->tlsPtr_.opeer);
918
919    mirror::Object* thread_group =
920        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->tlsPtr_.opeer);
921
922    if (thread_group != nullptr) {
923      ArtField* group_name_field =
924          soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
925      mirror::String* group_name_string =
926          reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
927      group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
928    }
929  } else {
930    priority = GetNativePriority();
931  }
932
933  std::string scheduler_group_name(GetSchedulerGroupName(tid));
934  if (scheduler_group_name.empty()) {
935    scheduler_group_name = "default";
936  }
937
938  if (thread != nullptr) {
939    os << '"' << *thread->tlsPtr_.name << '"';
940    if (is_daemon) {
941      os << " daemon";
942    }
943    os << " prio=" << priority
944       << " tid=" << thread->GetThreadId()
945       << " " << thread->GetState();
946    if (thread->IsStillStarting()) {
947      os << " (still starting up)";
948    }
949    os << "\n";
950  } else {
951    os << '"' << ::art::GetThreadName(tid) << '"'
952       << " prio=" << priority
953       << " (not attached)\n";
954  }
955
956  if (thread != nullptr) {
957    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
958    os << "  | group=\"" << group_name << "\""
959       << " sCount=" << thread->tls32_.suspend_count
960       << " dsCount=" << thread->tls32_.debug_suspend_count
961       << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer)
962       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
963  }
964
965  os << "  | sysTid=" << tid
966     << " nice=" << getpriority(PRIO_PROCESS, tid)
967     << " cgrp=" << scheduler_group_name;
968  if (thread != nullptr) {
969    int policy;
970    sched_param sp;
971    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp),
972                       __FUNCTION__);
973    os << " sched=" << policy << "/" << sp.sched_priority
974       << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self);
975  }
976  os << "\n";
977
978  // Grab the scheduler stats for this thread.
979  std::string scheduler_stats;
980  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
981    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
982  } else {
983    scheduler_stats = "0 0 0";
984  }
985
986  char native_thread_state = '?';
987  int utime = 0;
988  int stime = 0;
989  int task_cpu = 0;
990  GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu);
991
992  os << "  | state=" << native_thread_state
993     << " schedstat=( " << scheduler_stats << " )"
994     << " utm=" << utime
995     << " stm=" << stime
996     << " core=" << task_cpu
997     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
998  if (thread != nullptr) {
999    os << "  | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-"
1000        << reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize="
1001        << PrettySize(thread->tlsPtr_.stack_size) << "\n";
1002    // Dump the held mutexes.
1003    os << "  | held mutexes=";
1004    for (size_t i = 0; i < kLockLevelCount; ++i) {
1005      if (i != kMonitorLock) {
1006        BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i));
1007        if (mutex != nullptr) {
1008          os << " \"" << mutex->GetName() << "\"";
1009          if (mutex->IsReaderWriterMutex()) {
1010            ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex);
1011            if (rw_mutex->GetExclusiveOwnerTid() == static_cast<uint64_t>(tid)) {
1012              os << "(exclusive held)";
1013            } else {
1014              os << "(shared held)";
1015            }
1016          }
1017        }
1018      }
1019    }
1020    os << "\n";
1021  }
1022}
1023
1024void Thread::DumpState(std::ostream& os) const {
1025  Thread::DumpState(os, this, GetTid());
1026}
1027
1028struct StackDumpVisitor : public StackVisitor {
1029  StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
1030      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1031      : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
1032        os(os_in),
1033        thread(thread_in),
1034        can_allocate(can_allocate_in),
1035        last_method(nullptr),
1036        last_line_number(0),
1037        repetition_count(0),
1038        frame_count(0) {}
1039
1040  virtual ~StackDumpVisitor() {
1041    if (frame_count == 0) {
1042      os << "  (no managed stack frames)\n";
1043    }
1044  }
1045
1046  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1047    ArtMethod* m = GetMethod();
1048    if (m->IsRuntimeMethod()) {
1049      return true;
1050    }
1051    const int kMaxRepetition = 3;
1052    mirror::Class* c = m->GetDeclaringClass();
1053    mirror::DexCache* dex_cache = c->GetDexCache();
1054    int line_number = -1;
1055    if (dex_cache != nullptr) {  // be tolerant of bad input
1056      const DexFile& dex_file = *dex_cache->GetDexFile();
1057      line_number = dex_file.GetLineNumFromPC(m, GetDexPc(false));
1058    }
1059    if (line_number == last_line_number && last_method == m) {
1060      ++repetition_count;
1061    } else {
1062      if (repetition_count >= kMaxRepetition) {
1063        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
1064      }
1065      repetition_count = 0;
1066      last_line_number = line_number;
1067      last_method = m;
1068    }
1069    if (repetition_count < kMaxRepetition) {
1070      os << "  at " << PrettyMethod(m, false);
1071      if (m->IsNative()) {
1072        os << "(Native method)";
1073      } else {
1074        const char* source_file(m->GetDeclaringClassSourceFile());
1075        os << "(" << (source_file != nullptr ? source_file : "unavailable")
1076           << ":" << line_number << ")";
1077      }
1078      os << "\n";
1079      if (frame_count == 0) {
1080        Monitor::DescribeWait(os, thread);
1081      }
1082      if (can_allocate) {
1083        // Visit locks, but do not abort on errors. This would trigger a nested abort.
1084        Monitor::VisitLocks(this, DumpLockedObject, &os, false);
1085      }
1086    }
1087
1088    ++frame_count;
1089    return true;
1090  }
1091
1092  static void DumpLockedObject(mirror::Object* o, void* context)
1093      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1094    std::ostream& os = *reinterpret_cast<std::ostream*>(context);
1095    os << "  - locked ";
1096    if (o == nullptr) {
1097      os << "an unknown object";
1098    } else {
1099      if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
1100          Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
1101        // Getting the identity hashcode here would result in lock inflation and suspension of the
1102        // current thread, which isn't safe if this is the only runnable thread.
1103        os << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)", reinterpret_cast<intptr_t>(o),
1104                           PrettyTypeOf(o).c_str());
1105      } else {
1106        // IdentityHashCode can cause thread suspension, which would invalidate o if it moved. So
1107      // we get the pretty type before we call IdentityHashCode.
1108        const std::string pretty_type(PrettyTypeOf(o));
1109        os << StringPrintf("<0x%08x> (a %s)", o->IdentityHashCode(), pretty_type.c_str());
1110      }
1111    }
1112    os << "\n";
1113  }
1114
1115  std::ostream& os;
1116  const Thread* thread;
1117  const bool can_allocate;
1118  ArtMethod* last_method;
1119  int last_line_number;
1120  int repetition_count;
1121  int frame_count;
1122};
1123
1124static bool ShouldShowNativeStack(const Thread* thread)
1125    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1126  ThreadState state = thread->GetState();
1127
1128  // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
1129  if (state > kWaiting && state < kStarting) {
1130    return true;
1131  }
1132
1133  // In an Object.wait variant or Thread.sleep? That's not interesting.
1134  if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
1135    return false;
1136  }
1137
1138  // Threads with no managed stack frames should be shown.
1139  const ManagedStack* managed_stack = thread->GetManagedStack();
1140  if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
1141      managed_stack->GetTopShadowFrame() == nullptr)) {
1142    return true;
1143  }
1144
1145  // In some other native method? That's interesting.
1146  // We don't just check kNative because native methods will be in state kSuspended if they're
1147  // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
1148  // thread-startup states if it's early enough in their life cycle (http://b/7432159).
1149  ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
1150  return current_method != nullptr && current_method->IsNative();
1151}
1152
1153void Thread::DumpJavaStack(std::ostream& os) const {
1154  // If flip_function is not null, it means we have run a checkpoint
1155  // before the thread wakes up to execute the flip function and the
1156  // thread roots haven't been forwarded.  So the following access to
1157  // the roots (locks or methods in the frames) would be bad. Run it
1158  // here. TODO: clean up.
1159  {
1160    Thread* this_thread = const_cast<Thread*>(this);
1161    Closure* flip_func = this_thread->GetFlipFunction();
1162    if (flip_func != nullptr) {
1163      flip_func->Run(this_thread);
1164    }
1165  }
1166
1167  // Dumping the Java stack involves the verifier for locks. The verifier operates under the
1168  // assumption that there is no exception pending on entry. Thus, stash any pending exception.
1169  // Use Thread::Current() instead of this, in case a thread is dumping the stack of another suspended
1170  // thread.
1171  StackHandleScope<1> scope(Thread::Current());
1172  Handle<mirror::Throwable> exc;
1173  bool have_exception = false;
1174  if (IsExceptionPending()) {
1175    exc = scope.NewHandle(GetException());
1176    const_cast<Thread*>(this)->ClearException();
1177    have_exception = true;
1178  }
1179
1180  std::unique_ptr<Context> context(Context::Create());
1181  StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
1182                          !tls32_.throwing_OutOfMemoryError);
1183  dumper.WalkStack();
1184
1185  if (have_exception) {
1186    const_cast<Thread*>(this)->SetException(exc.Get());
1187  }
1188}
1189
1190void Thread::DumpStack(std::ostream& os) const {
1191  // TODO: we call this code when dying but may not have suspended the thread ourselves. The
1192  //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
1193  //       the race with the thread_suspend_count_lock_).
1194  bool dump_for_abort = (gAborting > 0);
1195  bool safe_to_dump = (this == Thread::Current() || IsSuspended());
1196  if (!kIsDebugBuild) {
1197    // We always want to dump the stack for an abort; however, there is no point dumping another
1198    // thread's stack in debug builds where we'll hit the not suspended check in the stack walk.
1199    safe_to_dump = (safe_to_dump || dump_for_abort);
1200  }
1201  if (safe_to_dump) {
1202    // If we're currently in native code, dump that stack before dumping the managed stack.
1203    if (dump_for_abort || ShouldShowNativeStack(this)) {
1204      DumpKernelStack(os, GetTid(), "  kernel: ", false);
1205      DumpNativeStack(os, GetTid(), "  native: ", GetCurrentMethod(nullptr, !dump_for_abort));
1206    }
1207    DumpJavaStack(os);
1208  } else {
1209    os << "Not able to dump stack of thread that isn't suspended";
1210  }
1211}
1212
1213void Thread::ThreadExitCallback(void* arg) {
1214  Thread* self = reinterpret_cast<Thread*>(arg);
1215  if (self->tls32_.thread_exit_check_count == 0) {
1216    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
1217        "going to use a pthread_key_create destructor?): " << *self;
1218    CHECK(is_started_);
1219    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
1220    self->tls32_.thread_exit_check_count = 1;
1221  } else {
1222    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
1223  }
1224}
1225
1226void Thread::Startup() {
1227  CHECK(!is_started_);
1228  is_started_ = true;
1229  {
1230    // MutexLock to keep annotalysis happy.
1231    //
1232    // Note we use null for the thread because Thread::Current can
1233    // return garbage since (is_started_ == true) and
1234    // Thread::pthread_key_self_ is not yet initialized.
1235    // This was seen on glibc.
1236    MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_);
1237    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
1238                                         *Locks::thread_suspend_count_lock_);
1239  }
1240
1241  // Allocate a TLS slot.
1242  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback),
1243                     "self key");
1244
1245  // Double-check the TLS slot allocation.
1246  if (pthread_getspecific(pthread_key_self_) != nullptr) {
1247    LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
1248  }
1249}
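
// Illustrative, standalone sketch (not ART code): the pthread_key_create call above,
// together with pthread_setspecific in Thread::Init and the ThreadExitCallback destructor,
// is the classic "thread-local self pointer with an exit hook" pattern. A minimal version
// with a hypothetical SketchThread type:
#include <pthread.h>
#include <cstdio>

namespace tls_self_sketch {

struct SketchThread { int id; };

static pthread_key_t g_self_key;

// Runs when a thread exits with a non-null value still stored in the key, which is how
// ART notices native threads that never called DetachCurrentThread.
static void OnThreadExit(void* value) {
  std::fprintf(stderr, "thread %d exited while still attached\n",
               static_cast<SketchThread*>(value)->id);
}

static void InitSelfKey() {
  pthread_key_create(&g_self_key, OnThreadExit);
}

static void AttachCurrent(SketchThread* self) {
  pthread_setspecific(g_self_key, self);
}

static SketchThread* Current() {
  return static_cast<SketchThread*>(pthread_getspecific(g_self_key));
}

}  // namespace tls_self_sketch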
1250
1251void Thread::FinishStartup() {
1252  Runtime* runtime = Runtime::Current();
1253  CHECK(runtime->IsStarted());
1254
1255  // Finish attaching the main thread.
1256  ScopedObjectAccess soa(Thread::Current());
1257  Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
1258
1259  Runtime::Current()->GetClassLinker()->RunRootClinits();
1260}
1261
1262void Thread::Shutdown() {
1263  CHECK(is_started_);
1264  is_started_ = false;
1265  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
1266  MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
1267  if (resume_cond_ != nullptr) {
1268    delete resume_cond_;
1269    resume_cond_ = nullptr;
1270  }
1271}
1272
1273Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupted_(false) {
1274  wait_mutex_ = new Mutex("a thread wait mutex");
1275  wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
1276  tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
1277  tlsPtr_.name = new std::string(kThreadNameDuringStartup);
1278  tlsPtr_.nested_signal_state = static_cast<jmp_buf*>(malloc(sizeof(jmp_buf)));
1279
1280  CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
1281  tls32_.state_and_flags.as_struct.flags = 0;
1282  tls32_.state_and_flags.as_struct.state = kNative;
1283  memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
1284  std::fill(tlsPtr_.rosalloc_runs,
1285            tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBrackets,
1286            gc::allocator::RosAlloc::GetDedicatedFullRun());
1287  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
1288    tlsPtr_.checkpoint_functions[i] = nullptr;
1289  }
1290  tlsPtr_.flip_function = nullptr;
1291  tls32_.suspended_at_suspend_check = false;
1292}
1293
1294bool Thread::IsStillStarting() const {
1295  // You might think you can check whether the state is kStarting, but for much of thread startup,
1296  // the thread is in kNative; it might also be in kVmWait.
1297  // You might think you can check whether the peer is null, but the peer is actually created and
1298  // assigned fairly early on, and needs to be.
1299  // It turns out that the last thing to change is the thread name; that's a good proxy for "has
1300  // this thread _ever_ entered kRunnable".
1301  return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) ||
1302      (*tlsPtr_.name == kThreadNameDuringStartup);
1303}
1304
1305void Thread::AssertPendingException() const {
1306  CHECK(IsExceptionPending()) << "Pending exception expected.";
1307}
1308
1309void Thread::AssertPendingOOMException() const {
1310  AssertPendingException();
1311  auto* e = GetException();
1312  CHECK_EQ(e->GetClass(), DecodeJObject(WellKnownClasses::java_lang_OutOfMemoryError)->AsClass())
1313      << e->Dump();
1314}
1315
1316void Thread::AssertNoPendingException() const {
1317  if (UNLIKELY(IsExceptionPending())) {
1318    ScopedObjectAccess soa(Thread::Current());
1319    mirror::Throwable* exception = GetException();
1320    LOG(FATAL) << "No pending exception expected: " << exception->Dump();
1321  }
1322}
1323
1324void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
1325  if (UNLIKELY(IsExceptionPending())) {
1326    ScopedObjectAccess soa(Thread::Current());
1327    mirror::Throwable* exception = GetException();
1328    LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
1329        << exception->Dump();
1330  }
1331}
1332
1333class MonitorExitVisitor : public SingleRootVisitor {
1334 public:
1335  explicit MonitorExitVisitor(Thread* self) : self_(self) { }
1336
1337  // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
1338  void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
1339      OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1340    if (self_->HoldsLock(entered_monitor)) {
1341      LOG(WARNING) << "Calling MonitorExit on object "
1342                   << entered_monitor << " (" << PrettyTypeOf(entered_monitor) << ")"
1343                   << " left locked by native thread "
1344                   << *Thread::Current() << " which is detaching";
1345      entered_monitor->MonitorExit(self_);
1346    }
1347  }
1348
1349 private:
1350  Thread* const self_;
1351};
1352
1353void Thread::Destroy() {
1354  Thread* self = this;
1355  DCHECK_EQ(self, Thread::Current());
1356
1357  if (tlsPtr_.jni_env != nullptr) {
1358    {
1359      ScopedObjectAccess soa(self);
1360      MonitorExitVisitor visitor(self);
1361      // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
1362      tlsPtr_.jni_env->monitors.VisitRoots(&visitor, RootInfo(kRootVMInternal));
1363    }
1364    // Release locally held global references; releasing them may require the mutator lock.
1365    if (tlsPtr_.jpeer != nullptr) {
1366      // If pthread_create fails we don't have a jni env here.
1367      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
1368      tlsPtr_.jpeer = nullptr;
1369    }
1370    if (tlsPtr_.class_loader_override != nullptr) {
1371      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override);
1372      tlsPtr_.class_loader_override = nullptr;
1373    }
1374  }
1375
1376  if (tlsPtr_.opeer != nullptr) {
1377    ScopedObjectAccess soa(self);
1378    // We may need to call user-supplied managed code, do this before final clean-up.
1379    HandleUncaughtExceptions(soa);
1380    RemoveFromThreadGroup(soa);
1381
1382    // this.nativePeer = 0;
1383    if (Runtime::Current()->IsActiveTransaction()) {
1384      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
1385          ->SetLong<true>(tlsPtr_.opeer, 0);
1386    } else {
1387      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
1388          ->SetLong<false>(tlsPtr_.opeer, 0);
1389    }
1390    Dbg::PostThreadDeath(self);
1391
1392    // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
1393    // who is waiting.
1394    mirror::Object* lock =
1395        soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
1396    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
1397    if (lock != nullptr) {
1398      StackHandleScope<1> hs(self);
1399      Handle<mirror::Object> h_obj(hs.NewHandle(lock));
1400      ObjectLock<mirror::Object> locker(self, h_obj);
1401      locker.NotifyAll();
1402    }
1403    tlsPtr_.opeer = nullptr;
1404  }
1405
1406  {
1407    ScopedObjectAccess soa(self);
1408    Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
1409  }
1410}
1411
1412Thread::~Thread() {
1413  CHECK(tlsPtr_.class_loader_override == nullptr);
1414  CHECK(tlsPtr_.jpeer == nullptr);
1415  CHECK(tlsPtr_.opeer == nullptr);
1416  bool initialized = (tlsPtr_.jni_env != nullptr);  // Did Thread::Init run?
1417  if (initialized) {
1418    delete tlsPtr_.jni_env;
1419    tlsPtr_.jni_env = nullptr;
1420  }
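  // A thread being destroyed should no longer be runnable and should have no pending checkpoint or
  // flip work outstanding.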
1421  CHECK_NE(GetState(), kRunnable);
1422  CHECK_NE(ReadFlag(kCheckpointRequest), true);
1423  CHECK(tlsPtr_.checkpoint_functions[0] == nullptr);
1424  CHECK(tlsPtr_.checkpoint_functions[1] == nullptr);
1425  CHECK(tlsPtr_.checkpoint_functions[2] == nullptr);
1426  CHECK(tlsPtr_.flip_function == nullptr);
1427  CHECK_EQ(tls32_.suspended_at_suspend_check, false);
1428
1429  // We may be deleting a stillborn thread.
1430  SetStateUnsafe(kTerminated);
1431
1432  delete wait_cond_;
1433  delete wait_mutex_;
1434
1435  if (tlsPtr_.long_jump_context != nullptr) {
1436    delete tlsPtr_.long_jump_context;
1437  }
1438
1439  if (initialized) {
1440    CleanupCpu();
1441  }
1442
1443  if (tlsPtr_.single_step_control != nullptr) {
1444    delete tlsPtr_.single_step_control;
1445  }
1446  delete tlsPtr_.instrumentation_stack;
1447  delete tlsPtr_.name;
1448  delete tlsPtr_.stack_trace_sample;
1449  free(tlsPtr_.nested_signal_state);
1450
1451  Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
1452
1453  TearDownAlternateSignalStack();
1454}
1455
1456void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
1457  if (!IsExceptionPending()) {
1458    return;
1459  }
1460  ScopedLocalRef<jobject> peer(tlsPtr_.jni_env, soa.AddLocalReference<jobject>(tlsPtr_.opeer));
1461  ScopedThreadStateChange tsc(this, kNative);
1462
1463  // Get and clear the exception.
1464  ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred());
1465  tlsPtr_.jni_env->ExceptionClear();
1466
1467  // If the thread has its own handler, use that.
1468  ScopedLocalRef<jobject> handler(tlsPtr_.jni_env,
1469                                  tlsPtr_.jni_env->GetObjectField(peer.get(),
1470                                      WellKnownClasses::java_lang_Thread_uncaughtHandler));
1471  if (handler.get() == nullptr) {
1472    // Otherwise use the thread group's default handler.
1473    handler.reset(tlsPtr_.jni_env->GetObjectField(peer.get(),
1474                                                  WellKnownClasses::java_lang_Thread_group));
1475  }
1476
1477  // Call the handler.
1478  tlsPtr_.jni_env->CallVoidMethod(handler.get(),
1479      WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException,
1480      peer.get(), exception.get());
1481
1482  // If the handler threw, clear that exception too.
1483  tlsPtr_.jni_env->ExceptionClear();
1484}
1485
1486void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
1487  // this.group.removeThread(this);
1488  // group can be null if we're in the compiler or a test.
1489  mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)
1490      ->GetObject(tlsPtr_.opeer);
1491  if (ogroup != nullptr) {
1492    ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
1493    ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(tlsPtr_.opeer));
1494    ScopedThreadStateChange tsc(soa.Self(), kNative);
1495    tlsPtr_.jni_env->CallVoidMethod(group.get(),
1496                                    WellKnownClasses::java_lang_ThreadGroup_removeThread,
1497                                    peer.get());
1498  }
1499}
1500
1501size_t Thread::NumHandleReferences() {
1502  size_t count = 0;
1503  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
1504    count += cur->NumberOfReferences();
1505  }
1506  return count;
1507}
1508
1509bool Thread::HandleScopeContains(jobject obj) const {
1510  StackReference<mirror::Object>* hs_entry =
1511      reinterpret_cast<StackReference<mirror::Object>*>(obj);
1512  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
1513    if (cur->Contains(hs_entry)) {
1514      return true;
1515    }
1516  }
1517  // JNI code invoked from portable code uses shadow frames rather than the handle scope.
1518  return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
1519}
1520
1521void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
1522  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
1523      visitor, RootInfo(kRootNativeStack, thread_id));
1524  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
1525    for (size_t j = 0, count = cur->NumberOfReferences(); j < count; ++j) {
1526      // GetReference returns a pointer to the stack reference within the handle scope. If this
1527      // needs to be updated, it will be done by the root visitor.
1528      buffered_visitor.VisitRootIfNonNull(cur->GetHandle(j).GetReference());
1529    }
1530  }
1531}
1532
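// Decodes a jobject into the mirror::Object it refers to, handling local, handle scope, global and
// weak global references; invalid or deleted references are reported via JniAbortF.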
1533mirror::Object* Thread::DecodeJObject(jobject obj) const {
1534  if (obj == nullptr) {
1535    return nullptr;
1536  }
1537  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
1538  IndirectRefKind kind = GetIndirectRefKind(ref);
1539  mirror::Object* result;
1540  bool expect_null = false;
1541  // The "kinds" below are sorted by the frequency we expect to encounter them.
1542  if (kind == kLocal) {
1543    IndirectReferenceTable& locals = tlsPtr_.jni_env->locals;
1544    // Local references do not need a read barrier.
1545    result = locals.Get<kWithoutReadBarrier>(ref);
1546  } else if (kind == kHandleScopeOrInvalid) {
1547    // TODO: make stack indirect reference table lookup more efficient.
1548    // Check if this is a local reference in the handle scope.
1549    if (LIKELY(HandleScopeContains(obj))) {
1550      // Read from handle scope.
1551      result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
1552      VerifyObject(result);
1553    } else {
1554      tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of invalid jobject %p", obj);
1555      expect_null = true;
1556      result = nullptr;
1557    }
1558  } else if (kind == kGlobal) {
1559    result = tlsPtr_.jni_env->vm->DecodeGlobal(const_cast<Thread*>(this), ref);
1560  } else {
1561    DCHECK_EQ(kind, kWeakGlobal);
1562    result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
1563    if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
1564      // This is a special case where it's okay to return null.
1565      expect_null = true;
1566      result = nullptr;
1567    }
1568  }
1569
1570  if (UNLIKELY(!expect_null && result == nullptr)) {
1571    tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of deleted %s %p",
1572                                   ToStr<IndirectRefKind>(kind).c_str(), obj);
1573  }
1574  return result;
1575}
1576
1577// Implements java.lang.Thread.interrupted.
1578bool Thread::Interrupted() {
1579  MutexLock mu(Thread::Current(), *wait_mutex_);
1580  bool interrupted = IsInterruptedLocked();
1581  SetInterruptedLocked(false);
1582  return interrupted;
1583}
1584
1585// Implements java.lang.Thread.isInterrupted.
1586bool Thread::IsInterrupted() {
1587  MutexLock mu(Thread::Current(), *wait_mutex_);
1588  return IsInterruptedLocked();
1589}
1590
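// Sets the interrupt flag and, if this thread is blocked in a monitor wait, signals it.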
1591void Thread::Interrupt(Thread* self) {
1592  MutexLock mu(self, *wait_mutex_);
1593  if (interrupted_) {
1594    return;
1595  }
1596  interrupted_ = true;
1597  NotifyLocked(self);
1598}
1599
1600void Thread::Notify() {
1601  Thread* self = Thread::Current();
1602  MutexLock mu(self, *wait_mutex_);
1603  NotifyLocked(self);
1604}
1605
1606void Thread::NotifyLocked(Thread* self) {
1607  if (wait_monitor_ != nullptr) {
1608    wait_cond_->Signal(self);
1609  }
1610}
1611
1612void Thread::SetClassLoaderOverride(jobject class_loader_override) {
1613  if (tlsPtr_.class_loader_override != nullptr) {
1614    GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override);
1615  }
1616  tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
1617}
1618
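// Counts the number of managed frames on the stack, skipping runtime (callee save) frames and the
// frames up to and including the exception's constructor.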
1619class CountStackDepthVisitor : public StackVisitor {
1620 public:
1621  explicit CountStackDepthVisitor(Thread* thread)
1622      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1623      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
1624        depth_(0), skip_depth_(0), skipping_(true) {}
1625
1626  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1627    // We want to skip frames up to and including the exception's constructor.
1628    // Note that we also skip the frame if it doesn't have a method (namely the callee
1629    // save frame).
1630    ArtMethod* m = GetMethod();
1631    if (skipping_ && !m->IsRuntimeMethod() &&
1632        !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
1633      skipping_ = false;
1634    }
1635    if (!skipping_) {
1636      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
1637        ++depth_;
1638      }
1639    } else {
1640      ++skip_depth_;
1641    }
1642    return true;
1643  }
1644
1645  int GetDepth() const {
1646    return depth_;
1647  }
1648
1649  int GetSkipDepth() const {
1650    return skip_depth_;
1651  }
1652
1653 private:
1654  uint32_t depth_;
1655  uint32_t skip_depth_;
1656  bool skipping_;
1657};
1658
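// Builds the internal stack trace as a PointerArray laid out as [method pointers][dex pcs],
// skipping the number of frames computed by CountStackDepthVisitor.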
1659template<bool kTransactionActive>
1660class BuildInternalStackTraceVisitor : public StackVisitor {
1661 public:
1662  explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
1663      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
1664        self_(self),
1665        skip_depth_(skip_depth),
1666        count_(0),
1667        trace_(nullptr),
1668        pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
1669
1670  bool Init(int depth)
1671      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1672    // Allocate method trace with format [method pointers][pcs].
1673    auto* cl = Runtime::Current()->GetClassLinker();
1674    trace_ = cl->AllocPointerArray(self_, depth * 2);
1675    if (trace_ == nullptr) {
1676      self_->AssertPendingOOMException();
1677      return false;
1678    }
1679    // If we are called from native, use non-transactional mode.
1680    const char* last_no_suspend_cause =
1681        self_->StartAssertNoThreadSuspension("Building internal stack trace");
1682    CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
1683    return true;
1684  }
1685
1686  virtual ~BuildInternalStackTraceVisitor() {
1687    if (trace_ != nullptr) {
1688      self_->EndAssertNoThreadSuspension(nullptr);
1689    }
1690  }
1691
1692  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1693    if (trace_ == nullptr) {
1694      return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
1695    }
1696    if (skip_depth_ > 0) {
1697      skip_depth_--;
1698      return true;
1699    }
1700    ArtMethod* m = GetMethod();
1701    if (m->IsRuntimeMethod()) {
1702      return true;  // Ignore runtime frames (in particular callee save).
1703    }
1704    trace_->SetElementPtrSize<kTransactionActive>(
1705        count_, m, pointer_size_);
1706    trace_->SetElementPtrSize<kTransactionActive>(
1707        trace_->GetLength() / 2 + count_, m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc(),
1708            pointer_size_);
1709    ++count_;
1710    return true;
1711  }
1712
1713  mirror::PointerArray* GetInternalStackTrace() const {
1714    return trace_;
1715  }
1716
1717 private:
1718  Thread* const self_;
1719  // How many more frames to skip.
1720  int32_t skip_depth_;
1721  // Current position down stack trace.
1722  uint32_t count_;
1723  // An array of the methods on the stack, the last entries are the dex PCs.
1724  mirror::PointerArray* trace_;
1725  // For cross compilation.
1726  size_t pointer_size_;
1727};
1728
1729template<bool kTransactionActive>
1730jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
1731  // Compute depth of stack
1732  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
1733  count_visitor.WalkStack();
1734  int32_t depth = count_visitor.GetDepth();
1735  int32_t skip_depth = count_visitor.GetSkipDepth();
1736
1737  // Build internal stack trace.
1738  BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(),
1739                                                                         const_cast<Thread*>(this),
1740                                                                         skip_depth);
1741  if (!build_trace_visitor.Init(depth)) {
1742    return nullptr;  // Allocation failed.
1743  }
1744  build_trace_visitor.WalkStack();
1745  mirror::PointerArray* trace = build_trace_visitor.GetInternalStackTrace();
1746  if (kIsDebugBuild) {
1747    // Second half is dex PCs.
1748    for (uint32_t i = 0; i < static_cast<uint32_t>(trace->GetLength() / 2); ++i) {
1749      auto* method = trace->GetElementPtrSize<ArtMethod*>(
1750          i, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
1751      CHECK(method != nullptr);
1752    }
1753  }
1754  return soa.AddLocalReference<jobject>(trace);
1755}
1756template jobject Thread::CreateInternalStackTrace<false>(
1757    const ScopedObjectAccessAlreadyRunnable& soa) const;
1758template jobject Thread::CreateInternalStackTrace<true>(
1759    const ScopedObjectAccessAlreadyRunnable& soa) const;
1760
1761bool Thread::IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const {
1762  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
1763  count_visitor.WalkStack();
1764  return count_visitor.GetDepth() == exception->GetStackDepth();
1765}
1766
1767jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
1768    const ScopedObjectAccessAlreadyRunnable& soa, jobject internal, jobjectArray output_array,
1769    int* stack_depth) {
1770  // Decode the internal stack trace into the depth, method trace and PC trace
1771  int32_t depth = soa.Decode<mirror::PointerArray*>(internal)->GetLength() / 2;
1772
1773  auto* cl = Runtime::Current()->GetClassLinker();
1774
1775  jobjectArray result;
1776
1777  if (output_array != nullptr) {
1778    // Reuse the array we were given.
1779    result = output_array;
1780    // ...adjusting the number of frames we'll write to not exceed the array length.
1781    const int32_t traces_length =
1782        soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->GetLength();
1783    depth = std::min(depth, traces_length);
1784  } else {
1785    // Create java_trace array and place in local reference table
1786    mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
1787        cl->AllocStackTraceElementArray(soa.Self(), depth);
1788    if (java_traces == nullptr) {
1789      return nullptr;
1790    }
1791    result = soa.AddLocalReference<jobjectArray>(java_traces);
1792  }
1793
1794  if (stack_depth != nullptr) {
1795    *stack_depth = depth;
1796  }
1797
1798  for (int32_t i = 0; i < depth; ++i) {
1799    auto* method_trace = soa.Decode<mirror::PointerArray*>(internal);
1800    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
1801    ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, sizeof(void*));
1802    uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
1803        i + method_trace->GetLength() / 2, sizeof(void*));
1804    int32_t line_number;
1805    StackHandleScope<3> hs(soa.Self());
1806    auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
1807    auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
1808    if (method->IsProxyMethod()) {
1809      line_number = -1;
1810      class_name_object.Assign(method->GetDeclaringClass()->GetName());
1811      // source_name_object intentionally left null for proxy methods
1812    } else {
1813      line_number = method->GetLineNumFromDexPC(dex_pc);
1814      // Allocate element, potentially triggering GC
1815      // TODO: reuse class_name_object via Class::name_?
1816      const char* descriptor = method->GetDeclaringClassDescriptor();
1817      CHECK(descriptor != nullptr);
1818      std::string class_name(PrettyDescriptor(descriptor));
1819      class_name_object.Assign(
1820          mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
1821      if (class_name_object.Get() == nullptr) {
1822        soa.Self()->AssertPendingOOMException();
1823        return nullptr;
1824      }
1825      const char* source_file = method->GetDeclaringClassSourceFile();
1826      if (source_file != nullptr) {
1827        source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
1828        if (source_name_object.Get() == nullptr) {
1829          soa.Self()->AssertPendingOOMException();
1830          return nullptr;
1831        }
1832      }
1833    }
1834    const char* method_name = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
1835    CHECK(method_name != nullptr);
1836    Handle<mirror::String> method_name_object(
1837        hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
1838    if (method_name_object.Get() == nullptr) {
1839      return nullptr;
1840    }
1841    mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
1842        soa.Self(), class_name_object, method_name_object, source_name_object, line_number);
1843    if (obj == nullptr) {
1844      return nullptr;
1845    }
1846    // We are called from native: use non-transactional mode.
1847    soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->Set<false>(i, obj);
1848  }
1849  return result;
1850}
1851
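// Printf-style convenience wrapper around ThrowNewException; takes an exception class descriptor
// such as "Ljava/lang/OutOfMemoryError;" plus a format string and arguments.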
1852void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
1853  va_list args;
1854  va_start(args, fmt);
1855  ThrowNewExceptionV(exception_class_descriptor, fmt, args);
1856  va_end(args);
1857}
1858
1859void Thread::ThrowNewExceptionV(const char* exception_class_descriptor,
1860                                const char* fmt, va_list ap) {
1861  std::string msg;
1862  StringAppendV(&msg, fmt, ap);
1863  ThrowNewException(exception_class_descriptor, msg.c_str());
1864}
1865
1866void Thread::ThrowNewException(const char* exception_class_descriptor,
1867                               const char* msg) {
1868  // Callers should either clear or call ThrowNewWrappedException.
1869  AssertNoPendingExceptionForNewException(msg);
1870  ThrowNewWrappedException(exception_class_descriptor, msg);
1871}
1872
1873static mirror::ClassLoader* GetCurrentClassLoader(Thread* self)
1874    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1875  ArtMethod* method = self->GetCurrentMethod(nullptr);
1876  return method != nullptr
1877      ? method->GetDeclaringClass()->GetClassLoader()
1878      : nullptr;
1879}
1880
1881void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
1882                                      const char* msg) {
1883  DCHECK_EQ(this, Thread::Current());
1884  ScopedObjectAccessUnchecked soa(this);
1885  StackHandleScope<3> hs(soa.Self());
1886  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self())));
1887  ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException()));
1888  ClearException();
1889  Runtime* runtime = Runtime::Current();
1890  auto* cl = runtime->GetClassLinker();
1891  Handle<mirror::Class> exception_class(
1892      hs.NewHandle(cl->FindClass(this, exception_class_descriptor, class_loader)));
1893  if (UNLIKELY(exception_class.Get() == nullptr)) {
1894    CHECK(IsExceptionPending());
1895    LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
1896    return;
1897  }
1898
1899  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true,
1900                                                             true))) {
1901    DCHECK(IsExceptionPending());
1902    return;
1903  }
1904  DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
1905  Handle<mirror::Throwable> exception(
1906      hs.NewHandle(down_cast<mirror::Throwable*>(exception_class->AllocObject(this))));
1907
1908  // If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
1909  if (exception.Get() == nullptr) {
1910    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1911    return;
1912  }
1913
1914  // Choose an appropriate constructor and set up the arguments.
1915  const char* signature;
1916  ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr);
1917  if (msg != nullptr) {
1918    // Ensure we remember this and the method over the String allocation.
1919    msg_string.reset(
1920        soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg)));
1921    if (UNLIKELY(msg_string.get() == nullptr)) {
1922      CHECK(IsExceptionPending());  // OOME.
1923      return;
1924    }
1925    if (cause.get() == nullptr) {
1926      signature = "(Ljava/lang/String;)V";
1927    } else {
1928      signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
1929    }
1930  } else {
1931    if (cause.get() == nullptr) {
1932      signature = "()V";
1933    } else {
1934      signature = "(Ljava/lang/Throwable;)V";
1935    }
1936  }
1937  ArtMethod* exception_init_method =
1938      exception_class->FindDeclaredDirectMethod("<init>", signature, cl->GetImagePointerSize());
1939
1940  CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
1941      << PrettyDescriptor(exception_class_descriptor);
1942
1943  if (UNLIKELY(!runtime->IsStarted())) {
1944    // Something is trying to throw an exception without a started runtime, which is the common
1945    // case in the compiler. We won't be able to invoke the constructor of the exception, so set
1946    // the exception fields directly.
1947    if (msg != nullptr) {
1948      exception->SetDetailMessage(down_cast<mirror::String*>(DecodeJObject(msg_string.get())));
1949    }
1950    if (cause.get() != nullptr) {
1951      exception->SetCause(down_cast<mirror::Throwable*>(DecodeJObject(cause.get())));
1952    }
1953    ScopedLocalRef<jobject> trace(GetJniEnv(),
1954                                  Runtime::Current()->IsActiveTransaction()
1955                                      ? CreateInternalStackTrace<true>(soa)
1956                                      : CreateInternalStackTrace<false>(soa));
1957    if (trace.get() != nullptr) {
1958      exception->SetStackState(down_cast<mirror::Throwable*>(DecodeJObject(trace.get())));
1959    }
1960    SetException(exception.Get());
1961  } else {
1962    jvalue jv_args[2];
1963    size_t i = 0;
1964
1965    if (msg != nullptr) {
1966      jv_args[i].l = msg_string.get();
1967      ++i;
1968    }
1969    if (cause.get() != nullptr) {
1970      jv_args[i].l = cause.get();
1971      ++i;
1972    }
1973    ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get()));
1974    InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(exception_init_method), jv_args);
1975    if (LIKELY(!IsExceptionPending())) {
1976      SetException(exception.Get());
1977    }
1978  }
1979}
1980
1981void Thread::ThrowOutOfMemoryError(const char* msg) {
1982  LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
1983      msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : ""));
1984  if (!tls32_.throwing_OutOfMemoryError) {
1985    tls32_.throwing_OutOfMemoryError = true;
1986    ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
1987    tls32_.throwing_OutOfMemoryError = false;
1988  } else {
1989    Dump(LOG(WARNING));  // The pre-allocated OOME has no stack, so help out and log one.
1990    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1991  }
1992}
1993
1994Thread* Thread::CurrentFromGdb() {
1995  return Thread::Current();
1996}
1997
1998void Thread::DumpFromGdb() const {
1999  std::ostringstream ss;
2000  Dump(ss);
2001  std::string str(ss.str());
2002  // log to stderr for debugging command line processes
2003  std::cerr << str;
2004#ifdef HAVE_ANDROID_OS
2005  // log to logcat for debugging frameworks processes
2006  LOG(INFO) << str;
2007#endif
2008}
2009
2010// Explicitly instantiate 32-bit and 64-bit thread offset dumping support.
2011template void Thread::DumpThreadOffset<4>(std::ostream& os, uint32_t offset);
2012template void Thread::DumpThreadOffset<8>(std::ostream& os, uint32_t offset);
2013
2014template<size_t ptr_size>
2015void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
2016#define DO_THREAD_OFFSET(x, y) \
2017    if (offset == x.Uint32Value()) { \
2018      os << y; \
2019      return; \
2020    }
2021  DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags")
2022  DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table")
2023  DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception")
2024  DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer")
2025  DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env")
2026  DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self")
2027  DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end")
2028  DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id")
2029  DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method")
2030  DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame")
2031  DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope")
2032  DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
2033#undef DO_THREAD_OFFSET
2034
2035#define INTERPRETER_ENTRY_POINT_INFO(x) \
2036    if (INTERPRETER_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
2037      os << #x; \
2038      return; \
2039    }
2040  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge)
2041  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge)
2042#undef INTERPRETER_ENTRY_POINT_INFO
2043
2044#define JNI_ENTRY_POINT_INFO(x) \
2045    if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
2046      os << #x; \
2047      return; \
2048    }
2049  JNI_ENTRY_POINT_INFO(pDlsymLookup)
2050#undef JNI_ENTRY_POINT_INFO
2051
2052#define QUICK_ENTRY_POINT_INFO(x) \
2053    if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
2054      os << #x; \
2055      return; \
2056    }
2057  QUICK_ENTRY_POINT_INFO(pAllocArray)
2058  QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
2059  QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck)
2060  QUICK_ENTRY_POINT_INFO(pAllocObject)
2061  QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
2062  QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
2063  QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck)
2064  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray)
2065  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck)
2066  QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes)
2067  QUICK_ENTRY_POINT_INFO(pAllocStringFromChars)
2068  QUICK_ENTRY_POINT_INFO(pAllocStringFromString)
2069  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
2070  QUICK_ENTRY_POINT_INFO(pCheckCast)
2071  QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
2072  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess)
2073  QUICK_ENTRY_POINT_INFO(pInitializeType)
2074  QUICK_ENTRY_POINT_INFO(pResolveString)
2075  QUICK_ENTRY_POINT_INFO(pSet8Instance)
2076  QUICK_ENTRY_POINT_INFO(pSet8Static)
2077  QUICK_ENTRY_POINT_INFO(pSet16Instance)
2078  QUICK_ENTRY_POINT_INFO(pSet16Static)
2079  QUICK_ENTRY_POINT_INFO(pSet32Instance)
2080  QUICK_ENTRY_POINT_INFO(pSet32Static)
2081  QUICK_ENTRY_POINT_INFO(pSet64Instance)
2082  QUICK_ENTRY_POINT_INFO(pSet64Static)
2083  QUICK_ENTRY_POINT_INFO(pSetObjInstance)
2084  QUICK_ENTRY_POINT_INFO(pSetObjStatic)
2085  QUICK_ENTRY_POINT_INFO(pGetByteInstance)
2086  QUICK_ENTRY_POINT_INFO(pGetBooleanInstance)
2087  QUICK_ENTRY_POINT_INFO(pGetByteStatic)
2088  QUICK_ENTRY_POINT_INFO(pGetBooleanStatic)
2089  QUICK_ENTRY_POINT_INFO(pGetShortInstance)
2090  QUICK_ENTRY_POINT_INFO(pGetCharInstance)
2091  QUICK_ENTRY_POINT_INFO(pGetShortStatic)
2092  QUICK_ENTRY_POINT_INFO(pGetCharStatic)
2093  QUICK_ENTRY_POINT_INFO(pGet32Instance)
2094  QUICK_ENTRY_POINT_INFO(pGet32Static)
2095  QUICK_ENTRY_POINT_INFO(pGet64Instance)
2096  QUICK_ENTRY_POINT_INFO(pGet64Static)
2097  QUICK_ENTRY_POINT_INFO(pGetObjInstance)
2098  QUICK_ENTRY_POINT_INFO(pGetObjStatic)
2099  QUICK_ENTRY_POINT_INFO(pAputObjectWithNullAndBoundCheck)
2100  QUICK_ENTRY_POINT_INFO(pAputObjectWithBoundCheck)
2101  QUICK_ENTRY_POINT_INFO(pAputObject)
2102  QUICK_ENTRY_POINT_INFO(pHandleFillArrayData)
2103  QUICK_ENTRY_POINT_INFO(pJniMethodStart)
2104  QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized)
2105  QUICK_ENTRY_POINT_INFO(pJniMethodEnd)
2106  QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized)
2107  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference)
2108  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized)
2109  QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline)
2110  QUICK_ENTRY_POINT_INFO(pLockObject)
2111  QUICK_ENTRY_POINT_INFO(pUnlockObject)
2112  QUICK_ENTRY_POINT_INFO(pCmpgDouble)
2113  QUICK_ENTRY_POINT_INFO(pCmpgFloat)
2114  QUICK_ENTRY_POINT_INFO(pCmplDouble)
2115  QUICK_ENTRY_POINT_INFO(pCmplFloat)
2116  QUICK_ENTRY_POINT_INFO(pFmod)
2117  QUICK_ENTRY_POINT_INFO(pL2d)
2118  QUICK_ENTRY_POINT_INFO(pFmodf)
2119  QUICK_ENTRY_POINT_INFO(pL2f)
2120  QUICK_ENTRY_POINT_INFO(pD2iz)
2121  QUICK_ENTRY_POINT_INFO(pF2iz)
2122  QUICK_ENTRY_POINT_INFO(pIdivmod)
2123  QUICK_ENTRY_POINT_INFO(pD2l)
2124  QUICK_ENTRY_POINT_INFO(pF2l)
2125  QUICK_ENTRY_POINT_INFO(pLdiv)
2126  QUICK_ENTRY_POINT_INFO(pLmod)
2127  QUICK_ENTRY_POINT_INFO(pLmul)
2128  QUICK_ENTRY_POINT_INFO(pShlLong)
2129  QUICK_ENTRY_POINT_INFO(pShrLong)
2130  QUICK_ENTRY_POINT_INFO(pUshrLong)
2131  QUICK_ENTRY_POINT_INFO(pIndexOf)
2132  QUICK_ENTRY_POINT_INFO(pStringCompareTo)
2133  QUICK_ENTRY_POINT_INFO(pMemcpy)
2134  QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline)
2135  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline)
2136  QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge)
2137  QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck)
2138  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck)
2139  QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck)
2140  QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck)
2141  QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck)
2142  QUICK_ENTRY_POINT_INFO(pTestSuspend)
2143  QUICK_ENTRY_POINT_INFO(pDeliverException)
2144  QUICK_ENTRY_POINT_INFO(pThrowArrayBounds)
2145  QUICK_ENTRY_POINT_INFO(pThrowDivZero)
2146  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod)
2147  QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
2148  QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
2149  QUICK_ENTRY_POINT_INFO(pDeoptimize)
2150  QUICK_ENTRY_POINT_INFO(pA64Load)
2151  QUICK_ENTRY_POINT_INFO(pA64Store)
2152  QUICK_ENTRY_POINT_INFO(pNewEmptyString)
2153  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B)
2154  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI)
2155  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII)
2156  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII)
2157  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIIString)
2158  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BString)
2159  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIICharset)
2160  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BCharset)
2161  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_C)
2162  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_CII)
2163  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_IIC)
2164  QUICK_ENTRY_POINT_INFO(pNewStringFromCodePoints)
2165  QUICK_ENTRY_POINT_INFO(pNewStringFromString)
2166  QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer)
2167  QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
2168  QUICK_ENTRY_POINT_INFO(pReadBarrierJni)
2169#undef QUICK_ENTRY_POINT_INFO
2170
2171  os << offset;
2172}
2173
2174void Thread::QuickDeliverException() {
2175  // Get exception from thread.
2176  mirror::Throwable* exception = GetException();
2177  CHECK(exception != nullptr);
2178  // Don't leave exception visible while we try to find the handler, which may cause class
2179  // resolution.
2180  ClearException();
2181  bool is_deoptimization = (exception == GetDeoptimizationException());
2182  QuickExceptionHandler exception_handler(this, is_deoptimization);
2183  if (is_deoptimization) {
2184    exception_handler.DeoptimizeStack();
2185  } else {
2186    exception_handler.FindCatch(exception);
2187  }
2188  exception_handler.UpdateInstrumentationStack();
2189  exception_handler.DoLongJump();
2190}
2191
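// Returns a Context usable for a long jump (e.g. during exception delivery), reusing the thread's
// cached context when one is available.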
2192Context* Thread::GetLongJumpContext() {
2193  Context* result = tlsPtr_.long_jump_context;
2194  if (result == nullptr) {
2195    result = Context::Create();
2196  } else {
2197    tlsPtr_.long_jump_context = nullptr;  // Avoid context being shared.
2198    result->Reset();
2199  }
2200  return result;
2201}
2202
2203// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
2204//       so we don't abort in a special situation (thinlocked monitor) when dumping the Java stack.
2205struct CurrentMethodVisitor FINAL : public StackVisitor {
2206  CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
2207      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2208      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2209        this_object_(nullptr),
2210        method_(nullptr),
2211        dex_pc_(0),
2212        abort_on_error_(abort_on_error) {}
2213  bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2214    ArtMethod* m = GetMethod();
2215    if (m->IsRuntimeMethod()) {
2216      // Continue if this is a runtime method.
2217      return true;
2218    }
2219    if (context_ != nullptr) {
2220      this_object_ = GetThisObject();
2221    }
2222    method_ = m;
2223    dex_pc_ = GetDexPc(abort_on_error_);
2224    return false;
2225  }
2226  mirror::Object* this_object_;
2227  ArtMethod* method_;
2228  uint32_t dex_pc_;
2229  const bool abort_on_error_;
2230};
2231
2232ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
2233  CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
2234  visitor.WalkStack(false);
2235  if (dex_pc != nullptr) {
2236    *dex_pc = visitor.dex_pc_;
2237  }
2238  return visitor.method_;
2239}
2240
2241bool Thread::HoldsLock(mirror::Object* object) const {
2242  if (object == nullptr) {
2243    return false;
2244  }
2245  return object->GetLockOwnerThreadId() == GetThreadId();
2246}
2247
2248// RootVisitor parameters are: (mirror::Object** obj, size_t vreg, const StackVisitor* visitor).
2249template <typename RootVisitor>
2250class ReferenceMapVisitor : public StackVisitor {
2251 public:
2252  ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
2253      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2254        // We are visiting the references in compiled frames, so we do not need
2255        // to know the inlined frames.
2256      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
2257        visitor_(visitor) {}
2258
2259  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
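    // Debugging aid: change the constant below to true to log every frame visited.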
2260    if (false) {
2261      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
2262                << StringPrintf("@ PC:%04x", GetDexPc());
2263    }
2264    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
2265    if (shadow_frame != nullptr) {
2266      VisitShadowFrame(shadow_frame);
2267    } else {
2268      VisitQuickFrame();
2269    }
2270    return true;
2271  }
2272
2273  void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2274    ArtMethod* m = shadow_frame->GetMethod();
2275    DCHECK(m != nullptr);
2276    size_t num_regs = shadow_frame->NumberOfVRegs();
2277    if (m->IsNative() || shadow_frame->HasReferenceArray()) {
2278      // Handle scope for JNI or reference array for the interpreter.
2279      for (size_t reg = 0; reg < num_regs; ++reg) {
2280        mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2281        if (ref != nullptr) {
2282          mirror::Object* new_ref = ref;
2283          visitor_(&new_ref, reg, this);
2284          if (new_ref != ref) {
2285            shadow_frame->SetVRegReference(reg, new_ref);
2286          }
2287        }
2288      }
2289    } else {
2290      // Java method.
2291      // Portable path uses DexGcMap and stores it in Method.native_gc_map_.
2292      const uint8_t* gc_map = m->GetNativeGcMap(sizeof(void*));
2293      CHECK(gc_map != nullptr) << PrettyMethod(m);
2294      verifier::DexPcToReferenceMap dex_gc_map(gc_map);
2295      uint32_t dex_pc = shadow_frame->GetDexPC();
2296      const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
2297      DCHECK(reg_bitmap != nullptr);
2298      num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
2299      for (size_t reg = 0; reg < num_regs; ++reg) {
2300        if (TestBitmap(reg, reg_bitmap)) {
2301          mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2302          if (ref != nullptr) {
2303            mirror::Object* new_ref = ref;
2304            visitor_(&new_ref, reg, this);
2305            if (new_ref != ref) {
2306              shadow_frame->SetVRegReference(reg, new_ref);
2307            }
2308          }
2309        }
2310      }
2311    }
2312  }
2313
2314 private:
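  // Visits references in a compiled (quick) frame, using the optimizing compiler's stack maps when
  // available and otherwise the native GC map plus vmap table to locate reference-holding vregs and
  // spilled registers.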
2315  void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2316    auto* cur_quick_frame = GetCurrentQuickFrame();
2317    DCHECK(cur_quick_frame != nullptr);
2318    auto* m = *cur_quick_frame;
2319
2320    // Process register map (which native and runtime methods don't have)
2321    if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
2322      if (m->IsOptimized(sizeof(void*))) {
2323        auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
2324            reinterpret_cast<uintptr_t>(cur_quick_frame));
2325        Runtime* runtime = Runtime::Current();
2326        const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
2327        uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
2328        CodeInfo code_info = m->GetOptimizedCodeInfo();
2329        StackMapEncoding encoding = code_info.ExtractEncoding();
2330        StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
2331        DCHECK(map.IsValid());
2332        MemoryRegion mask = map.GetStackMask(encoding);
2333        // Visit stack entries that hold pointers.
2334        for (size_t i = 0; i < mask.size_in_bits(); ++i) {
2335          if (mask.LoadBit(i)) {
2336            auto* ref_addr = vreg_base + i;
2337            mirror::Object* ref = ref_addr->AsMirrorPtr();
2338            if (ref != nullptr) {
2339              mirror::Object* new_ref = ref;
2340              visitor_(&new_ref, -1, this);
2341              if (ref != new_ref) {
2342                ref_addr->Assign(new_ref);
2343              }
2344            }
2345          }
2346        }
2347        // Visit callee-save registers that hold pointers.
2348        uint32_t register_mask = map.GetRegisterMask(encoding);
2349        for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
2350          if (register_mask & (1 << i)) {
2351            mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
2352            if (*ref_addr != nullptr) {
2353              visitor_(ref_addr, -1, this);
2354            }
2355          }
2356        }
2357      } else {
2358        const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
2359        CHECK(native_gc_map != nullptr) << PrettyMethod(m);
2360        const DexFile::CodeItem* code_item = m->GetCodeItem();
2361        // Can't be null or how would we compile its instructions?
2362        DCHECK(code_item != nullptr) << PrettyMethod(m);
2363        NativePcOffsetToReferenceMap map(native_gc_map);
2364        size_t num_regs = map.RegWidth() * 8;
2365        if (num_regs > 0) {
2366          Runtime* runtime = Runtime::Current();
2367          const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
2368          uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
2369          const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
2370          DCHECK(reg_bitmap != nullptr);
2371          const void* code_pointer = ArtMethod::EntryPointToCodePointer(entry_point);
2372          const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
2373          QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
2374          // For all dex registers in the bitmap
2375          DCHECK(cur_quick_frame != nullptr);
2376          for (size_t reg = 0; reg < num_regs; ++reg) {
2377            // Does this register hold a reference?
2378            if (TestBitmap(reg, reg_bitmap)) {
2379              uint32_t vmap_offset;
2380              if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
2381                int vmap_reg = vmap_table.ComputeRegister(frame_info.CoreSpillMask(), vmap_offset,
2382                                                          kReferenceVReg);
2383                // This is sound as spilled GPRs will be word sized (i.e. 32 or 64 bits).
2384                mirror::Object** ref_addr =
2385                    reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
2386                if (*ref_addr != nullptr) {
2387                  visitor_(ref_addr, reg, this);
2388                }
2389              } else {
2390                StackReference<mirror::Object>* ref_addr =
2391                    reinterpret_cast<StackReference<mirror::Object>*>(GetVRegAddrFromQuickCode(
2392                        cur_quick_frame, code_item, frame_info.CoreSpillMask(),
2393                        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), reg));
2394                mirror::Object* ref = ref_addr->AsMirrorPtr();
2395                if (ref != nullptr) {
2396                  mirror::Object* new_ref = ref;
2397                  visitor_(&new_ref, reg, this);
2398                  if (ref != new_ref) {
2399                    ref_addr->Assign(new_ref);
2400                  }
2401                }
2402              }
2403            }
2404          }
2405        }
2406      }
2407    }
2408  }
2409
2410  // Visitor for when we visit a root.
2411  RootVisitor& visitor_;
2412};
2413
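// Adapts a RootVisitor to the (obj, vreg, stack_visitor) callback shape expected by
// ReferenceMapVisitor.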
2414class RootCallbackVisitor {
2415 public:
2416  RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}
2417
2418  void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
2419      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2420    visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
2421  }
2422
2423 private:
2424  RootVisitor* const visitor_;
2425  const uint32_t tid_;
2426};
2427
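// Visits this thread's roots: the managed peer, any pending exception, JNI locals and monitors,
// handle scopes, shadow frames, deoptimization return values, method verifiers and the managed
// stack.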
2428void Thread::VisitRoots(RootVisitor* visitor) {
2429  const uint32_t thread_id = GetThreadId();
2430  visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
2431  if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
2432    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
2433                   RootInfo(kRootNativeStack, thread_id));
2434  }
2435  visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
2436  tlsPtr_.jni_env->locals.VisitRoots(visitor, RootInfo(kRootJNILocal, thread_id));
2437  tlsPtr_.jni_env->monitors.VisitRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
2438  HandleScopeVisitRoots(visitor, thread_id);
2439  if (tlsPtr_.debug_invoke_req != nullptr) {
2440    tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
2441  }
2442  if (tlsPtr_.stacked_shadow_frame_record != nullptr) {
2443    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
2444    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
2445    for (StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
2446         record != nullptr;
2447         record = record->GetLink()) {
2448      for (ShadowFrame* shadow_frame = record->GetShadowFrame();
2449           shadow_frame != nullptr;
2450           shadow_frame = shadow_frame->GetLink()) {
2451        mapper.VisitShadowFrame(shadow_frame);
2452      }
2453    }
2454  }
2455  if (tlsPtr_.deoptimization_return_value_stack != nullptr) {
2456    for (DeoptimizationReturnValueRecord* record = tlsPtr_.deoptimization_return_value_stack;
2457         record != nullptr;
2458         record = record->GetLink()) {
2459      if (record->IsReference()) {
2460        visitor->VisitRootIfNonNull(record->GetGCRoot(),
2461            RootInfo(kRootThreadObject, thread_id));
2462      }
2463    }
2464  }
2465  for (auto* verifier = tlsPtr_.method_verifier; verifier != nullptr; verifier = verifier->link_) {
2466    verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id));
2467  }
2468  // Visit roots on this thread's stack
2469  Context* context = GetLongJumpContext();
2470  RootCallbackVisitor visitor_to_callback(visitor, thread_id);
2471  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context, visitor_to_callback);
2472  mapper.WalkStack();
2473  ReleaseLongJumpContext(context);
2474  for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
2475    visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
2476  }
2477}
2478
2479class VerifyRootVisitor : public SingleRootVisitor {
2480 public:
2481  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
2482      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2483    VerifyObject(root);
2484  }
2485};
2486
2487void Thread::VerifyStackImpl() {
2488  VerifyRootVisitor visitor;
2489  std::unique_ptr<Context> context(Context::Create());
2490  RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId());
2491  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback);
2492  mapper.WalkStack();
2493}
2494
2495// Set the stack end to the value to be used during a stack overflow.
2496void Thread::SetStackEndForStackOverflow() {
2497  // During stack overflow we allow use of the full stack.
2498  if (tlsPtr_.stack_end == tlsPtr_.stack_begin) {
2499    // However, we seem to have already extended to use the full stack.
2500    LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
2501               << GetStackOverflowReservedBytes(kRuntimeISA) << ")?";
2502    DumpStack(LOG(ERROR));
2503    LOG(FATAL) << "Recursive stack overflow.";
2504  }
2505
2506  tlsPtr_.stack_end = tlsPtr_.stack_begin;
2507
2508  // Remove the stack overflow protection if it is set up.
2509  bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks();
2510  if (implicit_stack_check) {
2511    if (!UnprotectStack()) {
2512      LOG(ERROR) << "Unable to remove stack protection for stack overflow";
2513    }
2514  }
2515}
2516
2517void Thread::SetTlab(uint8_t* start, uint8_t* end) {
2518  DCHECK_LE(start, end);
2519  tlsPtr_.thread_local_start = start;
2520  tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start;
2521  tlsPtr_.thread_local_end = end;
2522  tlsPtr_.thread_local_objects = 0;
2523}
2524
2525bool Thread::HasTlab() const {
2526  bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
2527  if (has_tlab) {
2528    DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr);
2529  } else {
2530    DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr);
2531  }
2532  return has_tlab;
2533}
2534
2535std::ostream& operator<<(std::ostream& os, const Thread& thread) {
2536  thread.ShortDump(os);
2537  return os;
2538}
2539
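// Protects the reserved region just below stack_begin so that an implicit stack overflow check
// faults when the stack is exhausted.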
2540void Thread::ProtectStack() {
2541  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
2542  VLOG(threads) << "Protecting stack at " << pregion;
2543  if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
2544    LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. "
2545        "Reason: "
2546        << strerror(errno) << " size:  " << kStackOverflowProtectedSize;
2547  }
2548}
2549
2550bool Thread::UnprotectStack() {
2551  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
2552  VLOG(threads) << "Unprotecting stack at " << pregion;
2553  return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0;
2554}
2555
2556void Thread::ActivateSingleStepControl(SingleStepControl* ssc) {
2557  CHECK(Dbg::IsDebuggerActive());
2558  CHECK(GetSingleStepControl() == nullptr) << "Single step already active in thread " << *this;
2559  CHECK(ssc != nullptr);
2560  tlsPtr_.single_step_control = ssc;
2561}
2562
2563void Thread::DeactivateSingleStepControl() {
2564  CHECK(Dbg::IsDebuggerActive());
2565  CHECK(GetSingleStepControl() != nullptr) << "Single step not active in thread " << *this;
2566  SingleStepControl* ssc = GetSingleStepControl();
2567  tlsPtr_.single_step_control = nullptr;
2568  delete ssc;
2569}
2570
2571void Thread::SetDebugInvokeReq(DebugInvokeReq* req) {
2572  CHECK(Dbg::IsDebuggerActive());
2573  CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this;
2574  CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself";
2575  CHECK(req != nullptr);
2576  tlsPtr_.debug_invoke_req = req;
2577}
2578
2579void Thread::ClearDebugInvokeReq() {
2580  CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this;
2581  CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself";
2582  DebugInvokeReq* req = tlsPtr_.debug_invoke_req;
2583  tlsPtr_.debug_invoke_req = nullptr;
2584  delete req;
2585}
2586
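// Method verifiers are linked into a per-thread list so that VisitRoots() can visit their roots.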
2587void Thread::PushVerifier(verifier::MethodVerifier* verifier) {
2588  verifier->link_ = tlsPtr_.method_verifier;
2589  tlsPtr_.method_verifier = verifier;
2590}
2591
2592void Thread::PopVerifier(verifier::MethodVerifier* verifier) {
2593  CHECK_EQ(tlsPtr_.method_verifier, verifier);
2594  tlsPtr_.method_verifier = verifier->link_;
2595}
2596
2597}  // namespace art
2598