thread.cc revision 5662383511a550e428bcdee0cc1be28e464ceed4
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define ATRACE_TAG ATRACE_TAG_DALVIK
18
19#include "thread.h"
20
21#include <cutils/trace.h>
22#include <pthread.h>
23#include <signal.h>
24#include <sys/resource.h>
25#include <sys/time.h>
26
27#include <algorithm>
28#include <bitset>
29#include <cerrno>
30#include <iostream>
31#include <list>
32#include <sstream>
33
34#include "arch/context.h"
35#include "art_field-inl.h"
36#include "art_method-inl.h"
37#include "base/bit_utils.h"
38#include "base/mutex.h"
39#include "base/timing_logger.h"
40#include "base/to_str.h"
41#include "class_linker-inl.h"
42#include "debugger.h"
43#include "dex_file-inl.h"
44#include "entrypoints/entrypoint_utils.h"
45#include "entrypoints/quick/quick_alloc_entrypoints.h"
46#include "gc_map.h"
47#include "gc/accounting/card_table-inl.h"
48#include "gc/allocator/rosalloc.h"
49#include "gc/heap.h"
50#include "gc/space/space.h"
51#include "handle_scope-inl.h"
52#include "indirect_reference_table-inl.h"
53#include "jni_internal.h"
54#include "mirror/class_loader.h"
55#include "mirror/class-inl.h"
56#include "mirror/object_array-inl.h"
57#include "mirror/stack_trace_element.h"
58#include "monitor.h"
59#include "object_lock.h"
60#include "quick_exception_handler.h"
61#include "quick/quick_method_frame_info.h"
62#include "reflection.h"
63#include "runtime.h"
64#include "scoped_thread_state_change.h"
65#include "ScopedLocalRef.h"
66#include "ScopedUtfChars.h"
67#include "stack.h"
68#include "thread_list.h"
69#include "thread-inl.h"
70#include "utils.h"
71#include "verifier/dex_gc_map.h"
72#include "verifier/method_verifier.h"
73#include "verify_object-inl.h"
74#include "vmap_table.h"
75#include "well_known_classes.h"
76
77namespace art {
78
79bool Thread::is_started_ = false;
80pthread_key_t Thread::pthread_key_self_;
81ConditionVariable* Thread::resume_cond_ = nullptr;
82const size_t Thread::kStackOverflowImplicitCheckSize = GetStackOverflowReservedBytes(kRuntimeISA);
83
84static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
85
86void Thread::InitCardTable() {
87  tlsPtr_.card_table = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
88}
89
90static void UnimplementedEntryPoint() {
91  UNIMPLEMENTED(FATAL);
92}
93
94void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
95                     QuickEntryPoints* qpoints);
96
97void Thread::InitTlsEntryPoints() {
98  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
99  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&tlsPtr_.interpreter_entrypoints);
100  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(&tlsPtr_.quick_entrypoints) +
101      sizeof(tlsPtr_.quick_entrypoints));
102  for (uintptr_t* it = begin; it != end; ++it) {
103    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
104  }
105  InitEntryPoints(&tlsPtr_.interpreter_entrypoints, &tlsPtr_.jni_entrypoints,
106                  &tlsPtr_.quick_entrypoints);
107}
108
109void Thread::InitStringEntryPoints() {
110  ScopedObjectAccess soa(this);
111  QuickEntryPoints* qpoints = &tlsPtr_.quick_entrypoints;
112  qpoints->pNewEmptyString = reinterpret_cast<void(*)()>(
113      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newEmptyString));
114  qpoints->pNewStringFromBytes_B = reinterpret_cast<void(*)()>(
115      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_B));
116  qpoints->pNewStringFromBytes_BI = reinterpret_cast<void(*)()>(
117      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BI));
118  qpoints->pNewStringFromBytes_BII = reinterpret_cast<void(*)()>(
119      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BII));
120  qpoints->pNewStringFromBytes_BIII = reinterpret_cast<void(*)()>(
121      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIII));
122  qpoints->pNewStringFromBytes_BIIString = reinterpret_cast<void(*)()>(
123      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIIString));
124  qpoints->pNewStringFromBytes_BString = reinterpret_cast<void(*)()>(
125      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BString));
126  qpoints->pNewStringFromBytes_BIICharset = reinterpret_cast<void(*)()>(
127      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BIICharset));
128  qpoints->pNewStringFromBytes_BCharset = reinterpret_cast<void(*)()>(
129      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromBytes_BCharset));
130  qpoints->pNewStringFromChars_C = reinterpret_cast<void(*)()>(
131      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_C));
132  qpoints->pNewStringFromChars_CII = reinterpret_cast<void(*)()>(
133      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_CII));
134  qpoints->pNewStringFromChars_IIC = reinterpret_cast<void(*)()>(
135      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromChars_IIC));
136  qpoints->pNewStringFromCodePoints = reinterpret_cast<void(*)()>(
137      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromCodePoints));
138  qpoints->pNewStringFromString = reinterpret_cast<void(*)()>(
139      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromString));
140  qpoints->pNewStringFromStringBuffer = reinterpret_cast<void(*)()>(
141      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromStringBuffer));
142  qpoints->pNewStringFromStringBuilder = reinterpret_cast<void(*)()>(
143      soa.DecodeMethod(WellKnownClasses::java_lang_StringFactory_newStringFromStringBuilder));
144}
145
146void Thread::ResetQuickAllocEntryPointsForThread() {
147  ResetQuickAllocEntryPoints(&tlsPtr_.quick_entrypoints);
148}
149
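// Node in a per-thread singly linked list used to stash a method's return value across a
// deoptimization. Records are pushed by PushAndClearDeoptimizationReturnValue() below and
// popped (presumably by the deoptimization/interpreter machinery) via
// PopDeoptimizationReturnValue().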
150class DeoptimizationReturnValueRecord {
151 public:
152  DeoptimizationReturnValueRecord(const JValue& ret_val,
153                                  bool is_reference,
154                                  DeoptimizationReturnValueRecord* link)
155      : ret_val_(ret_val), is_reference_(is_reference), link_(link) {}
156
157  JValue GetReturnValue() const { return ret_val_; }
158  bool IsReference() const { return is_reference_; }
159  DeoptimizationReturnValueRecord* GetLink() const { return link_; }
160  mirror::Object** GetGCRoot() {
161    DCHECK(is_reference_);
162    return ret_val_.GetGCRoot();
163  }
164
165 private:
166  JValue ret_val_;
167  const bool is_reference_;
168  DeoptimizationReturnValueRecord* const link_;
169
170  DISALLOW_COPY_AND_ASSIGN(DeoptimizationReturnValueRecord);
171};
172
173class StackedShadowFrameRecord {
174 public:
175  StackedShadowFrameRecord(ShadowFrame* shadow_frame,
176                           StackedShadowFrameType type,
177                           StackedShadowFrameRecord* link)
178      : shadow_frame_(shadow_frame),
179        type_(type),
180        link_(link) {}
181
182  ShadowFrame* GetShadowFrame() const { return shadow_frame_; }
183  StackedShadowFrameType GetType() const { return type_; }
184  StackedShadowFrameRecord* GetLink() const { return link_; }
185
186 private:
187  ShadowFrame* const shadow_frame_;
188  const StackedShadowFrameType type_;
189  StackedShadowFrameRecord* const link_;
190
191  DISALLOW_COPY_AND_ASSIGN(StackedShadowFrameRecord);
192};
193
194void Thread::PushAndClearDeoptimizationReturnValue() {
195  DeoptimizationReturnValueRecord* record = new DeoptimizationReturnValueRecord(
196      tls64_.deoptimization_return_value,
197      tls32_.deoptimization_return_value_is_reference,
198      tlsPtr_.deoptimization_return_value_stack);
199  tlsPtr_.deoptimization_return_value_stack = record;
200  ClearDeoptimizationReturnValue();
201}
202
203JValue Thread::PopDeoptimizationReturnValue() {
204  DeoptimizationReturnValueRecord* record = tlsPtr_.deoptimization_return_value_stack;
205  DCHECK(record != nullptr);
206  tlsPtr_.deoptimization_return_value_stack = record->GetLink();
207  JValue ret_val(record->GetReturnValue());
208  delete record;
209  return ret_val;
210}
211
212void Thread::PushStackedShadowFrame(ShadowFrame* sf, StackedShadowFrameType type) {
213  StackedShadowFrameRecord* record = new StackedShadowFrameRecord(
214      sf, type, tlsPtr_.stacked_shadow_frame_record);
215  tlsPtr_.stacked_shadow_frame_record = record;
216}
217
218ShadowFrame* Thread::PopStackedShadowFrame(StackedShadowFrameType type) {
219  StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
220  DCHECK(record != nullptr);
221  DCHECK_EQ(record->GetType(), type);
222  tlsPtr_.stacked_shadow_frame_record = record->GetLink();
223  ShadowFrame* shadow_frame = record->GetShadowFrame();
224  delete record;
225  return shadow_frame;
226}
227
228void Thread::InitTid() {
229  tls32_.tid = ::art::GetTid();
230}
231
232void Thread::InitAfterFork() {
233  // One thread (us) survived the fork, but we have a new tid so we need to
234  // update the value stashed in this Thread*.
235  InitTid();
236}
237
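// pthread start routine for threads created from managed code (see CreateNativeThread below):
// it finishes native initialization, adopts the Java peer, invokes the peer's run() method,
// and finally unregisters the thread once run() returns.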
238void* Thread::CreateCallback(void* arg) {
239  Thread* self = reinterpret_cast<Thread*>(arg);
240  Runtime* runtime = Runtime::Current();
241  if (runtime == nullptr) {
242    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
243    return nullptr;
244  }
245  {
246    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
247    //       after self->Init().
248    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
249    // Check that if we got here we cannot be shutting down (as shutdown should never have started
250    // while threads are being born).
251    CHECK(!runtime->IsShuttingDownLocked());
252    // Note: given that the JNIEnv is created in the parent thread, the only failure point here is
253    //       a mess in InitStackHwm. We do not have a reasonable way to recover from that, so abort
254    //       the runtime in such a case. In case this ever changes, we need to make sure here to
255    //       delete the tmp_jni_env, as we own it at this point.
256    CHECK(self->Init(runtime->GetThreadList(), runtime->GetJavaVM(), self->tlsPtr_.tmp_jni_env));
257    self->tlsPtr_.tmp_jni_env = nullptr;
258    Runtime::Current()->EndThreadBirth();
259  }
260  {
261    ScopedObjectAccess soa(self);
262    self->InitStringEntryPoints();
263
264    // Copy peer into self, deleting global reference when done.
265    CHECK(self->tlsPtr_.jpeer != nullptr);
266    self->tlsPtr_.opeer = soa.Decode<mirror::Object*>(self->tlsPtr_.jpeer);
267    self->GetJniEnv()->DeleteGlobalRef(self->tlsPtr_.jpeer);
268    self->tlsPtr_.jpeer = nullptr;
269    self->SetThreadName(self->GetThreadName(soa)->ToModifiedUtf8().c_str());
270
271    ArtField* priorityField = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority);
272    self->SetNativePriority(priorityField->GetInt(self->tlsPtr_.opeer));
273    Dbg::PostThreadStart(self);
274
275    // Invoke the 'run' method of our java.lang.Thread.
276    mirror::Object* receiver = self->tlsPtr_.opeer;
277    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
278    ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(receiver));
279    InvokeVirtualOrInterfaceWithJValues(soa, ref.get(), mid, nullptr);
280  }
281  // Detach and delete self.
282  Runtime::Current()->GetThreadList()->Unregister(self);
283
284  return nullptr;
285}
286
287Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
288                                  mirror::Object* thread_peer) {
289  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
290  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
291  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
292  // to stop it from going away.
293  if (kIsDebugBuild) {
294    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
295    if (result != nullptr && !result->IsSuspended()) {
296      Locks::thread_list_lock_->AssertHeld(soa.Self());
297    }
298  }
299  return result;
300}
301
302Thread* Thread::FromManagedThread(const ScopedObjectAccessAlreadyRunnable& soa,
303                                  jobject java_thread) {
304  return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread));
305}
306
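// Rough worked example of the sizing below (assuming a 1 MiB request, implicit stack checks
// and a 4 KiB page size): 1 MiB requested + 1 MiB bionic default + the implicit check
// reservation + GetStackOverflowReservedBytes(kRuntimeISA), rounded up to a multiple of
// kPageSize. The exact reservation sizes are ISA-dependent.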
307static size_t FixStackSize(size_t stack_size) {
308  // A stack size of zero means "use the default".
309  if (stack_size == 0) {
310    stack_size = Runtime::Current()->GetDefaultStackSize();
311  }
312
313  // Dalvik used the bionic pthread default stack size for native threads,
314  // so include that here to support apps that expect large native stacks.
315  stack_size += 1 * MB;
316
317  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
318  if (stack_size < PTHREAD_STACK_MIN) {
319    stack_size = PTHREAD_STACK_MIN;
320  }
321
322  if (Runtime::Current()->ExplicitStackOverflowChecks()) {
323    // It's likely that callers are trying to ensure they have at least a certain amount of
324    // stack space, so we should add our reserved space on top of what they requested, rather
325    // than implicitly take it away from them.
326    stack_size += GetStackOverflowReservedBytes(kRuntimeISA);
327  } else {
328    // If we are going to use implicit stack checks, allocate space for the protected
329    // region at the bottom of the stack.
330    stack_size += Thread::kStackOverflowImplicitCheckSize +
331        GetStackOverflowReservedBytes(kRuntimeISA);
332  }
333
334  // Some systems require the stack size to be a multiple of the system page size, so round up.
335  stack_size = RoundUp(stack_size, kPageSize);
336
337  return stack_size;
338}
339
340// Global variable to prevent the compiler optimizing away the page reads for the stack.
341uint8_t dont_optimize_this;
342
343// Install a protected region in the stack.  This is used to trigger a SIGSEGV if a stack
344// overflow is detected.  It is located right below the stack_begin_.
345//
346// There is a little complexity here that deserves a special mention.  On some
347// architectures, the stack is created using a VM_GROWSDOWN flag
348// to prevent memory being allocated when it's not needed.  This flag makes the
349// kernel only allocate memory for the stack by growing down in memory.  Because we
350// want to put an mprotected region far away from that at the stack top, we need
351// to make sure the pages for the stack are mapped in before we call mprotect.  We do
352// this by reading every page from the stack bottom (highest address) to the stack top.
353// We then madvise this away.
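//
// A rough sketch of the resulting layout, higher addresses first:
//
//   <stack top - where the thread starts running>
//   ... usable stack ...
//   stack_end_       (stack_begin_ plus the reserved overflow bytes)
//   stack_begin_
//   protected region (kStackOverflowProtectedSize bytes, made inaccessible by ProtectStack())
//   pthread guard region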
354void Thread::InstallImplicitProtection() {
355  uint8_t* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
356  uint8_t* stack_himem = tlsPtr_.stack_end;
357  uint8_t* stack_top = reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(&stack_himem) &
358      ~(kPageSize - 1));    // Page containing current top of stack.
359
360  // First remove the protection on the protected region as we will want to read and
361  // write it.  This may fail (on the first attempt when the stack is not mapped)
362  // but we ignore that.
363  UnprotectStack();
364
365  // Map in the stack.  This must be done by reading from the
366  // current stack pointer downwards as the stack may be mapped using VM_GROWSDOWN
367  // in the kernel.  Any access more than a page below the current SP might cause
368  // a segv.
369
370  // Read every page from the high address to the low.
371  for (uint8_t* p = stack_top; p >= pregion; p -= kPageSize) {
372    dont_optimize_this = *p;
373  }
374
375  VLOG(threads) << "installing stack protected region at " << std::hex <<
376      static_cast<void*>(pregion) << " to " <<
377      static_cast<void*>(pregion + kStackOverflowProtectedSize - 1);
378
379  // Protect the bottom of the stack to prevent read/write to it.
380  ProtectStack();
381
382  // Tell the kernel that we won't be needing these pages any more.
383  // NB. On Linux, madvise(MADV_DONTNEED) makes these pages read back as zeroes afterwards.
384  uint32_t unwanted_size = stack_top - pregion - kPageSize;
385  madvise(pregion, unwanted_size, MADV_DONTNEED);
386}
387
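// Creates the native (pthread) half of a java.lang.Thread: registers the thread birth with
// the runtime, pins the Java peer with a global ref, pre-allocates the child's JNIEnvExt so
// an OOM can still be reported from this side, and then starts a detached pthread running
// Thread::CreateCallback. On failure, the partially constructed state is rolled back and an
// OutOfMemoryError is thrown on the current thread.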
388void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
389  CHECK(java_peer != nullptr);
390  Thread* self = static_cast<JNIEnvExt*>(env)->self;
391
392  if (VLOG_IS_ON(threads)) {
393    ScopedObjectAccess soa(env);
394
395    ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
396    mirror::String* java_name = reinterpret_cast<mirror::String*>(f->GetObject(
397        soa.Decode<mirror::Object*>(java_peer)));
398    std::string thread_name;
399    if (java_name != nullptr) {
400      thread_name = java_name->ToModifiedUtf8();
401    } else {
402      thread_name = "(Unnamed)";
403    }
404
405    VLOG(threads) << "Creating native thread for " << thread_name;
406    self->Dump(LOG(INFO));
407  }
408
409  Runtime* runtime = Runtime::Current();
410
411  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
412  bool thread_start_during_shutdown = false;
413  {
414    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
415    if (runtime->IsShuttingDownLocked()) {
416      thread_start_during_shutdown = true;
417    } else {
418      runtime->StartThreadBirth();
419    }
420  }
421  if (thread_start_during_shutdown) {
422    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
423    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
424    return;
425  }
426
427  Thread* child_thread = new Thread(is_daemon);
428  // Use a global JNI ref to keep the peer alive while the child thread starts.
429  child_thread->tlsPtr_.jpeer = env->NewGlobalRef(java_peer);
430  stack_size = FixStackSize(stack_size);
431
432  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to
433  // assign it.
434  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
435                    reinterpret_cast<jlong>(child_thread));
436
437  // Try to allocate a JNIEnvExt for the thread. We do this here as we might be out of memory and
438  // do not have a good way to report this on the child's side.
439  std::unique_ptr<JNIEnvExt> child_jni_env_ext(
440      JNIEnvExt::Create(child_thread, Runtime::Current()->GetJavaVM()));
441
442  int pthread_create_result = 0;
443  if (child_jni_env_ext.get() != nullptr) {
444    pthread_t new_pthread;
445    pthread_attr_t attr;
446    child_thread->tlsPtr_.tmp_jni_env = child_jni_env_ext.get();
447    CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
448    CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED),
449                       "PTHREAD_CREATE_DETACHED");
450    CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
451    pthread_create_result = pthread_create(&new_pthread,
452                                           &attr,
453                                           Thread::CreateCallback,
454                                           child_thread);
455    CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");
456
457    if (pthread_create_result == 0) {
458      // pthread_create started the new thread. The child is now responsible for managing the
459      // JNIEnvExt we created.
460      // Note: we can't check for tmp_jni_env == nullptr, as that would require synchronization
461      //       between the threads.
462      child_jni_env_ext.release();
463      return;
464    }
465  }
466
467  // Either JNIEnvExt::Create or pthread_create(3) failed, so clean up.
468  {
469    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
470    runtime->EndThreadBirth();
471  }
472  // Manually delete the global reference since Thread::Init will not have been run.
473  env->DeleteGlobalRef(child_thread->tlsPtr_.jpeer);
474  child_thread->tlsPtr_.jpeer = nullptr;
475  delete child_thread;
476  child_thread = nullptr;
477  // TODO: remove from thread group?
478  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
479  {
480    std::string msg(child_jni_env_ext.get() == nullptr ?
481        "Could not allocate JNI Env" :
482        StringPrintf("pthread_create (%s stack) failed: %s",
483                                 PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
484    ScopedObjectAccess soa(env);
485    soa.Self()->ThrowOutOfMemoryError(msg.c_str());
486  }
487}
488
489bool Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm, JNIEnvExt* jni_env_ext) {
490  // This function does all the initialization that must be run by the native thread it applies to.
491  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
492  // we can handshake with the corresponding native thread when it's ready.) Check this native
493  // thread hasn't been through here already...
494  CHECK(Thread::Current() == nullptr);
495
496  // Set pthread_self_ ahead of pthread_setspecific, which makes Thread::Current() work; this
497  // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
498  tlsPtr_.pthread_self = pthread_self();
499  CHECK(is_started_);
500
501  SetUpAlternateSignalStack();
502  if (!InitStackHwm()) {
503    return false;
504  }
505  InitCpu();
506  InitTlsEntryPoints();
507  RemoveSuspendTrigger();
508  InitCardTable();
509  InitTid();
510
511  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
512  DCHECK_EQ(Thread::Current(), this);
513
514  tls32_.thin_lock_thread_id = thread_list->AllocThreadId(this);
515
516  if (jni_env_ext != nullptr) {
517    DCHECK_EQ(jni_env_ext->vm, java_vm);
518    DCHECK_EQ(jni_env_ext->self, this);
519    tlsPtr_.jni_env = jni_env_ext;
520  } else {
521    tlsPtr_.jni_env = JNIEnvExt::Create(this, java_vm);
522    if (tlsPtr_.jni_env == nullptr) {
523      return false;
524    }
525  }
526
527  thread_list->Register(this);
528  return true;
529}
530
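// Attaches the calling native thread to the runtime. When create_peer is true a
// java.lang.Thread peer is created as well; otherwise only the native side is initialized
// (used for the main thread's two-stage attach and for tools that never get a managed peer).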
531Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
532                       bool create_peer) {
533  Runtime* runtime = Runtime::Current();
534  if (runtime == nullptr) {
535    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
536    return nullptr;
537  }
538  Thread* self;
539  {
540    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
541    if (runtime->IsShuttingDownLocked()) {
542      LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
543      return nullptr;
544    } else {
545      Runtime::Current()->StartThreadBirth();
546      self = new Thread(as_daemon);
547      bool init_success = self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
548      Runtime::Current()->EndThreadBirth();
549      if (!init_success) {
550        delete self;
551        return nullptr;
552      }
553    }
554  }
555
556  self->InitStringEntryPoints();
557
558  CHECK_NE(self->GetState(), kRunnable);
559  self->SetState(kNative);
560
561  // If we're the main thread, ClassLinker won't be created until after we're attached,
562  // so that thread needs a two-stage attach. Regular threads don't need this hack.
563  // In the compiler, all threads need this hack, because no-one's going to be getting
564  // a native peer!
565  if (create_peer) {
566    self->CreatePeer(thread_name, as_daemon, thread_group);
567  } else {
568    // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
569    if (thread_name != nullptr) {
570      self->tlsPtr_.name->assign(thread_name);
571      ::art::SetThreadName(thread_name);
572    } else if (self->GetJniEnv()->check_jni) {
573      LOG(WARNING) << *Thread::Current() << " attached without supplying a name";
574    }
575  }
576
577  if (VLOG_IS_ON(threads)) {
578    if (thread_name != nullptr) {
579      VLOG(threads) << "Attaching thread " << thread_name;
580    } else {
581      VLOG(threads) << "Attaching unnamed thread.";
582    }
583    ScopedObjectAccess soa(self);
584    self->Dump(LOG(INFO));
585  }
586
587  {
588    ScopedObjectAccess soa(self);
589    Dbg::PostThreadStart(self);
590  }
591
592  return self;
593}
594
595void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
596  Runtime* runtime = Runtime::Current();
597  CHECK(runtime->IsStarted());
598  JNIEnv* env = tlsPtr_.jni_env;
599
600  if (thread_group == nullptr) {
601    thread_group = runtime->GetMainThreadGroup();
602  }
603  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
604  // Check for null in case NewStringUTF failed due to OOM (b/18297817).
605  if (name != nullptr && thread_name.get() == nullptr) {
606    CHECK(IsExceptionPending());
607    return;
608  }
609  jint thread_priority = GetNativePriority();
610  jboolean thread_is_daemon = as_daemon;
611
612  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
613  if (peer.get() == nullptr) {
614    CHECK(IsExceptionPending());
615    return;
616  }
617  {
618    ScopedObjectAccess soa(this);
619    tlsPtr_.opeer = soa.Decode<mirror::Object*>(peer.get());
620  }
621  env->CallNonvirtualVoidMethod(peer.get(),
622                                WellKnownClasses::java_lang_Thread,
623                                WellKnownClasses::java_lang_Thread_init,
624                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
625  AssertNoPendingException();
626
627  Thread* self = this;
628  DCHECK_EQ(self, Thread::Current());
629  env->SetLongField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
630                    reinterpret_cast<jlong>(self));
631
632  ScopedObjectAccess soa(self);
633  StackHandleScope<1> hs(self);
634  MutableHandle<mirror::String> peer_thread_name(hs.NewHandle(GetThreadName(soa)));
635  if (peer_thread_name.Get() == nullptr) {
636    // The Thread constructor should have set the Thread.name to a
637    // non-null value. However, because we can run without code
638    // available (in the compiler, in tests), we manually assign the
639    // fields the constructor should have set.
640    if (runtime->IsActiveTransaction()) {
641      InitPeer<true>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
642    } else {
643      InitPeer<false>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
644    }
645    peer_thread_name.Assign(GetThreadName(soa));
646  }
647  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
648  if (peer_thread_name.Get() != nullptr) {
649    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
650  }
651}
652
653template<bool kTransactionActive>
654void Thread::InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
655                      jobject thread_name, jint thread_priority) {
656  soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
657      SetBoolean<kTransactionActive>(tlsPtr_.opeer, thread_is_daemon);
658  soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
659      SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object*>(thread_group));
660  soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
661      SetObject<kTransactionActive>(tlsPtr_.opeer, soa.Decode<mirror::Object*>(thread_name));
662  soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
663      SetInt<kTransactionActive>(tlsPtr_.opeer, thread_priority);
664}
665
666void Thread::SetThreadName(const char* name) {
667  tlsPtr_.name->assign(name);
668  ::art::SetThreadName(name);
669  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
670}
671
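// Records the native stack bounds for this thread ("Hwm" presumably stands for high water
// mark) and, when implicit stack-overflow checks are enabled, shifts the usable limits and
// installs the protected region. Returns false if the stack is too small to attach safely.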
672bool Thread::InitStackHwm() {
673  void* read_stack_base;
674  size_t read_stack_size;
675  size_t read_guard_size;
676  GetThreadStack(tlsPtr_.pthread_self, &read_stack_base, &read_stack_size, &read_guard_size);
677
678  tlsPtr_.stack_begin = reinterpret_cast<uint8_t*>(read_stack_base);
679  tlsPtr_.stack_size = read_stack_size;
680
681  // The minimum stack size we can cope with is the overflow reserved bytes (typically
682  // 8K) + the protected region size (4K) + another page (4K).  Typically this will
683  // be 8+4+4 = 16K.  The thread won't be able to do much with this stack: even the GC takes
684  // between 8K and 12K.
685  uint32_t min_stack = GetStackOverflowReservedBytes(kRuntimeISA) + kStackOverflowProtectedSize
686    + 4 * KB;
687  if (read_stack_size <= min_stack) {
688    // Note, as we know the stack is small, avoid operations that could use a lot of stack.
689    LogMessage::LogLineLowStack(__PRETTY_FUNCTION__, __LINE__, ERROR,
690                                "Attempt to attach a thread with a too-small stack");
691    return false;
692  }
693
694  // This is included in the SIGQUIT output, but it's useful here for thread debugging.
695  VLOG(threads) << StringPrintf("Native stack is at %p (%s with %s guard)",
696                                read_stack_base,
697                                PrettySize(read_stack_size).c_str(),
698                                PrettySize(read_guard_size).c_str());
699
700  // Set stack_end_ to the bottom of the stack, reserving space for handling stack overflows.
701
702  Runtime* runtime = Runtime::Current();
703  bool implicit_stack_check = !runtime->ExplicitStackOverflowChecks() && !runtime->IsAotCompiler();
704  ResetDefaultStackEnd();
705
706  // Install the protected region if we are doing implicit overflow checks.
707  if (implicit_stack_check) {
708    // The thread might already have a protected region at the bottom.  We need
709    // to install our own region, so we move the limits
710    // of the stack to make room for it.
711
712    tlsPtr_.stack_begin += read_guard_size + kStackOverflowProtectedSize;
713    tlsPtr_.stack_end += read_guard_size + kStackOverflowProtectedSize;
714    tlsPtr_.stack_size -= read_guard_size;
715
716    InstallImplicitProtection();
717  }
718
719  // Sanity check.
720  int stack_variable;
721  CHECK_GT(&stack_variable, reinterpret_cast<void*>(tlsPtr_.stack_end));
722
723  return true;
724}
725
726void Thread::ShortDump(std::ostream& os) const {
727  os << "Thread[";
728  if (GetThreadId() != 0) {
729    // If we're in kStarting, we won't have a thin lock id or tid yet.
730    os << GetThreadId()
731       << ",tid=" << GetTid() << ',';
732  }
733  os << GetState()
734     << ",Thread*=" << this
735     << ",peer=" << tlsPtr_.opeer
736     << ",\"" << (tlsPtr_.name != nullptr ? *tlsPtr_.name : "null") << "\""
737     << "]";
738}
739
740void Thread::Dump(std::ostream& os) const {
741  DumpState(os);
742  DumpStack(os);
743}
744
745mirror::String* Thread::GetThreadName(const ScopedObjectAccessAlreadyRunnable& soa) const {
746  ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
747  return (tlsPtr_.opeer != nullptr) ?
748      reinterpret_cast<mirror::String*>(f->GetObject(tlsPtr_.opeer)) : nullptr;
749}
750
751void Thread::GetThreadName(std::string& name) const {
752  name.assign(*tlsPtr_.name);
753}
754
755uint64_t Thread::GetCpuMicroTime() const {
756#if defined(__linux__)
757  clockid_t cpu_clock_id;
758  pthread_getcpuclockid(tlsPtr_.pthread_self, &cpu_clock_id);
759  timespec now;
760  clock_gettime(cpu_clock_id, &now);
761  return static_cast<uint64_t>(now.tv_sec) * UINT64_C(1000000) + now.tv_nsec / UINT64_C(1000);
762#else  // __APPLE__
763  UNIMPLEMENTED(WARNING);
764  return -1;
765#endif
766}
767
768// Attempt to rectify locks so that we dump the thread list with the required locks held before exiting.
769static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
770  LOG(ERROR) << *thread << " suspend count already zero.";
771  Locks::thread_suspend_count_lock_->Unlock(self);
772  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
773    Locks::mutator_lock_->SharedTryLock(self);
774    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
775      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
776    }
777  }
778  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
779    Locks::thread_list_lock_->TryLock(self);
780    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
781      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
782    }
783  }
784  std::ostringstream ss;
785  Runtime::Current()->GetThreadList()->Dump(ss);
786  LOG(FATAL) << ss.str();
787}
788
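// Adjusts this thread's suspend count by delta while thread_suspend_count_lock_ is held.
// A resulting non-zero count sets the kSuspendRequest flag (and triggers the suspend check)
// so the thread will park itself; a count of zero clears the flag.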
789void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
790  if (kIsDebugBuild) {
791    DCHECK(delta == -1 || delta == +1 || delta == -tls32_.debug_suspend_count)
792          << delta << " " << tls32_.debug_suspend_count << " " << this;
793    DCHECK_GE(tls32_.suspend_count, tls32_.debug_suspend_count) << this;
794    Locks::thread_suspend_count_lock_->AssertHeld(self);
795    if (this != self && !IsSuspended()) {
796      Locks::thread_list_lock_->AssertHeld(self);
797    }
798  }
799  if (UNLIKELY(delta < 0 && tls32_.suspend_count <= 0)) {
800    UnsafeLogFatalForSuspendCount(self, this);
801    return;
802  }
803
804  tls32_.suspend_count += delta;
805  if (for_debugger) {
806    tls32_.debug_suspend_count += delta;
807  }
808
809  if (tls32_.suspend_count == 0) {
810    AtomicClearFlag(kSuspendRequest);
811  } else {
812    AtomicSetFlag(kSuspendRequest);
813    TriggerSuspend();
814  }
815}
816
817void Thread::RunCheckpointFunction() {
818  Closure *checkpoints[kMaxCheckpoints];
819
820  // Grab the suspend_count lock and copy the current set of
821  // checkpoints.  Then clear the list and the flag.  The RequestCheckpoint
822  // function will also grab this lock so we prevent a race between setting
823  // the kCheckpointRequest flag and clearing it.
824  {
825    MutexLock mu(this, *Locks::thread_suspend_count_lock_);
826    for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
827      checkpoints[i] = tlsPtr_.checkpoint_functions[i];
828      tlsPtr_.checkpoint_functions[i] = nullptr;
829    }
830    AtomicClearFlag(kCheckpointRequest);
831  }
832
833  // Outside the lock, run all the checkpoint functions that
834  // we collected.
835  bool found_checkpoint = false;
836  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
837    if (checkpoints[i] != nullptr) {
838      ATRACE_BEGIN("Checkpoint function");
839      checkpoints[i]->Run(this);
840      ATRACE_END();
841      found_checkpoint = true;
842    }
843  }
844  CHECK(found_checkpoint);
845}
846
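// Attempts to install 'function' in a free checkpoint slot and then atomically sets the
// kCheckpointRequest flag with a CAS on state_and_flags. Returns false if the target thread
// is not runnable, all kMaxCheckpoints slots are taken, or the thread changes state
// concurrently (in which case the slot is released again).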
847bool Thread::RequestCheckpoint(Closure* function) {
848  union StateAndFlags old_state_and_flags;
849  old_state_and_flags.as_int = tls32_.state_and_flags.as_int;
850  if (old_state_and_flags.as_struct.state != kRunnable) {
851    return false;  // Fail, thread is suspended and so can't run a checkpoint.
852  }
853
854  uint32_t available_checkpoint = kMaxCheckpoints;
855  for (uint32_t i = 0 ; i < kMaxCheckpoints; ++i) {
856    if (tlsPtr_.checkpoint_functions[i] == nullptr) {
857      available_checkpoint = i;
858      break;
859    }
860  }
861  if (available_checkpoint == kMaxCheckpoints) {
862    // No free checkpoint slots available, so we can't run a checkpoint.
863    return false;
864  }
865  tlsPtr_.checkpoint_functions[available_checkpoint] = function;
866
867  // Checkpoint function installed; now install the flag bit.
868  // We must be runnable to request a checkpoint.
869  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
870  union StateAndFlags new_state_and_flags;
871  new_state_and_flags.as_int = old_state_and_flags.as_int;
872  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
873  bool success = tls32_.state_and_flags.as_atomic_int.CompareExchangeStrongSequentiallyConsistent(
874      old_state_and_flags.as_int, new_state_and_flags.as_int);
875  if (UNLIKELY(!success)) {
876    // The thread changed state before the checkpoint was installed.
877    CHECK_EQ(tlsPtr_.checkpoint_functions[available_checkpoint], function);
878    tlsPtr_.checkpoint_functions[available_checkpoint] = nullptr;
879  } else {
880    CHECK_EQ(ReadFlag(kCheckpointRequest), true);
881    TriggerSuspend();
882  }
883  return success;
884}
885
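// Atomically claims the pending flip function, if any, using a compare-and-swap so that only
// one caller gets to run it; returns nullptr when no flip is pending (or it was already taken).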
886Closure* Thread::GetFlipFunction() {
887  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
888  Closure* func;
889  do {
890    func = atomic_func->LoadRelaxed();
891    if (func == nullptr) {
892      return nullptr;
893    }
894  } while (!atomic_func->CompareExchangeWeakSequentiallyConsistent(func, nullptr));
895  DCHECK(func != nullptr);
896  return func;
897}
898
899void Thread::SetFlipFunction(Closure* function) {
900  CHECK(function != nullptr);
901  Atomic<Closure*>* atomic_func = reinterpret_cast<Atomic<Closure*>*>(&tlsPtr_.flip_function);
902  atomic_func->StoreSequentiallyConsistent(function);
903}
904
905void Thread::FullSuspendCheck() {
906  VLOG(threads) << this << " self-suspending";
907  ATRACE_BEGIN("Full suspend check");
908  // Make thread appear suspended to other threads, release mutator_lock_.
909  tls32_.suspended_at_suspend_check = true;
910  TransitionFromRunnableToSuspended(kSuspended);
911  // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
912  TransitionFromSuspendedToRunnable();
913  tls32_.suspended_at_suspend_check = false;
914  ATRACE_END();
915  VLOG(threads) << this << " self-reviving";
916}
917
918void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
919  std::string group_name;
920  int priority;
921  bool is_daemon = false;
922  Thread* self = Thread::Current();
923
924  // If flip_function is not null, a thread flip has been requested but this
925  // thread has not yet woken up to execute it, so the thread's roots
926  // haven't been forwarded.  Accessing those roots (opeer or methods in
927  // the frames) below would therefore be bad. Run the flip function
928  // here. TODO: clean up.
929  if (thread != nullptr) {
930    ScopedObjectAccessUnchecked soa(self);
931    Thread* this_thread = const_cast<Thread*>(thread);
932    Closure* flip_func = this_thread->GetFlipFunction();
933    if (flip_func != nullptr) {
934      flip_func->Run(this_thread);
935    }
936  }
937
938  // Don't do this if we are aborting since the GC may have all the threads suspended. This will
939  // cause ScopedObjectAccessUnchecked to deadlock.
940  if (gAborting == 0 && self != nullptr && thread != nullptr && thread->tlsPtr_.opeer != nullptr) {
941    ScopedObjectAccessUnchecked soa(self);
942    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)
943        ->GetInt(thread->tlsPtr_.opeer);
944    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)
945        ->GetBoolean(thread->tlsPtr_.opeer);
946
947    mirror::Object* thread_group =
948        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->tlsPtr_.opeer);
949
950    if (thread_group != nullptr) {
951      ArtField* group_name_field =
952          soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
953      mirror::String* group_name_string =
954          reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
955      group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
956    }
957  } else {
958    priority = GetNativePriority();
959  }
960
961  std::string scheduler_group_name(GetSchedulerGroupName(tid));
962  if (scheduler_group_name.empty()) {
963    scheduler_group_name = "default";
964  }
965
966  if (thread != nullptr) {
967    os << '"' << *thread->tlsPtr_.name << '"';
968    if (is_daemon) {
969      os << " daemon";
970    }
971    os << " prio=" << priority
972       << " tid=" << thread->GetThreadId()
973       << " " << thread->GetState();
974    if (thread->IsStillStarting()) {
975      os << " (still starting up)";
976    }
977    os << "\n";
978  } else {
979    os << '"' << ::art::GetThreadName(tid) << '"'
980       << " prio=" << priority
981       << " (not attached)\n";
982  }
983
984  if (thread != nullptr) {
985    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
986    os << "  | group=\"" << group_name << "\""
987       << " sCount=" << thread->tls32_.suspend_count
988       << " dsCount=" << thread->tls32_.debug_suspend_count
989       << " obj=" << reinterpret_cast<void*>(thread->tlsPtr_.opeer)
990       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
991  }
992
993  os << "  | sysTid=" << tid
994     << " nice=" << getpriority(PRIO_PROCESS, tid)
995     << " cgrp=" << scheduler_group_name;
996  if (thread != nullptr) {
997    int policy;
998    sched_param sp;
999    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->tlsPtr_.pthread_self, &policy, &sp),
1000                       __FUNCTION__);
1001    os << " sched=" << policy << "/" << sp.sched_priority
1002       << " handle=" << reinterpret_cast<void*>(thread->tlsPtr_.pthread_self);
1003  }
1004  os << "\n";
1005
1006  // Grab the scheduler stats for this thread.
1007  std::string scheduler_stats;
1008  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
1009    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
1010  } else {
1011    scheduler_stats = "0 0 0";
1012  }
1013
1014  char native_thread_state = '?';
1015  int utime = 0;
1016  int stime = 0;
1017  int task_cpu = 0;
1018  GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu);
1019
1020  os << "  | state=" << native_thread_state
1021     << " schedstat=( " << scheduler_stats << " )"
1022     << " utm=" << utime
1023     << " stm=" << stime
1024     << " core=" << task_cpu
1025     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
1026  if (thread != nullptr) {
1027    os << "  | stack=" << reinterpret_cast<void*>(thread->tlsPtr_.stack_begin) << "-"
1028        << reinterpret_cast<void*>(thread->tlsPtr_.stack_end) << " stackSize="
1029        << PrettySize(thread->tlsPtr_.stack_size) << "\n";
1030    // Dump the held mutexes.
1031    os << "  | held mutexes=";
1032    for (size_t i = 0; i < kLockLevelCount; ++i) {
1033      if (i != kMonitorLock) {
1034        BaseMutex* mutex = thread->GetHeldMutex(static_cast<LockLevel>(i));
1035        if (mutex != nullptr) {
1036          os << " \"" << mutex->GetName() << "\"";
1037          if (mutex->IsReaderWriterMutex()) {
1038            ReaderWriterMutex* rw_mutex = down_cast<ReaderWriterMutex*>(mutex);
1039            if (rw_mutex->GetExclusiveOwnerTid() == static_cast<uint64_t>(tid)) {
1040              os << "(exclusive held)";
1041            } else {
1042              os << "(shared held)";
1043            }
1044          }
1045        }
1046      }
1047    }
1048    os << "\n";
1049  }
1050}
1051
1052void Thread::DumpState(std::ostream& os) const {
1053  Thread::DumpState(os, this, GetTid());
1054}
1055
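// Stack visitor used by DumpJavaStack: prints an "  at ..." line per managed frame, collapses
// long runs of identical frames after kMaxRepetition, describes what the thread is waiting on
// at the top frame and, when allocation is allowed, which objects are locked in each frame.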
1056struct StackDumpVisitor : public StackVisitor {
1057  StackDumpVisitor(std::ostream& os_in, Thread* thread_in, Context* context, bool can_allocate_in)
1058      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1059      : StackVisitor(thread_in, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
1060        os(os_in),
1061        thread(thread_in),
1062        can_allocate(can_allocate_in),
1063        last_method(nullptr),
1064        last_line_number(0),
1065        repetition_count(0),
1066        frame_count(0) {}
1067
1068  virtual ~StackDumpVisitor() {
1069    if (frame_count == 0) {
1070      os << "  (no managed stack frames)\n";
1071    }
1072  }
1073
1074  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1075    ArtMethod* m = GetMethod();
1076    if (m->IsRuntimeMethod()) {
1077      return true;
1078    }
1079    const int kMaxRepetition = 3;
1080    mirror::Class* c = m->GetDeclaringClass();
1081    mirror::DexCache* dex_cache = c->GetDexCache();
1082    int line_number = -1;
1083    if (dex_cache != nullptr) {  // be tolerant of bad input
1084      const DexFile& dex_file = *dex_cache->GetDexFile();
1085      line_number = dex_file.GetLineNumFromPC(m, GetDexPc(false));
1086    }
1087    if (line_number == last_line_number && last_method == m) {
1088      ++repetition_count;
1089    } else {
1090      if (repetition_count >= kMaxRepetition) {
1091        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
1092      }
1093      repetition_count = 0;
1094      last_line_number = line_number;
1095      last_method = m;
1096    }
1097    if (repetition_count < kMaxRepetition) {
1098      os << "  at " << PrettyMethod(m, false);
1099      if (m->IsNative()) {
1100        os << "(Native method)";
1101      } else {
1102        const char* source_file(m->GetDeclaringClassSourceFile());
1103        os << "(" << (source_file != nullptr ? source_file : "unavailable")
1104           << ":" << line_number << ")";
1105      }
1106      os << "\n";
1107      if (frame_count == 0) {
1108        Monitor::DescribeWait(os, thread);
1109      }
1110      if (can_allocate) {
1111        // Visit locks, but do not abort on errors. This would trigger a nested abort.
1112        Monitor::VisitLocks(this, DumpLockedObject, &os, false);
1113      }
1114    }
1115
1116    ++frame_count;
1117    return true;
1118  }
1119
1120  static void DumpLockedObject(mirror::Object* o, void* context)
1121      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1122    std::ostream& os = *reinterpret_cast<std::ostream*>(context);
1123    os << "  - locked ";
1124    if (o == nullptr) {
1125      os << "an unknown object";
1126    } else {
1127      if ((o->GetLockWord(false).GetState() == LockWord::kThinLocked) &&
1128          Locks::mutator_lock_->IsExclusiveHeld(Thread::Current())) {
1129        // Getting the identity hashcode here would result in lock inflation and suspension of the
1130        // current thread, which isn't safe if this is the only runnable thread.
1131        os << StringPrintf("<@addr=0x%" PRIxPTR "> (a %s)", reinterpret_cast<intptr_t>(o),
1132                           PrettyTypeOf(o).c_str());
1133      } else {
1134        // IdentityHashCode can cause thread suspension, which would invalidate o if it moved. So
1135        // we get the pretty type before we call IdentityHashCode.
1136        const std::string pretty_type(PrettyTypeOf(o));
1137        os << StringPrintf("<0x%08x> (a %s)", o->IdentityHashCode(), pretty_type.c_str());
1138      }
1139    }
1140    os << "\n";
1141  }
1142
1143  std::ostream& os;
1144  const Thread* thread;
1145  const bool can_allocate;
1146  ArtMethod* last_method;
1147  int last_line_number;
1148  int repetition_count;
1149  int frame_count;
1150};
1151
1152static bool ShouldShowNativeStack(const Thread* thread)
1153    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1154  ThreadState state = thread->GetState();
1155
1156  // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
1157  if (state > kWaiting && state < kStarting) {
1158    return true;
1159  }
1160
1161  // In an Object.wait variant or Thread.sleep? That's not interesting.
1162  if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
1163    return false;
1164  }
1165
1166  // Threads with no managed stack frames should be shown.
1167  const ManagedStack* managed_stack = thread->GetManagedStack();
1168  if (managed_stack == nullptr || (managed_stack->GetTopQuickFrame() == nullptr &&
1169      managed_stack->GetTopShadowFrame() == nullptr)) {
1170    return true;
1171  }
1172
1173  // In some other native method? That's interesting.
1174  // We don't just check kNative because native methods will be in state kSuspended if they're
1175  // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
1176  // thread-startup states if it's early enough in their life cycle (http://b/7432159).
1177  ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
1178  return current_method != nullptr && current_method->IsNative();
1179}
1180
1181void Thread::DumpJavaStack(std::ostream& os) const {
1182  // If flip_function is not null, a thread flip has been requested but this
1183  // thread has not yet woken up to execute it, so the thread's roots
1184  // haven't been forwarded.  Accessing those roots (locks or methods in
1185  // the frames) below would therefore be bad. Run the flip function
1186  // here. TODO: clean up.
1187  {
1188    Thread* this_thread = const_cast<Thread*>(this);
1189    Closure* flip_func = this_thread->GetFlipFunction();
1190    if (flip_func != nullptr) {
1191      flip_func->Run(this_thread);
1192    }
1193  }
1194
1195  // Dumping the Java stack involves the verifier for locks. The verifier operates under the
1196  // assumption that there is no exception pending on entry. Thus, stash any pending exception.
1197  // Use Thread::Current() instead of this in case a thread is dumping the stack of another
1198  // suspended thread.
1199  StackHandleScope<1> scope(Thread::Current());
1200  Handle<mirror::Throwable> exc;
1201  bool have_exception = false;
1202  if (IsExceptionPending()) {
1203    exc = scope.NewHandle(GetException());
1204    const_cast<Thread*>(this)->ClearException();
1205    have_exception = true;
1206  }
1207
1208  std::unique_ptr<Context> context(Context::Create());
1209  StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(),
1210                          !tls32_.throwing_OutOfMemoryError);
1211  dumper.WalkStack();
1212
1213  if (have_exception) {
1214    const_cast<Thread*>(this)->SetException(exc.Get());
1215  }
1216}
1217
1218void Thread::DumpStack(std::ostream& os) const {
1219  // TODO: we call this code when dying but may not have suspended the thread ourselves. The
1220  //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
1221  //       the race with the thread_suspend_count_lock_).
1222  bool dump_for_abort = (gAborting > 0);
1223  bool safe_to_dump = (this == Thread::Current() || IsSuspended());
1224  if (!kIsDebugBuild) {
1225    // We always want to dump the stack for an abort; however, there is no point dumping another
1226    // thread's stack in debug builds, where we'll hit the not-suspended check in the stack walk.
1227    safe_to_dump = (safe_to_dump || dump_for_abort);
1228  }
1229  if (safe_to_dump) {
1230    // If we're currently in native code, dump that stack before dumping the managed stack.
1231    if (dump_for_abort || ShouldShowNativeStack(this)) {
1232      DumpKernelStack(os, GetTid(), "  kernel: ", false);
1233      DumpNativeStack(os, GetTid(), "  native: ", GetCurrentMethod(nullptr, !dump_for_abort));
1234    }
1235    DumpJavaStack(os);
1236  } else {
1237    os << "Not able to dump stack of thread that isn't suspended";
1238  }
1239}
1240
1241void Thread::ThreadExitCallback(void* arg) {
1242  Thread* self = reinterpret_cast<Thread*>(arg);
1243  if (self->tls32_.thread_exit_check_count == 0) {
1244    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's "
1245        "going to use a pthread_key_create destructor?): " << *self;
1246    CHECK(is_started_);
1247    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
1248    self->tls32_.thread_exit_check_count = 1;
1249  } else {
1250    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
1251  }
1252}
1253
1254void Thread::Startup() {
1255  CHECK(!is_started_);
1256  is_started_ = true;
1257  {
1258    // MutexLock to keep annotalysis happy.
1259    //
1260    // Note we use null for the thread because Thread::Current can
1261    // return garbage here: is_started_ is already true but
1262    // Thread::pthread_key_self_ is not yet initialized.
1263    // This was seen on glibc.
1264    MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_);
1265    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
1266                                         *Locks::thread_suspend_count_lock_);
1267  }
1268
1269  // Allocate a TLS slot.
1270  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback),
1271                     "self key");
1272
1273  // Double-check the TLS slot allocation.
1274  if (pthread_getspecific(pthread_key_self_) != nullptr) {
1275    LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
1276  }
1277}
1278
1279void Thread::FinishStartup() {
1280  Runtime* runtime = Runtime::Current();
1281  CHECK(runtime->IsStarted());
1282
1283  // Finish attaching the main thread.
1284  ScopedObjectAccess soa(Thread::Current());
1285  Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
1286
1287  Runtime::Current()->GetClassLinker()->RunRootClinits();
1288}
1289
1290void Thread::Shutdown() {
1291  CHECK(is_started_);
1292  is_started_ = false;
1293  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
1294  MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
1295  if (resume_cond_ != nullptr) {
1296    delete resume_cond_;
1297    resume_cond_ = nullptr;
1298  }
1299}
1300
1301Thread::Thread(bool daemon) : tls32_(daemon), wait_monitor_(nullptr), interrupted_(false) {
1302  wait_mutex_ = new Mutex("a thread wait mutex");
1303  wait_cond_ = new ConditionVariable("a thread wait condition variable", *wait_mutex_);
1304  tlsPtr_.instrumentation_stack = new std::deque<instrumentation::InstrumentationStackFrame>;
1305  tlsPtr_.name = new std::string(kThreadNameDuringStartup);
1306  tlsPtr_.nested_signal_state = static_cast<jmp_buf*>(malloc(sizeof(jmp_buf)));
1307
1308  static_assert((sizeof(Thread) % 4) == 0U,
1309                "art::Thread has a size which is not a multiple of 4.");
1310  tls32_.state_and_flags.as_struct.flags = 0;
1311  tls32_.state_and_flags.as_struct.state = kNative;
1312  memset(&tlsPtr_.held_mutexes[0], 0, sizeof(tlsPtr_.held_mutexes));
1313  std::fill(tlsPtr_.rosalloc_runs,
1314            tlsPtr_.rosalloc_runs + kNumRosAllocThreadLocalSizeBrackets,
1315            gc::allocator::RosAlloc::GetDedicatedFullRun());
1316  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
1317    tlsPtr_.checkpoint_functions[i] = nullptr;
1318  }
1319  tlsPtr_.flip_function = nullptr;
1320  tls32_.suspended_at_suspend_check = false;
1321}
1322
1323bool Thread::IsStillStarting() const {
1324  // You might think you can check whether the state is kStarting, but for much of thread startup,
1325  // the thread is in kNative; it might also be in kVmWait.
1326  // You might think you can check whether the peer is null, but the peer is actually created and
1327  // assigned fairly early on, and needs to be.
1328  // It turns out that the last thing to change is the thread name; that's a good proxy for "has
1329  // this thread _ever_ entered kRunnable".
1330  return (tlsPtr_.jpeer == nullptr && tlsPtr_.opeer == nullptr) ||
1331      (*tlsPtr_.name == kThreadNameDuringStartup);
1332}
1333
1334void Thread::AssertPendingException() const {
1335  CHECK(IsExceptionPending()) << "Pending exception expected.";
1336}
1337
1338void Thread::AssertPendingOOMException() const {
1339  AssertPendingException();
1340  auto* e = GetException();
1341  CHECK_EQ(e->GetClass(), DecodeJObject(WellKnownClasses::java_lang_OutOfMemoryError)->AsClass())
1342      << e->Dump();
1343}
1344
1345void Thread::AssertNoPendingException() const {
1346  if (UNLIKELY(IsExceptionPending())) {
1347    ScopedObjectAccess soa(Thread::Current());
1348    mirror::Throwable* exception = GetException();
1349    LOG(FATAL) << "No pending exception expected: " << exception->Dump();
1350  }
1351}
1352
1353void Thread::AssertNoPendingExceptionForNewException(const char* msg) const {
1354  if (UNLIKELY(IsExceptionPending())) {
1355    ScopedObjectAccess soa(Thread::Current());
1356    mirror::Throwable* exception = GetException();
1357    LOG(FATAL) << "Throwing new exception '" << msg << "' with unexpected pending exception: "
1358        << exception->Dump();
1359  }
1360}
1361
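// Used from Thread::Destroy() to force-exit any monitors that a detaching native thread
// still holds via JNI MonitorEnter, logging a warning for each one.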
1362class MonitorExitVisitor : public SingleRootVisitor {
1363 public:
1364  explicit MonitorExitVisitor(Thread* self) : self_(self) { }
1365
1366  // NO_THREAD_SAFETY_ANALYSIS due to MonitorExit.
1367  void VisitRoot(mirror::Object* entered_monitor, const RootInfo& info ATTRIBUTE_UNUSED)
1368      OVERRIDE NO_THREAD_SAFETY_ANALYSIS {
1369    if (self_->HoldsLock(entered_monitor)) {
1370      LOG(WARNING) << "Calling MonitorExit on object "
1371                   << entered_monitor << " (" << PrettyTypeOf(entered_monitor) << ")"
1372                   << " left locked by native thread "
1373                   << *Thread::Current() << " which is detaching";
1374      entered_monitor->MonitorExit(self_);
1375    }
1376  }
1377
1378 private:
1379  Thread* const self_;
1380};
1381
1382void Thread::Destroy() {
1383  Thread* self = this;
1384  DCHECK_EQ(self, Thread::Current());
1385
1386  if (tlsPtr_.jni_env != nullptr) {
1387    {
1388      ScopedObjectAccess soa(self);
1389      MonitorExitVisitor visitor(self);
1390      // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
1391      tlsPtr_.jni_env->monitors.VisitRoots(&visitor, RootInfo(kRootVMInternal));
1392    }
1393    // Release locally held global references; releasing them may require the mutator lock.
1394    if (tlsPtr_.jpeer != nullptr) {
1395      // If pthread_create fails we don't have a jni env here.
1396      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
1397      tlsPtr_.jpeer = nullptr;
1398    }
1399    if (tlsPtr_.class_loader_override != nullptr) {
1400      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override);
1401      tlsPtr_.class_loader_override = nullptr;
1402    }
1403  }
1404
1405  if (tlsPtr_.opeer != nullptr) {
1406    ScopedObjectAccess soa(self);
1407    // We may need to call user-supplied managed code; do this before final clean-up.
1408    HandleUncaughtExceptions(soa);
1409    RemoveFromThreadGroup(soa);
1410
1411    // this.nativePeer = 0;
1412    if (Runtime::Current()->IsActiveTransaction()) {
1413      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
1414          ->SetLong<true>(tlsPtr_.opeer, 0);
1415    } else {
1416      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)
1417          ->SetLong<false>(tlsPtr_.opeer, 0);
1418    }
1419    Dbg::PostThreadDeath(self);
1420
1421    // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
1422    // who is waiting.
1423    mirror::Object* lock =
1424        soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(tlsPtr_.opeer);
1425    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
1426    if (lock != nullptr) {
1427      StackHandleScope<1> hs(self);
1428      Handle<mirror::Object> h_obj(hs.NewHandle(lock));
1429      ObjectLock<mirror::Object> locker(self, h_obj);
1430      locker.NotifyAll();
1431    }
1432    tlsPtr_.opeer = nullptr;
1433  }
1434
1435  {
1436    ScopedObjectAccess soa(self);
1437    Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
1438  }
1439}
1440
1441Thread::~Thread() {
1442  CHECK(tlsPtr_.class_loader_override == nullptr);
1443  CHECK(tlsPtr_.jpeer == nullptr);
1444  CHECK(tlsPtr_.opeer == nullptr);
1445  bool initialized = (tlsPtr_.jni_env != nullptr);  // Did Thread::Init run?
1446  if (initialized) {
1447    delete tlsPtr_.jni_env;
1448    tlsPtr_.jni_env = nullptr;
1449  }
1450  CHECK_NE(GetState(), kRunnable);
1451  CHECK_NE(ReadFlag(kCheckpointRequest), true);
1452  CHECK(tlsPtr_.checkpoint_functions[0] == nullptr);
1453  CHECK(tlsPtr_.checkpoint_functions[1] == nullptr);
1454  CHECK(tlsPtr_.checkpoint_functions[2] == nullptr);
1455  CHECK(tlsPtr_.flip_function == nullptr);
1456  CHECK_EQ(tls32_.suspended_at_suspend_check, false);
1457
1458  // We may be deleting a stillborn thread.
1459  SetStateUnsafe(kTerminated);
1460
1461  delete wait_cond_;
1462  delete wait_mutex_;
1463
1464  if (tlsPtr_.long_jump_context != nullptr) {
1465    delete tlsPtr_.long_jump_context;
1466  }
1467
1468  if (initialized) {
1469    CleanupCpu();
1470  }
1471
1472  if (tlsPtr_.single_step_control != nullptr) {
1473    delete tlsPtr_.single_step_control;
1474  }
1475  delete tlsPtr_.instrumentation_stack;
1476  delete tlsPtr_.name;
1477  delete tlsPtr_.stack_trace_sample;
1478  free(tlsPtr_.nested_signal_state);
1479
1480  Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
1481
1482  TearDownAlternateSignalStack();
1483}
1484
1485void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
1486  if (!IsExceptionPending()) {
1487    return;
1488  }
1489  ScopedLocalRef<jobject> peer(tlsPtr_.jni_env, soa.AddLocalReference<jobject>(tlsPtr_.opeer));
1490  ScopedThreadStateChange tsc(this, kNative);
1491
1492  // Get and clear the exception.
1493  ScopedLocalRef<jthrowable> exception(tlsPtr_.jni_env, tlsPtr_.jni_env->ExceptionOccurred());
1494  tlsPtr_.jni_env->ExceptionClear();
1495
1496  // If the thread has its own handler, use that.
1497  ScopedLocalRef<jobject> handler(tlsPtr_.jni_env,
1498                                  tlsPtr_.jni_env->GetObjectField(peer.get(),
1499                                      WellKnownClasses::java_lang_Thread_uncaughtHandler));
1500  if (handler.get() == nullptr) {
1501    // Otherwise use the thread group's default handler.
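    // (java.lang.ThreadGroup implements Thread.UncaughtExceptionHandler, so the group object
    // itself can act as the default handler.)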
1502    handler.reset(tlsPtr_.jni_env->GetObjectField(peer.get(),
1503                                                  WellKnownClasses::java_lang_Thread_group));
1504  }
1505
1506  // Call the handler.
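  // Roughly: handler.uncaughtException(peer, exception).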
1507  tlsPtr_.jni_env->CallVoidMethod(handler.get(),
1508      WellKnownClasses::java_lang_Thread__UncaughtExceptionHandler_uncaughtException,
1509      peer.get(), exception.get());
1510
1511  // If the handler threw, clear that exception too.
1512  tlsPtr_.jni_env->ExceptionClear();
1513}
1514
1515void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
1516  // this.group.removeThread(this);
1517  // group can be null if we're in the compiler or a test.
1518  mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)
1519      ->GetObject(tlsPtr_.opeer);
1520  if (ogroup != nullptr) {
1521    ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
1522    ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(tlsPtr_.opeer));
1523    ScopedThreadStateChange tsc(soa.Self(), kNative);
1524    tlsPtr_.jni_env->CallVoidMethod(group.get(),
1525                                    WellKnownClasses::java_lang_ThreadGroup_removeThread,
1526                                    peer.get());
1527  }
1528}
1529
1530size_t Thread::NumHandleReferences() {
1531  size_t count = 0;
1532  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
1533    count += cur->NumberOfReferences();
1534  }
1535  return count;
1536}
1537
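// Returns true if "obj" points into one of this thread's handle scopes or shadow frames,
// i.e. it is a stack-allocated reference rather than an indirect reference table entry.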
1538bool Thread::HandleScopeContains(jobject obj) const {
1539  StackReference<mirror::Object>* hs_entry =
1540      reinterpret_cast<StackReference<mirror::Object>*>(obj);
1541  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur != nullptr; cur = cur->GetLink()) {
1542    if (cur->Contains(hs_entry)) {
1543      return true;
1544    }
1545  }
1546  // JNI code invoked from portable code uses shadow frames rather than the handle scope.
1547  return tlsPtr_.managed_stack.ShadowFramesContain(hs_entry);
1548}
1549
1550void Thread::HandleScopeVisitRoots(RootVisitor* visitor, uint32_t thread_id) {
1551  BufferedRootVisitor<kDefaultBufferedRootCount> buffered_visitor(
1552      visitor, RootInfo(kRootNativeStack, thread_id));
1553  for (HandleScope* cur = tlsPtr_.top_handle_scope; cur; cur = cur->GetLink()) {
1554    for (size_t j = 0, count = cur->NumberOfReferences(); j < count; ++j) {
1555      // GetReference returns a pointer to the stack reference within the handle scope. If this
1556      // needs to be updated, it will be done by the root visitor.
1557      buffered_visitor.VisitRootIfNonNull(cur->GetHandle(j).GetReference());
1558    }
1559  }
1560}
1561
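// Decodes a jobject back to a mirror::Object*, handling local, global, weak-global and
// handle-scope references; aborts via JniAbortF on invalid or already-deleted references.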
1562mirror::Object* Thread::DecodeJObject(jobject obj) const {
1563  if (obj == nullptr) {
1564    return nullptr;
1565  }
1566  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
1567  IndirectRefKind kind = GetIndirectRefKind(ref);
1568  mirror::Object* result;
1569  bool expect_null = false;
1570  // The "kinds" below are sorted by the frequency we expect to encounter them.
1571  if (kind == kLocal) {
1572    IndirectReferenceTable& locals = tlsPtr_.jni_env->locals;
1573    // Local references do not need a read barrier.
1574    result = locals.Get<kWithoutReadBarrier>(ref);
1575  } else if (kind == kHandleScopeOrInvalid) {
1576    // TODO: make stack indirect reference table lookup more efficient.
1577    // Check if this is a local reference in the handle scope.
1578    if (LIKELY(HandleScopeContains(obj))) {
1579      // Read from handle scope.
1580      result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
1581      VerifyObject(result);
1582    } else {
1583      tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of invalid jobject %p", obj);
1584      expect_null = true;
1585      result = nullptr;
1586    }
1587  } else if (kind == kGlobal) {
1588    result = tlsPtr_.jni_env->vm->DecodeGlobal(const_cast<Thread*>(this), ref);
1589  } else {
1590    DCHECK_EQ(kind, kWeakGlobal);
1591    result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
1592    if (Runtime::Current()->IsClearedJniWeakGlobal(result)) {
1593      // This is a special case where it's okay to return null.
1594      expect_null = true;
1595      result = nullptr;
1596    }
1597  }
1598
1599  if (UNLIKELY(!expect_null && result == nullptr)) {
1600    tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of deleted %s %p",
1601                                   ToStr<IndirectRefKind>(kind).c_str(), obj);
1602  }
1603  return result;
1604}
1605
1606// Implements java.lang.Thread.interrupted.
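// Unlike IsInterrupted() below, this also clears the interrupted flag (both under wait_mutex_).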
1607bool Thread::Interrupted() {
1608  MutexLock mu(Thread::Current(), *wait_mutex_);
1609  bool interrupted = IsInterruptedLocked();
1610  SetInterruptedLocked(false);
1611  return interrupted;
1612}
1613
1614// Implements java.lang.Thread.isInterrupted.
1615bool Thread::IsInterrupted() {
1616  MutexLock mu(Thread::Current(), *wait_mutex_);
1617  return IsInterruptedLocked();
1618}
1619
1620void Thread::Interrupt(Thread* self) {
1621  MutexLock mu(self, *wait_mutex_);
1622  if (interrupted_) {
1623    return;
1624  }
1625  interrupted_ = true;
1626  NotifyLocked(self);
1627}
1628
1629void Thread::Notify() {
1630  Thread* self = Thread::Current();
1631  MutexLock mu(self, *wait_mutex_);
1632  NotifyLocked(self);
1633}
1634
1635void Thread::NotifyLocked(Thread* self) {
1636  if (wait_monitor_ != nullptr) {
1637    wait_cond_->Signal(self);
1638  }
1639}
1640
1641void Thread::SetClassLoaderOverride(jobject class_loader_override) {
1642  if (tlsPtr_.class_loader_override != nullptr) {
1643    GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override);
1644  }
1645  tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
1646}
1647
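// Counts managed frames on the stack, ignoring runtime (callee-save) frames and recording how
// many leading frames belong to the exception's constructor chain so they can be skipped later.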
1648class CountStackDepthVisitor : public StackVisitor {
1649 public:
1650  explicit CountStackDepthVisitor(Thread* thread)
1651      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1652      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
1653        depth_(0), skip_depth_(0), skipping_(true) {}
1654
1655  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1656    // We want to skip frames up to and including the exception's constructor.
1657    // Note that we also skip the frame if it doesn't have a method (namely the callee-save
1658    // frame).
1659    ArtMethod* m = GetMethod();
1660    if (skipping_ && !m->IsRuntimeMethod() &&
1661        !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
1662      skipping_ = false;
1663    }
1664    if (!skipping_) {
1665      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
1666        ++depth_;
1667      }
1668    } else {
1669      ++skip_depth_;
1670    }
1671    return true;
1672  }
1673
1674  int GetDepth() const {
1675    return depth_;
1676  }
1677
1678  int GetSkipDepth() const {
1679    return skip_depth_;
1680  }
1681
1682 private:
1683  uint32_t depth_;
1684  uint32_t skip_depth_;
1685  bool skipping_;
1686};
1687
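// Fills a PointerArray laid out as [method pointers][dex PCs]: the first half holds the
// ArtMethod* for each visited frame and the second half holds the corresponding dex PC.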
1688template<bool kTransactionActive>
1689class BuildInternalStackTraceVisitor : public StackVisitor {
1690 public:
1691  explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
1692      : StackVisitor(thread, nullptr, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
1693        self_(self),
1694        skip_depth_(skip_depth),
1695        count_(0),
1696        trace_(nullptr),
1697        pointer_size_(Runtime::Current()->GetClassLinker()->GetImagePointerSize()) {}
1698
1699  bool Init(int depth)
1700      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1701    // Allocate method trace with format [method pointers][pcs].
1702    auto* cl = Runtime::Current()->GetClassLinker();
1703    trace_ = cl->AllocPointerArray(self_, depth * 2);
1704    if (trace_ == nullptr) {
1705      self_->AssertPendingOOMException();
1706      return false;
1707    }
1708    // If we are called from native, use non-transactional mode.
1709    const char* last_no_suspend_cause =
1710        self_->StartAssertNoThreadSuspension("Building internal stack trace");
1711    CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
1712    return true;
1713  }
1714
1715  virtual ~BuildInternalStackTraceVisitor() {
1716    if (trace_ != nullptr) {
1717      self_->EndAssertNoThreadSuspension(nullptr);
1718    }
1719  }
1720
1721  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1722    if (trace_ == nullptr) {
1723      return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
1724    }
1725    if (skip_depth_ > 0) {
1726      skip_depth_--;
1727      return true;
1728    }
1729    ArtMethod* m = GetMethod();
1730    if (m->IsRuntimeMethod()) {
1731      return true;  // Ignore runtime frames (in particular callee save).
1732    }
1733    trace_->SetElementPtrSize<kTransactionActive>(
1734        count_, m, pointer_size_);
1735    trace_->SetElementPtrSize<kTransactionActive>(
1736        trace_->GetLength() / 2 + count_, m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc(),
1737        pointer_size_);
1738    ++count_;
1739    return true;
1740  }
1741
1742  mirror::PointerArray* GetInternalStackTrace() const {
1743    return trace_;
1744  }
1745
1746 private:
1747  Thread* const self_;
1748  // How many more frames to skip.
1749  int32_t skip_depth_;
1750  // Current position down stack trace.
1751  uint32_t count_;
1752  // An array of the methods on the stack, the last entries are the dex PCs.
1753  mirror::PointerArray* trace_;
1754  // For cross compilation.
1755  size_t pointer_size_;
1756};
1757
1758template<bool kTransactionActive>
1759jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessAlreadyRunnable& soa) const {
1760  // Compute depth of stack
1761  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
1762  count_visitor.WalkStack();
1763  int32_t depth = count_visitor.GetDepth();
1764  int32_t skip_depth = count_visitor.GetSkipDepth();
1765
1766  // Build internal stack trace.
1767  BuildInternalStackTraceVisitor<kTransactionActive> build_trace_visitor(soa.Self(),
1768                                                                         const_cast<Thread*>(this),
1769                                                                         skip_depth);
1770  if (!build_trace_visitor.Init(depth)) {
1771    return nullptr;  // Allocation failed.
1772  }
1773  build_trace_visitor.WalkStack();
1774  mirror::PointerArray* trace = build_trace_visitor.GetInternalStackTrace();
1775  if (kIsDebugBuild) {
1776    // Second half is dex PCs.
1777    for (uint32_t i = 0; i < static_cast<uint32_t>(trace->GetLength() / 2); ++i) {
1778      auto* method = trace->GetElementPtrSize<ArtMethod*>(
1779          i, Runtime::Current()->GetClassLinker()->GetImagePointerSize());
1780      CHECK(method != nullptr);
1781    }
1782  }
1783  return soa.AddLocalReference<jobject>(trace);
1784}
1785template jobject Thread::CreateInternalStackTrace<false>(
1786    const ScopedObjectAccessAlreadyRunnable& soa) const;
1787template jobject Thread::CreateInternalStackTrace<true>(
1788    const ScopedObjectAccessAlreadyRunnable& soa) const;
1789
1790bool Thread::IsExceptionThrownByCurrentMethod(mirror::Throwable* exception) const {
1791  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
1792  count_visitor.WalkStack();
1793  return count_visitor.GetDepth() == exception->GetStackDepth();
1794}
1795
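// Converts an internal [methods][dex PCs] trace into an array of java.lang.StackTraceElement,
// either reusing (and truncating to) the caller-provided output array or allocating a new one.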
1796jobjectArray Thread::InternalStackTraceToStackTraceElementArray(
1797    const ScopedObjectAccessAlreadyRunnable& soa, jobject internal, jobjectArray output_array,
1798    int* stack_depth) {
1799  // Decode the internal stack trace into the depth, method trace and PC trace
1800  int32_t depth = soa.Decode<mirror::PointerArray*>(internal)->GetLength() / 2;
1801
1802  auto* cl = Runtime::Current()->GetClassLinker();
1803
1804  jobjectArray result;
1805
1806  if (output_array != nullptr) {
1807    // Reuse the array we were given.
1808    result = output_array;
1809    // ...adjusting the number of frames we'll write to not exceed the array length.
1810    const int32_t traces_length =
1811        soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->GetLength();
1812    depth = std::min(depth, traces_length);
1813  } else {
1814    // Create java_trace array and place in local reference table
1815    mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
1816        cl->AllocStackTraceElementArray(soa.Self(), depth);
1817    if (java_traces == nullptr) {
1818      return nullptr;
1819    }
1820    result = soa.AddLocalReference<jobjectArray>(java_traces);
1821  }
1822
1823  if (stack_depth != nullptr) {
1824    *stack_depth = depth;
1825  }
1826
1827  for (int32_t i = 0; i < depth; ++i) {
1828    auto* method_trace = soa.Decode<mirror::PointerArray*>(internal);
1829    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
1830    ArtMethod* method = method_trace->GetElementPtrSize<ArtMethod*>(i, sizeof(void*));
1831    uint32_t dex_pc = method_trace->GetElementPtrSize<uint32_t>(
1832        i + method_trace->GetLength() / 2, sizeof(void*));
1833    int32_t line_number;
1834    StackHandleScope<3> hs(soa.Self());
1835    auto class_name_object(hs.NewHandle<mirror::String>(nullptr));
1836    auto source_name_object(hs.NewHandle<mirror::String>(nullptr));
1837    if (method->IsProxyMethod()) {
1838      line_number = -1;
1839      class_name_object.Assign(method->GetDeclaringClass()->GetName());
1840      // source_name_object intentionally left null for proxy methods
1841    } else {
1842      line_number = method->GetLineNumFromDexPC(dex_pc);
1843      // Allocate element, potentially triggering GC
1844      // TODO: reuse class_name_object via Class::name_?
1845      const char* descriptor = method->GetDeclaringClassDescriptor();
1846      CHECK(descriptor != nullptr);
1847      std::string class_name(PrettyDescriptor(descriptor));
1848      class_name_object.Assign(
1849          mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
1850      if (class_name_object.Get() == nullptr) {
1851        soa.Self()->AssertPendingOOMException();
1852        return nullptr;
1853      }
1854      const char* source_file = method->GetDeclaringClassSourceFile();
1855      if (source_file != nullptr) {
1856        source_name_object.Assign(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
1857        if (source_name_object.Get() == nullptr) {
1858          soa.Self()->AssertPendingOOMException();
1859          return nullptr;
1860        }
1861      }
1862    }
1863    const char* method_name = method->GetInterfaceMethodIfProxy(sizeof(void*))->GetName();
1864    CHECK(method_name != nullptr);
1865    Handle<mirror::String> method_name_object(
1866        hs.NewHandle(mirror::String::AllocFromModifiedUtf8(soa.Self(), method_name)));
1867    if (method_name_object.Get() == nullptr) {
1868      return nullptr;
1869    }
1870    mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
1871        soa.Self(), class_name_object, method_name_object, source_name_object, line_number);
1872    if (obj == nullptr) {
1873      return nullptr;
1874    }
1875    // We are called from native: use non-transactional mode.
1876    soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->Set<false>(i, obj);
1877  }
1878  return result;
1879}
1880
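// Illustrative use (hypothetical call site): the descriptor is a JNI-style class descriptor,
// e.g. ThrowNewExceptionF("Ljava/lang/IllegalStateException;", "bad state %d", state);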
1881void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
1882  va_list args;
1883  va_start(args, fmt);
1884  ThrowNewExceptionV(exception_class_descriptor, fmt, args);
1885  va_end(args);
1886}
1887
1888void Thread::ThrowNewExceptionV(const char* exception_class_descriptor,
1889                                const char* fmt, va_list ap) {
1890  std::string msg;
1891  StringAppendV(&msg, fmt, ap);
1892  ThrowNewException(exception_class_descriptor, msg.c_str());
1893}
1894
1895void Thread::ThrowNewException(const char* exception_class_descriptor,
1896                               const char* msg) {
1897  // Callers should either clear the pending exception or call ThrowNewWrappedException.
1898  AssertNoPendingExceptionForNewException(msg);
1899  ThrowNewWrappedException(exception_class_descriptor, msg);
1900}
1901
1902static mirror::ClassLoader* GetCurrentClassLoader(Thread* self)
1903    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1904  ArtMethod* method = self->GetCurrentMethod(nullptr);
1905  return method != nullptr
1906      ? method->GetDeclaringClass()->GetClassLoader()
1907      : nullptr;
1908}
1909
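// Throws a new exception of the given type; if an exception is already pending on this thread,
// it is cleared and installed as the new exception's cause.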
1910void Thread::ThrowNewWrappedException(const char* exception_class_descriptor,
1911                                      const char* msg) {
1912  DCHECK_EQ(this, Thread::Current());
1913  ScopedObjectAccessUnchecked soa(this);
1914  StackHandleScope<3> hs(soa.Self());
1915  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(GetCurrentClassLoader(soa.Self())));
1916  ScopedLocalRef<jobject> cause(GetJniEnv(), soa.AddLocalReference<jobject>(GetException()));
1917  ClearException();
1918  Runtime* runtime = Runtime::Current();
1919  auto* cl = runtime->GetClassLinker();
1920  Handle<mirror::Class> exception_class(
1921      hs.NewHandle(cl->FindClass(this, exception_class_descriptor, class_loader)));
1922  if (UNLIKELY(exception_class.Get() == nullptr)) {
1923    CHECK(IsExceptionPending());
1924    LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
1925    return;
1926  }
1927
1928  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(soa.Self(), exception_class, true,
1929                                                             true))) {
1930    DCHECK(IsExceptionPending());
1931    return;
1932  }
1933  DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
1934  Handle<mirror::Throwable> exception(
1935      hs.NewHandle(down_cast<mirror::Throwable*>(exception_class->AllocObject(this))));
1936
1937  // If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
1938  if (exception.Get() == nullptr) {
1939    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1940    return;
1941  }
1942
1943  // Choose an appropriate constructor and set up the arguments.
1944  const char* signature;
1945  ScopedLocalRef<jstring> msg_string(GetJniEnv(), nullptr);
1946  if (msg != nullptr) {
1947    // Ensure we remember this and the method over the String allocation.
1948    msg_string.reset(
1949        soa.AddLocalReference<jstring>(mirror::String::AllocFromModifiedUtf8(this, msg)));
1950    if (UNLIKELY(msg_string.get() == nullptr)) {
1951      CHECK(IsExceptionPending());  // OOME.
1952      return;
1953    }
1954    if (cause.get() == nullptr) {
1955      signature = "(Ljava/lang/String;)V";
1956    } else {
1957      signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
1958    }
1959  } else {
1960    if (cause.get() == nullptr) {
1961      signature = "()V";
1962    } else {
1963      signature = "(Ljava/lang/Throwable;)V";
1964    }
1965  }
1966  ArtMethod* exception_init_method =
1967      exception_class->FindDeclaredDirectMethod("<init>", signature, cl->GetImagePointerSize());
1968
1969  CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
1970      << PrettyDescriptor(exception_class_descriptor);
1971
1972  if (UNLIKELY(!runtime->IsStarted())) {
1973    // Something is trying to throw an exception without a started runtime, which is the common
1974    // case in the compiler. We won't be able to invoke the constructor of the exception, so set
1975    // the exception fields directly.
1976    if (msg != nullptr) {
1977      exception->SetDetailMessage(down_cast<mirror::String*>(DecodeJObject(msg_string.get())));
1978    }
1979    if (cause.get() != nullptr) {
1980      exception->SetCause(down_cast<mirror::Throwable*>(DecodeJObject(cause.get())));
1981    }
1982    ScopedLocalRef<jobject> trace(GetJniEnv(),
1983                                  Runtime::Current()->IsActiveTransaction()
1984                                      ? CreateInternalStackTrace<true>(soa)
1985                                      : CreateInternalStackTrace<false>(soa));
1986    if (trace.get() != nullptr) {
1987      exception->SetStackState(down_cast<mirror::Throwable*>(DecodeJObject(trace.get())));
1988    }
1989    SetException(exception.Get());
1990  } else {
1991    jvalue jv_args[2];
1992    size_t i = 0;
1993
1994    if (msg != nullptr) {
1995      jv_args[i].l = msg_string.get();
1996      ++i;
1997    }
1998    if (cause.get() != nullptr) {
1999      jv_args[i].l = cause.get();
2000      ++i;
2001    }
2002    ScopedLocalRef<jobject> ref(soa.Env(), soa.AddLocalReference<jobject>(exception.Get()));
2003    InvokeWithJValues(soa, ref.get(), soa.EncodeMethod(exception_init_method), jv_args);
2004    if (LIKELY(!IsExceptionPending())) {
2005      SetException(exception.Get());
2006    }
2007  }
2008}
2009
2010void Thread::ThrowOutOfMemoryError(const char* msg) {
2011  LOG(WARNING) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
2012      msg, (tls32_.throwing_OutOfMemoryError ? " (recursive case)" : ""));
2013  if (!tls32_.throwing_OutOfMemoryError) {
2014    tls32_.throwing_OutOfMemoryError = true;
2015    ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
2016    tls32_.throwing_OutOfMemoryError = false;
2017  } else {
2018    Dump(LOG(WARNING));  // The pre-allocated OOME has no stack, so help out and log one.
2019    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
2020  }
2021}
2022
2023Thread* Thread::CurrentFromGdb() {
2024  return Thread::Current();
2025}
2026
2027void Thread::DumpFromGdb() const {
2028  std::ostringstream ss;
2029  Dump(ss);
2030  std::string str(ss.str());
2031  // log to stderr for debugging command line processes
2032  std::cerr << str;
2033#ifdef HAVE_ANDROID_OS
2034  // log to logcat for debugging frameworks processes
2035  LOG(INFO) << str;
2036#endif
2037}
2038
2039// Explicitly instantiate 32-bit and 64-bit thread offset dumping support.
2040template void Thread::DumpThreadOffset<4>(std::ostream& os, uint32_t offset);
2041template void Thread::DumpThreadOffset<8>(std::ostream& os, uint32_t offset);
2042
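// Maps a raw offset within the Thread object back to a readable name (a Thread field or an
// entrypoint), falling back to printing the numeric offset when nothing matches.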
2043template<size_t ptr_size>
2044void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
2045#define DO_THREAD_OFFSET(x, y) \
2046    if (offset == x.Uint32Value()) { \
2047      os << y; \
2048      return; \
2049    }
2050  DO_THREAD_OFFSET(ThreadFlagsOffset<ptr_size>(), "state_and_flags")
2051  DO_THREAD_OFFSET(CardTableOffset<ptr_size>(), "card_table")
2052  DO_THREAD_OFFSET(ExceptionOffset<ptr_size>(), "exception")
2053  DO_THREAD_OFFSET(PeerOffset<ptr_size>(), "peer")
2054  DO_THREAD_OFFSET(JniEnvOffset<ptr_size>(), "jni_env")
2055  DO_THREAD_OFFSET(SelfOffset<ptr_size>(), "self")
2056  DO_THREAD_OFFSET(StackEndOffset<ptr_size>(), "stack_end")
2057  DO_THREAD_OFFSET(ThinLockIdOffset<ptr_size>(), "thin_lock_thread_id")
2058  DO_THREAD_OFFSET(TopOfManagedStackOffset<ptr_size>(), "top_quick_frame_method")
2059  DO_THREAD_OFFSET(TopShadowFrameOffset<ptr_size>(), "top_shadow_frame")
2060  DO_THREAD_OFFSET(TopHandleScopeOffset<ptr_size>(), "top_handle_scope")
2061  DO_THREAD_OFFSET(ThreadSuspendTriggerOffset<ptr_size>(), "suspend_trigger")
2062#undef DO_THREAD_OFFSET
2063
2064#define INTERPRETER_ENTRY_POINT_INFO(x) \
2065    if (INTERPRETER_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
2066      os << #x; \
2067      return; \
2068    }
2069  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge)
2070  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge)
2071#undef INTERPRETER_ENTRY_POINT_INFO
2072
2073#define JNI_ENTRY_POINT_INFO(x) \
2074    if (JNI_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
2075      os << #x; \
2076      return; \
2077    }
2078  JNI_ENTRY_POINT_INFO(pDlsymLookup)
2079#undef JNI_ENTRY_POINT_INFO
2080
2081#define QUICK_ENTRY_POINT_INFO(x) \
2082    if (QUICK_ENTRYPOINT_OFFSET(ptr_size, x).Uint32Value() == offset) { \
2083      os << #x; \
2084      return; \
2085    }
2086  QUICK_ENTRY_POINT_INFO(pAllocArray)
2087  QUICK_ENTRY_POINT_INFO(pAllocArrayResolved)
2088  QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck)
2089  QUICK_ENTRY_POINT_INFO(pAllocObject)
2090  QUICK_ENTRY_POINT_INFO(pAllocObjectResolved)
2091  QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized)
2092  QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck)
2093  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray)
2094  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck)
2095  QUICK_ENTRY_POINT_INFO(pAllocStringFromBytes)
2096  QUICK_ENTRY_POINT_INFO(pAllocStringFromChars)
2097  QUICK_ENTRY_POINT_INFO(pAllocStringFromString)
2098  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial)
2099  QUICK_ENTRY_POINT_INFO(pCheckCast)
2100  QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage)
2101  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess)
2102  QUICK_ENTRY_POINT_INFO(pInitializeType)
2103  QUICK_ENTRY_POINT_INFO(pResolveString)
2104  QUICK_ENTRY_POINT_INFO(pSet8Instance)
2105  QUICK_ENTRY_POINT_INFO(pSet8Static)
2106  QUICK_ENTRY_POINT_INFO(pSet16Instance)
2107  QUICK_ENTRY_POINT_INFO(pSet16Static)
2108  QUICK_ENTRY_POINT_INFO(pSet32Instance)
2109  QUICK_ENTRY_POINT_INFO(pSet32Static)
2110  QUICK_ENTRY_POINT_INFO(pSet64Instance)
2111  QUICK_ENTRY_POINT_INFO(pSet64Static)
2112  QUICK_ENTRY_POINT_INFO(pSetObjInstance)
2113  QUICK_ENTRY_POINT_INFO(pSetObjStatic)
2114  QUICK_ENTRY_POINT_INFO(pGetByteInstance)
2115  QUICK_ENTRY_POINT_INFO(pGetBooleanInstance)
2116  QUICK_ENTRY_POINT_INFO(pGetByteStatic)
2117  QUICK_ENTRY_POINT_INFO(pGetBooleanStatic)
2118  QUICK_ENTRY_POINT_INFO(pGetShortInstance)
2119  QUICK_ENTRY_POINT_INFO(pGetCharInstance)
2120  QUICK_ENTRY_POINT_INFO(pGetShortStatic)
2121  QUICK_ENTRY_POINT_INFO(pGetCharStatic)
2122  QUICK_ENTRY_POINT_INFO(pGet32Instance)
2123  QUICK_ENTRY_POINT_INFO(pGet32Static)
2124  QUICK_ENTRY_POINT_INFO(pGet64Instance)
2125  QUICK_ENTRY_POINT_INFO(pGet64Static)
2126  QUICK_ENTRY_POINT_INFO(pGetObjInstance)
2127  QUICK_ENTRY_POINT_INFO(pGetObjStatic)
2128  QUICK_ENTRY_POINT_INFO(pAputObjectWithNullAndBoundCheck)
2129  QUICK_ENTRY_POINT_INFO(pAputObjectWithBoundCheck)
2130  QUICK_ENTRY_POINT_INFO(pAputObject)
2131  QUICK_ENTRY_POINT_INFO(pHandleFillArrayData)
2132  QUICK_ENTRY_POINT_INFO(pJniMethodStart)
2133  QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized)
2134  QUICK_ENTRY_POINT_INFO(pJniMethodEnd)
2135  QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized)
2136  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference)
2137  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized)
2138  QUICK_ENTRY_POINT_INFO(pQuickGenericJniTrampoline)
2139  QUICK_ENTRY_POINT_INFO(pLockObject)
2140  QUICK_ENTRY_POINT_INFO(pUnlockObject)
2141  QUICK_ENTRY_POINT_INFO(pCmpgDouble)
2142  QUICK_ENTRY_POINT_INFO(pCmpgFloat)
2143  QUICK_ENTRY_POINT_INFO(pCmplDouble)
2144  QUICK_ENTRY_POINT_INFO(pCmplFloat)
2145  QUICK_ENTRY_POINT_INFO(pFmod)
2146  QUICK_ENTRY_POINT_INFO(pL2d)
2147  QUICK_ENTRY_POINT_INFO(pFmodf)
2148  QUICK_ENTRY_POINT_INFO(pL2f)
2149  QUICK_ENTRY_POINT_INFO(pD2iz)
2150  QUICK_ENTRY_POINT_INFO(pF2iz)
2151  QUICK_ENTRY_POINT_INFO(pIdivmod)
2152  QUICK_ENTRY_POINT_INFO(pD2l)
2153  QUICK_ENTRY_POINT_INFO(pF2l)
2154  QUICK_ENTRY_POINT_INFO(pLdiv)
2155  QUICK_ENTRY_POINT_INFO(pLmod)
2156  QUICK_ENTRY_POINT_INFO(pLmul)
2157  QUICK_ENTRY_POINT_INFO(pShlLong)
2158  QUICK_ENTRY_POINT_INFO(pShrLong)
2159  QUICK_ENTRY_POINT_INFO(pUshrLong)
2160  QUICK_ENTRY_POINT_INFO(pIndexOf)
2161  QUICK_ENTRY_POINT_INFO(pStringCompareTo)
2162  QUICK_ENTRY_POINT_INFO(pMemcpy)
2163  QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline)
2164  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline)
2165  QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge)
2166  QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck)
2167  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck)
2168  QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck)
2169  QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck)
2170  QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck)
2171  QUICK_ENTRY_POINT_INFO(pTestSuspend)
2172  QUICK_ENTRY_POINT_INFO(pDeliverException)
2173  QUICK_ENTRY_POINT_INFO(pThrowArrayBounds)
2174  QUICK_ENTRY_POINT_INFO(pThrowDivZero)
2175  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod)
2176  QUICK_ENTRY_POINT_INFO(pThrowNullPointer)
2177  QUICK_ENTRY_POINT_INFO(pThrowStackOverflow)
2178  QUICK_ENTRY_POINT_INFO(pDeoptimize)
2179  QUICK_ENTRY_POINT_INFO(pA64Load)
2180  QUICK_ENTRY_POINT_INFO(pA64Store)
2181  QUICK_ENTRY_POINT_INFO(pNewEmptyString)
2182  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_B)
2183  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BI)
2184  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BII)
2185  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIII)
2186  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIIString)
2187  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BString)
2188  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BIICharset)
2189  QUICK_ENTRY_POINT_INFO(pNewStringFromBytes_BCharset)
2190  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_C)
2191  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_CII)
2192  QUICK_ENTRY_POINT_INFO(pNewStringFromChars_IIC)
2193  QUICK_ENTRY_POINT_INFO(pNewStringFromCodePoints)
2194  QUICK_ENTRY_POINT_INFO(pNewStringFromString)
2195  QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuffer)
2196  QUICK_ENTRY_POINT_INFO(pNewStringFromStringBuilder)
2197  QUICK_ENTRY_POINT_INFO(pReadBarrierJni)
2198#undef QUICK_ENTRY_POINT_INFO
2199
2200  os << offset;
2201}
2202
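// Delivers the pending exception: finds a catch handler (or sets up deoptimization) and then
// long-jumps to it, so this function does not return.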
2203void Thread::QuickDeliverException() {
2204  // Get exception from thread.
2205  mirror::Throwable* exception = GetException();
2206  CHECK(exception != nullptr);
2207  // Don't leave exception visible while we try to find the handler, which may cause class
2208  // resolution.
2209  ClearException();
2210  bool is_deoptimization = (exception == GetDeoptimizationException());
2211  QuickExceptionHandler exception_handler(this, is_deoptimization);
2212  if (is_deoptimization) {
2213    exception_handler.DeoptimizeStack();
2214  } else {
2215    exception_handler.FindCatch(exception);
2216  }
2217  exception_handler.UpdateInstrumentationStack();
2218  exception_handler.DoLongJump();
2219}
2220
2221Context* Thread::GetLongJumpContext() {
2222  Context* result = tlsPtr_.long_jump_context;
2223  if (result == nullptr) {
2224    result = Context::Create();
2225  } else {
2226    tlsPtr_.long_jump_context = nullptr;  // Avoid context being shared.
2227    result->Reset();
2228  }
2229  return result;
2230}
2231
2232// Note: this visitor may return with a method set, but dex_pc_ being DexFile::kDexNoIndex. This is
2233//       so we don't abort in a special situation (thin-locked monitor) when dumping the Java stack.
2234struct CurrentMethodVisitor FINAL : public StackVisitor {
2235  CurrentMethodVisitor(Thread* thread, Context* context, bool abort_on_error)
2236      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2237      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kIncludeInlinedFrames),
2238        this_object_(nullptr),
2239        method_(nullptr),
2240        dex_pc_(0),
2241        abort_on_error_(abort_on_error) {}
2242  bool VisitFrame() OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2243    ArtMethod* m = GetMethod();
2244    if (m->IsRuntimeMethod()) {
2245      // Continue if this is a runtime method.
2246      return true;
2247    }
2248    if (context_ != nullptr) {
2249      this_object_ = GetThisObject();
2250    }
2251    method_ = m;
2252    dex_pc_ = GetDexPc(abort_on_error_);
2253    return false;
2254  }
2255  mirror::Object* this_object_;
2256  ArtMethod* method_;
2257  uint32_t dex_pc_;
2258  const bool abort_on_error_;
2259};
2260
2261ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, bool abort_on_error) const {
2262  CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr, abort_on_error);
2263  visitor.WalkStack(false);
2264  if (dex_pc != nullptr) {
2265    *dex_pc = visitor.dex_pc_;
2266  }
2267  return visitor.method_;
2268}
2269
2270bool Thread::HoldsLock(mirror::Object* object) const {
2271  if (object == nullptr) {
2272    return false;
2273  }
2274  return object->GetLockOwnerThreadId() == GetThreadId();
2275}
2276
2277// RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
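// Walks every frame and reports each live reference it finds (in shadow frames, quick frames'
// stack maps, or native GC maps) to the visitor, writing back any reference the visitor moved.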
2278template <typename RootVisitor>
2279class ReferenceMapVisitor : public StackVisitor {
2280 public:
2281  ReferenceMapVisitor(Thread* thread, Context* context, RootVisitor& visitor)
2282      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2283        // We are visiting the references in compiled frames, so we do not need
2284        // to know the inlined frames.
2285      : StackVisitor(thread, context, StackVisitor::StackWalkKind::kSkipInlinedFrames),
2286        visitor_(visitor) {}
2287
2288  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
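    // Debug aid: flip this to true to log every frame as it is visited.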
2289    if (false) {
2290      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
2291                << StringPrintf("@ PC:%04x", GetDexPc());
2292    }
2293    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
2294    if (shadow_frame != nullptr) {
2295      VisitShadowFrame(shadow_frame);
2296    } else {
2297      VisitQuickFrame();
2298    }
2299    return true;
2300  }
2301
2302  void VisitShadowFrame(ShadowFrame* shadow_frame) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2303    ArtMethod* m = shadow_frame->GetMethod();
2304    DCHECK(m != nullptr);
2305    size_t num_regs = shadow_frame->NumberOfVRegs();
2306    if (m->IsNative() || shadow_frame->HasReferenceArray()) {
2307      // Handle scope for JNI or references for the interpreter.
2308      for (size_t reg = 0; reg < num_regs; ++reg) {
2309        mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2310        if (ref != nullptr) {
2311          mirror::Object* new_ref = ref;
2312          visitor_(&new_ref, reg, this);
2313          if (new_ref != ref) {
2314            shadow_frame->SetVRegReference(reg, new_ref);
2315          }
2316        }
2317      }
2318    } else {
2319      // Java method.
2320      // The portable path uses DexGcMap and stores it in Method.native_gc_map_.
2321      const uint8_t* gc_map = m->GetNativeGcMap(sizeof(void*));
2322      CHECK(gc_map != nullptr) << PrettyMethod(m);
2323      verifier::DexPcToReferenceMap dex_gc_map(gc_map);
2324      uint32_t dex_pc = shadow_frame->GetDexPC();
2325      const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
2326      DCHECK(reg_bitmap != nullptr);
2327      num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
2328      for (size_t reg = 0; reg < num_regs; ++reg) {
2329        if (TestBitmap(reg, reg_bitmap)) {
2330          mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2331          if (ref != nullptr) {
2332            mirror::Object* new_ref = ref;
2333            visitor_(&new_ref, reg, this);
2334            if (new_ref != ref) {
2335              shadow_frame->SetVRegReference(reg, new_ref);
2336            }
2337          }
2338        }
2339      }
2340    }
2341  }
2342
2343 private:
2344  void VisitQuickFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2345    auto* cur_quick_frame = GetCurrentQuickFrame();
2346    DCHECK(cur_quick_frame != nullptr);
2347    auto* m = *cur_quick_frame;
2348
2349    // Process register map (which native and runtime methods don't have)
2350    if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
2351      if (m->IsOptimized(sizeof(void*))) {
2352        auto* vreg_base = reinterpret_cast<StackReference<mirror::Object>*>(
2353            reinterpret_cast<uintptr_t>(cur_quick_frame));
2354        Runtime* runtime = Runtime::Current();
2355        const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
2356        uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
2357        CodeInfo code_info = m->GetOptimizedCodeInfo();
2358        StackMapEncoding encoding = code_info.ExtractEncoding();
2359        StackMap map = code_info.GetStackMapForNativePcOffset(native_pc_offset, encoding);
2360        DCHECK(map.IsValid());
2361        MemoryRegion mask = map.GetStackMask(encoding);
2362        // Visit stack entries that hold pointers.
2363        for (size_t i = 0; i < mask.size_in_bits(); ++i) {
2364          if (mask.LoadBit(i)) {
2365            auto* ref_addr = vreg_base + i;
2366            mirror::Object* ref = ref_addr->AsMirrorPtr();
2367            if (ref != nullptr) {
2368              mirror::Object* new_ref = ref;
2369              visitor_(&new_ref, -1, this);
2370              if (ref != new_ref) {
2371                ref_addr->Assign(new_ref);
2372              }
2373            }
2374          }
2375        }
2376        // Visit callee-save registers that hold pointers.
2377        uint32_t register_mask = map.GetRegisterMask(encoding);
2378        for (size_t i = 0; i < BitSizeOf<uint32_t>(); ++i) {
2379          if (register_mask & (1 << i)) {
2380            mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(i));
2381            if (*ref_addr != nullptr) {
2382              visitor_(ref_addr, -1, this);
2383            }
2384          }
2385        }
2386      } else {
2387        const uint8_t* native_gc_map = m->GetNativeGcMap(sizeof(void*));
2388        CHECK(native_gc_map != nullptr) << PrettyMethod(m);
2389        const DexFile::CodeItem* code_item = m->GetCodeItem();
2390        // Can't be null or how would we compile its instructions?
2391        DCHECK(code_item != nullptr) << PrettyMethod(m);
2392        NativePcOffsetToReferenceMap map(native_gc_map);
2393        size_t num_regs = map.RegWidth() * 8;
2394        if (num_regs > 0) {
2395          Runtime* runtime = Runtime::Current();
2396          const void* entry_point = runtime->GetInstrumentation()->GetQuickCodeFor(m, sizeof(void*));
2397          uintptr_t native_pc_offset = m->NativeQuickPcOffset(GetCurrentQuickFramePc(), entry_point);
2398          const uint8_t* reg_bitmap = map.FindBitMap(native_pc_offset);
2399          DCHECK(reg_bitmap != nullptr);
2400          const void* code_pointer = ArtMethod::EntryPointToCodePointer(entry_point);
2401          const VmapTable vmap_table(m->GetVmapTable(code_pointer, sizeof(void*)));
2402          QuickMethodFrameInfo frame_info = m->GetQuickFrameInfo(code_pointer);
2403          // For all dex registers in the bitmap
2404          DCHECK(cur_quick_frame != nullptr);
2405          for (size_t reg = 0; reg < num_regs; ++reg) {
2406            // Does this register hold a reference?
2407            if (TestBitmap(reg, reg_bitmap)) {
2408              uint32_t vmap_offset;
2409              if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
2410                int vmap_reg = vmap_table.ComputeRegister(frame_info.CoreSpillMask(), vmap_offset,
2411                                                          kReferenceVReg);
2412                // This is sound as spilled GPRs will be word sized (i.e. 32-bit or 64-bit).
2413                mirror::Object** ref_addr =
2414                    reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
2415                if (*ref_addr != nullptr) {
2416                  visitor_(ref_addr, reg, this);
2417                }
2418              } else {
2419                StackReference<mirror::Object>* ref_addr =
2420                    reinterpret_cast<StackReference<mirror::Object>*>(GetVRegAddrFromQuickCode(
2421                        cur_quick_frame, code_item, frame_info.CoreSpillMask(),
2422                        frame_info.FpSpillMask(), frame_info.FrameSizeInBytes(), reg));
2423                mirror::Object* ref = ref_addr->AsMirrorPtr();
2424                if (ref != nullptr) {
2425                  mirror::Object* new_ref = ref;
2426                  visitor_(&new_ref, reg, this);
2427                  if (ref != new_ref) {
2428                    ref_addr->Assign(new_ref);
2429                  }
2430                }
2431              }
2432            }
2433          }
2434        }
2435      }
2436    }
2437  }
2438
2439  // Visitor for when we visit a root.
2440  RootVisitor& visitor_;
2441};
2442
2443class RootCallbackVisitor {
2444 public:
2445  RootCallbackVisitor(RootVisitor* visitor, uint32_t tid) : visitor_(visitor), tid_(tid) {}
2446
2447  void operator()(mirror::Object** obj, size_t vreg, const StackVisitor* stack_visitor) const
2448      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2449    visitor_->VisitRoot(obj, JavaFrameRootInfo(tid_, stack_visitor, vreg));
2450  }
2451
2452 private:
2453  RootVisitor* const visitor_;
2454  const uint32_t tid_;
2455};
2456
2457void Thread::VisitRoots(RootVisitor* visitor) {
2458  const uint32_t thread_id = GetThreadId();
2459  visitor->VisitRootIfNonNull(&tlsPtr_.opeer, RootInfo(kRootThreadObject, thread_id));
2460  if (tlsPtr_.exception != nullptr && tlsPtr_.exception != GetDeoptimizationException()) {
2461    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception),
2462                       RootInfo(kRootNativeStack, thread_id));
2463  }
2464  visitor->VisitRootIfNonNull(&tlsPtr_.monitor_enter_object, RootInfo(kRootNativeStack, thread_id));
2465  tlsPtr_.jni_env->locals.VisitRoots(visitor, RootInfo(kRootJNILocal, thread_id));
2466  tlsPtr_.jni_env->monitors.VisitRoots(visitor, RootInfo(kRootJNIMonitor, thread_id));
2467  HandleScopeVisitRoots(visitor, thread_id);
2468  if (tlsPtr_.debug_invoke_req != nullptr) {
2469    tlsPtr_.debug_invoke_req->VisitRoots(visitor, RootInfo(kRootDebugger, thread_id));
2470  }
2471  if (tlsPtr_.stacked_shadow_frame_record != nullptr) {
2472    RootCallbackVisitor visitor_to_callback(visitor, thread_id);
2473    ReferenceMapVisitor<RootCallbackVisitor> mapper(this, nullptr, visitor_to_callback);
2474    for (StackedShadowFrameRecord* record = tlsPtr_.stacked_shadow_frame_record;
2475         record != nullptr;
2476         record = record->GetLink()) {
2477      for (ShadowFrame* shadow_frame = record->GetShadowFrame();
2478           shadow_frame != nullptr;
2479           shadow_frame = shadow_frame->GetLink()) {
2480        mapper.VisitShadowFrame(shadow_frame);
2481      }
2482    }
2483  }
2484  if (tlsPtr_.deoptimization_return_value_stack != nullptr) {
2485    for (DeoptimizationReturnValueRecord* record = tlsPtr_.deoptimization_return_value_stack;
2486         record != nullptr;
2487         record = record->GetLink()) {
2488      if (record->IsReference()) {
2489        visitor->VisitRootIfNonNull(record->GetGCRoot(),
2490            RootInfo(kRootThreadObject, thread_id));
2491      }
2492    }
2493  }
2494  for (auto* verifier = tlsPtr_.method_verifier; verifier != nullptr; verifier = verifier->link_) {
2495    verifier->VisitRoots(visitor, RootInfo(kRootNativeStack, thread_id));
2496  }
2497  // Visit roots on this thread's stack
2498  Context* context = GetLongJumpContext();
2499  RootCallbackVisitor visitor_to_callback(visitor, thread_id);
2500  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context, visitor_to_callback);
2501  mapper.WalkStack();
2502  ReleaseLongJumpContext(context);
2503  for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
2504    visitor->VisitRootIfNonNull(&frame.this_object_, RootInfo(kRootVMInternal, thread_id));
2505  }
2506}
2507
2508class VerifyRootVisitor : public SingleRootVisitor {
2509 public:
2510  void VisitRoot(mirror::Object* root, const RootInfo& info ATTRIBUTE_UNUSED)
2511      OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2512    VerifyObject(root);
2513  }
2514};
2515
2516void Thread::VerifyStackImpl() {
2517  VerifyRootVisitor visitor;
2518  std::unique_ptr<Context> context(Context::Create());
2519  RootCallbackVisitor visitor_to_callback(&visitor, GetThreadId());
2520  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback);
2521  mapper.WalkStack();
2522}
2523
2524// Set the stack end to the value to be used during a stack overflow.
2525void Thread::SetStackEndForStackOverflow() {
2526  // During stack overflow we allow use of the full stack.
2527  if (tlsPtr_.stack_end == tlsPtr_.stack_begin) {
2528    // However, we seem to have already extended to use the full stack.
2529    LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
2530               << GetStackOverflowReservedBytes(kRuntimeISA) << ")?";
2531    DumpStack(LOG(ERROR));
2532    LOG(FATAL) << "Recursive stack overflow.";
2533  }
2534
2535  tlsPtr_.stack_end = tlsPtr_.stack_begin;
2536
2537  // Remove the stack overflow protection if it is set up.
2538  bool implicit_stack_check = !Runtime::Current()->ExplicitStackOverflowChecks();
2539  if (implicit_stack_check) {
2540    if (!UnprotectStack()) {
2541      LOG(ERROR) << "Unable to remove stack protection for stack overflow";
2542    }
2543  }
2544}
2545
2546void Thread::SetTlab(uint8_t* start, uint8_t* end) {
2547  DCHECK_LE(start, end);
2548  tlsPtr_.thread_local_start = start;
2549  tlsPtr_.thread_local_pos = tlsPtr_.thread_local_start;
2550  tlsPtr_.thread_local_end = end;
2551  tlsPtr_.thread_local_objects = 0;
2552}
2553
2554bool Thread::HasTlab() const {
2555  bool has_tlab = tlsPtr_.thread_local_pos != nullptr;
2556  if (has_tlab) {
2557    DCHECK(tlsPtr_.thread_local_start != nullptr && tlsPtr_.thread_local_end != nullptr);
2558  } else {
2559    DCHECK(tlsPtr_.thread_local_start == nullptr && tlsPtr_.thread_local_end == nullptr);
2560  }
2561  return has_tlab;
2562}
2563
2564std::ostream& operator<<(std::ostream& os, const Thread& thread) {
2565  thread.ShortDump(os);
2566  return os;
2567}
2568
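// Implicit stack overflow checks rely on a PROT_NONE guard region just below the usable stack;
// touching it faults instead of silently running past the end of the stack.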
2569void Thread::ProtectStack() {
2570  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
2571  VLOG(threads) << "Protecting stack at " << pregion;
2572  if (mprotect(pregion, kStackOverflowProtectedSize, PROT_NONE) == -1) {
2573    LOG(FATAL) << "Unable to create protected region in stack for implicit overflow check. "
2574        "Reason: "
2575        << strerror(errno) << " size:  " << kStackOverflowProtectedSize;
2576  }
2577}
2578
2579bool Thread::UnprotectStack() {
2580  void* pregion = tlsPtr_.stack_begin - kStackOverflowProtectedSize;
2581  VLOG(threads) << "Unprotecting stack at " << pregion;
2582  return mprotect(pregion, kStackOverflowProtectedSize, PROT_READ|PROT_WRITE) == 0;
2583}
2584
2585void Thread::ActivateSingleStepControl(SingleStepControl* ssc) {
2586  CHECK(Dbg::IsDebuggerActive());
2587  CHECK(GetSingleStepControl() == nullptr) << "Single step already active in thread " << *this;
2588  CHECK(ssc != nullptr);
2589  tlsPtr_.single_step_control = ssc;
2590}
2591
2592void Thread::DeactivateSingleStepControl() {
2593  CHECK(Dbg::IsDebuggerActive());
2594  CHECK(GetSingleStepControl() != nullptr) << "Single step not active in thread " << *this;
2595  SingleStepControl* ssc = GetSingleStepControl();
2596  tlsPtr_.single_step_control = nullptr;
2597  delete ssc;
2598}
2599
2600void Thread::SetDebugInvokeReq(DebugInvokeReq* req) {
2601  CHECK(Dbg::IsDebuggerActive());
2602  CHECK(GetInvokeReq() == nullptr) << "Debug invoke req already active in thread " << *this;
2603  CHECK(Thread::Current() != this) << "Debug invoke can't be dispatched by the thread itself";
2604  CHECK(req != nullptr);
2605  tlsPtr_.debug_invoke_req = req;
2606}
2607
2608void Thread::ClearDebugInvokeReq() {
2609  CHECK(GetInvokeReq() != nullptr) << "Debug invoke req not active in thread " << *this;
2610  CHECK(Thread::Current() == this) << "Debug invoke must be finished by the thread itself";
2611  DebugInvokeReq* req = tlsPtr_.debug_invoke_req;
2612  tlsPtr_.debug_invoke_req = nullptr;
2613  delete req;
2614}
2615
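// Active MethodVerifiers are kept on an intrusive singly-linked list (via link_) so that
// VisitRoots() can visit their roots while verification is in progress.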
2616void Thread::PushVerifier(verifier::MethodVerifier* verifier) {
2617  verifier->link_ = tlsPtr_.method_verifier;
2618  tlsPtr_.method_verifier = verifier;
2619}
2620
2621void Thread::PopVerifier(verifier::MethodVerifier* verifier) {
2622  CHECK_EQ(tlsPtr_.method_verifier, verifier);
2623  tlsPtr_.method_verifier = verifier->link_;
2624}
2625
2626}  // namespace art
2627