thread.cc revision 4e30541a92381fb280cd0be9a1763b713ee4d64c
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define ATRACE_TAG ATRACE_TAG_DALVIK
18
19#include "thread.h"
20
21#include <cutils/trace.h>
22#include <pthread.h>
23#include <signal.h>
24#include <sys/resource.h>
25#include <sys/time.h>
26
27#include <algorithm>
28#include <bitset>
29#include <cerrno>
30#include <iostream>
31#include <list>
32
33#include "arch/context.h"
34#include "base/mutex.h"
35#include "catch_finder.h"
36#include "class_linker.h"
37#include "class_linker-inl.h"
38#include "cutils/atomic.h"
39#include "cutils/atomic-inline.h"
40#include "debugger.h"
41#include "dex_file-inl.h"
42#include "entrypoints/entrypoint_utils.h"
43#include "gc_map.h"
44#include "gc/accounting/card_table-inl.h"
45#include "gc/heap.h"
46#include "gc/space/space.h"
47#include "invoke_arg_array_builder.h"
48#include "jni_internal.h"
49#include "mirror/art_field-inl.h"
50#include "mirror/art_method-inl.h"
51#include "mirror/class-inl.h"
52#include "mirror/class_loader.h"
53#include "mirror/object_array-inl.h"
54#include "mirror/stack_trace_element.h"
55#include "monitor.h"
56#include "object_utils.h"
57#include "reflection.h"
58#include "runtime.h"
59#include "scoped_thread_state_change.h"
60#include "ScopedLocalRef.h"
61#include "ScopedUtfChars.h"
62#include "sirt_ref.h"
63#include "stack.h"
64#include "stack_indirect_reference_table.h"
65#include "thread-inl.h"
66#include "thread_list.h"
67#include "utils.h"
68#include "verifier/dex_gc_map.h"
69#include "verify_object-inl.h"
70#include "vmap_table.h"
71#include "well_known_classes.h"
72
73namespace art {
74
75bool Thread::is_started_ = false;
76pthread_key_t Thread::pthread_key_self_;
77ConditionVariable* Thread::resume_cond_ = nullptr;
78
79static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
80
81void Thread::InitCardTable() {
82  card_table_ = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
83}
84
85#if !defined(__APPLE__)
86static void UnimplementedEntryPoint() {
87  UNIMPLEMENTED(FATAL);
88}
89#endif
90
91void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
92                     PortableEntryPoints* ppoints, QuickEntryPoints* qpoints);
93
94void Thread::InitTlsEntryPoints() {
95#if !defined(__APPLE__)  // The Mac GCC is too old to accept this code.
96  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
97  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&interpreter_entrypoints_);
98  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(quick_entrypoints_));
99  for (uintptr_t* it = begin; it != end; ++it) {
100    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
101  }
102  begin = reinterpret_cast<uintptr_t*>(&interpreter_entrypoints_);
103  end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(portable_entrypoints_));
104  for (uintptr_t* it = begin; it != end; ++it) {
105    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
106  }
107#endif
108  InitEntryPoints(&interpreter_entrypoints_, &jni_entrypoints_, &portable_entrypoints_,
109                  &quick_entrypoints_);
110}
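// Illustrative sketch (not part of thread.cc) of the placeholder-filling pattern used in
// InitTlsEntryPoints above: treat a table of entry points as pointer-sized words and point
// every slot at a trap until real implementations are installed. The struct and function
// names here are assumptions for the example only.
#if 0  // illustrative only, not compiled
#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct ExampleEntryPoints {
  void (*pAlloc)();
  void (*pResolve)();
  void (*pThrow)();
};

static void Unimplemented() {
  std::fprintf(stderr, "unimplemented entry point called\n");
  std::abort();
}

static void FillWithPlaceholder(ExampleEntryPoints* points) {
  // Walk the struct as an array of pointer-sized words, as the code above does.
  uintptr_t* begin = reinterpret_cast<uintptr_t*>(points);
  uintptr_t* end =
      reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(points) + sizeof(*points));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(Unimplemented);
  }
}
#endif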
111
112void ResetQuickAllocEntryPoints(QuickEntryPoints* qpoints);
113
114void Thread::ResetQuickAllocEntryPointsForThread() {
115  ResetQuickAllocEntryPoints(&quick_entrypoints_);
116}
117
118void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
119  deoptimization_shadow_frame_ = sf;
120}
121
122void Thread::SetDeoptimizationReturnValue(const JValue& ret_val) {
123  deoptimization_return_value_.SetJ(ret_val.GetJ());
124}
125
126ShadowFrame* Thread::GetAndClearDeoptimizationShadowFrame(JValue* ret_val) {
127  ShadowFrame* sf = deoptimization_shadow_frame_;
128  deoptimization_shadow_frame_ = nullptr;
129  ret_val->SetJ(deoptimization_return_value_.GetJ());
130  return sf;
131}
132
133void Thread::InitTid() {
134  tid_ = ::art::GetTid();
135}
136
137void Thread::InitAfterFork() {
138  // One thread (us) survived the fork, but we have a new tid so we need to
139  // update the value stashed in this Thread*.
140  InitTid();
141}
142
143void* Thread::CreateCallback(void* arg) {
144  Thread* self = reinterpret_cast<Thread*>(arg);
145  Runtime* runtime = Runtime::Current();
146  if (runtime == nullptr) {
147    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
148    return nullptr;
149  }
150  {
151    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
152    //       after self->Init().
153    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
154    // Check that if we got here we cannot be shutting down (as shutdown should never have started
155    // while threads are being born).
156    CHECK(!runtime->IsShuttingDownLocked());
157    self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
158    Runtime::Current()->EndThreadBirth();
159  }
160  {
161    ScopedObjectAccess soa(self);
162
163    // Copy peer into self, deleting global reference when done.
164    CHECK(self->jpeer_ != nullptr);
165    self->opeer_ = soa.Decode<mirror::Object*>(self->jpeer_);
166    self->GetJniEnv()->DeleteGlobalRef(self->jpeer_);
167    self->jpeer_ = nullptr;
168
169    {
170      SirtRef<mirror::String> thread_name(self, self->GetThreadName(soa));
171      self->SetThreadName(thread_name->ToModifiedUtf8().c_str());
172    }
173    Dbg::PostThreadStart(self);
174
175    // Invoke the 'run' method of our java.lang.Thread.
176    mirror::Object* receiver = self->opeer_;
177    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
178    mirror::ArtMethod* m =
179        receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid));
180    JValue result;
181    ArgArray arg_array(nullptr, 0);
182    arg_array.Append(receiver);
183    m->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), &result, "V");
184  }
185  // Detach and delete self.
186  Runtime::Current()->GetThreadList()->Unregister(self);
187
188  return nullptr;
189}
190
191Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa,
192                                  mirror::Object* thread_peer) {
193  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
194  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetLong(thread_peer)));
195  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
196  // to stop it from going away.
197  if (kIsDebugBuild) {
198    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
199    if (result != nullptr && !result->IsSuspended()) {
200      Locks::thread_list_lock_->AssertHeld(soa.Self());
201    }
202  }
203  return result;
204}
205
206Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, jobject java_thread) {
207  return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread));
208}
209
210static size_t FixStackSize(size_t stack_size) {
211  // A stack size of zero means "use the default".
212  if (stack_size == 0) {
213    stack_size = Runtime::Current()->GetDefaultStackSize();
214  }
215
216  // Dalvik used the bionic pthread default stack size for native threads,
217  // so include that here to support apps that expect large native stacks.
218  stack_size += 1 * MB;
219
220  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
221  if (stack_size < PTHREAD_STACK_MIN) {
222    stack_size = PTHREAD_STACK_MIN;
223  }
224
225  // It's likely that callers are trying to ensure they have at least a certain amount of
226  // stack space, so we should add our reserved space on top of what they requested, rather
227  // than implicitly take it away from them.
228  stack_size += Thread::kStackOverflowReservedBytes;
229
230  // Some systems require the stack size to be a multiple of the system page size, so round up.
231  stack_size = RoundUp(stack_size, kPageSize);
232
233  return stack_size;
234}
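// Worked example (not part of thread.cc) of the sizing arithmetic in FixStackSize above,
// using assumed values: a 64 KiB request grows by 1 MiB for bionic-sized native stacks,
// plus an assumed overflow-reserved region, then is rounded up to a whole page.
#if 0  // illustrative only, not compiled
#include <cstddef>

constexpr size_t kPage = 4096;
constexpr size_t RoundUpToPage(size_t n) { return (n + kPage - 1) & ~(kPage - 1); }

constexpr size_t kRequested = 64 * 1024;                   // caller's request
constexpr size_t kWithNative = kRequested + 1024 * 1024;   // + 1 MB native allowance
constexpr size_t kWithReserve = kWithNative + 16 * 1024;   // + assumed reserved bytes
constexpr size_t kFinal = RoundUpToPage(kWithReserve);     // multiple of the page size
static_assert(kFinal % kPage == 0, "stack size must be page aligned");
#endif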
235
236void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
237  CHECK(java_peer != nullptr);
238  Thread* self = static_cast<JNIEnvExt*>(env)->self;
239  Runtime* runtime = Runtime::Current();
240
241  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
242  bool thread_start_during_shutdown = false;
243  {
244    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
245    if (runtime->IsShuttingDownLocked()) {
246      thread_start_during_shutdown = true;
247    } else {
248      runtime->StartThreadBirth();
249    }
250  }
251  if (thread_start_during_shutdown) {
252    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
253    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
254    return;
255  }
256
257  Thread* child_thread = new Thread(is_daemon);
258  // Use global JNI ref to hold peer live while child thread starts.
259  child_thread->jpeer_ = env->NewGlobalRef(java_peer);
260  stack_size = FixStackSize(stack_size);
261
262  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to
263  // assign it.
264  env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
265                    reinterpret_cast<jlong>(child_thread));
266
267  pthread_t new_pthread;
268  pthread_attr_t attr;
269  CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
270  CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED");
271  CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
272  int pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, child_thread);
273  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");
274
275  if (pthread_create_result != 0) {
276    // pthread_create(3) failed, so clean up.
277    {
278      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
279      runtime->EndThreadBirth();
280    }
281    // Manually delete the global reference since Thread::Init will not have been run.
282    env->DeleteGlobalRef(child_thread->jpeer_);
283    child_thread->jpeer_ = nullptr;
284    delete child_thread;
285    child_thread = nullptr;
286    // TODO: remove from thread group?
287    env->SetLongField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
288    {
289      std::string msg(StringPrintf("pthread_create (%s stack) failed: %s",
290                                   PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
291      ScopedObjectAccess soa(env);
292      soa.Self()->ThrowOutOfMemoryError(msg.c_str());
293    }
294  }
295}
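// Minimal sketch (not part of thread.cc) of the pthread attribute sequence that
// CreateNativeThread wraps with CHECK_PTHREAD_CALL: a detached thread with an explicit
// stack size, with the attributes destroyed whether or not pthread_create succeeds.
#if 0  // illustrative only, not compiled
#include <pthread.h>
#include <cstddef>

static void* TrampolineExample(void* arg) { return arg; }

static int SpawnDetached(size_t stack_size, void* arg) {
  pthread_t tid;
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
  pthread_attr_setstacksize(&attr, stack_size);
  int rc = pthread_create(&tid, &attr, TrampolineExample, arg);
  pthread_attr_destroy(&attr);
  return rc;  // 0 on success, errno-style code on failure (errno itself is not set).
}
#endif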
296
297void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
298  // This function does all the initialization that must be run by the native thread it applies to.
299  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
300  // we can handshake with the corresponding native thread when it's ready.) Check this native
301  // thread hasn't been through here already...
302  CHECK(Thread::Current() == nullptr);
303  SetUpAlternateSignalStack();
304  InitCpu();
305  InitTlsEntryPoints();
306  InitCardTable();
307  InitTid();
308  // Set pthread_self_ before calling pthread_setspecific so that Thread::Current works; this
309  // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
310  pthread_self_ = pthread_self();
311  CHECK(is_started_);
312  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
313  DCHECK_EQ(Thread::Current(), this);
314
315  thin_lock_thread_id_ = thread_list->AllocThreadId(this);
316  InitStackHwm();
317
318  jni_env_ = new JNIEnvExt(this, java_vm);
319  thread_list->Register(this);
320}
321
322Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
323                       bool create_peer) {
324  Thread* self;
325  Runtime* runtime = Runtime::Current();
326  if (runtime == nullptr) {
327    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
328    return nullptr;
329  }
330  {
331    MutexLock mu(nullptr, *Locks::runtime_shutdown_lock_);
332    if (runtime->IsShuttingDownLocked()) {
333      LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
334      return nullptr;
335    } else {
336      Runtime::Current()->StartThreadBirth();
337      self = new Thread(as_daemon);
338      self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
339      Runtime::Current()->EndThreadBirth();
340    }
341  }
342
343  CHECK_NE(self->GetState(), kRunnable);
344  self->SetState(kNative);
345
346  // If we're the main thread, ClassLinker won't be created until after we're attached,
347  // so that thread needs a two-stage attach. Regular threads don't need this hack.
348  // In the compiler, all threads need this hack, because no-one's going to be getting
349  // a native peer!
350  if (create_peer) {
351    self->CreatePeer(thread_name, as_daemon, thread_group);
352  } else {
353    // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
354    if (thread_name != nullptr) {
355      self->name_->assign(thread_name);
356      ::art::SetThreadName(thread_name);
357    }
358  }
359
360  return self;
361}
362
363void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
364  Runtime* runtime = Runtime::Current();
365  CHECK(runtime->IsStarted());
366  JNIEnv* env = jni_env_;
367
368  if (thread_group == nullptr) {
369    thread_group = runtime->GetMainThreadGroup();
370  }
371  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
372  jint thread_priority = GetNativePriority();
373  jboolean thread_is_daemon = as_daemon;
374
375  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
376  if (peer.get() == nullptr) {
377    CHECK(IsExceptionPending());
378    return;
379  }
380  {
381    ScopedObjectAccess soa(this);
382    opeer_ = soa.Decode<mirror::Object*>(peer.get());
383  }
384  env->CallNonvirtualVoidMethod(peer.get(),
385                                WellKnownClasses::java_lang_Thread,
386                                WellKnownClasses::java_lang_Thread_init,
387                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
388  AssertNoPendingException();
389
390  Thread* self = this;
391  DCHECK_EQ(self, Thread::Current());
392  jni_env_->SetLongField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
393                         reinterpret_cast<jlong>(self));
394
395  ScopedObjectAccess soa(self);
396  SirtRef<mirror::String> peer_thread_name(soa.Self(), GetThreadName(soa));
397  if (peer_thread_name.get() == nullptr) {
398    // The Thread constructor should have set the Thread.name to a
399    // non-null value. However, because we can run without code
400    // available (in the compiler, in tests), we manually assign the
401    // fields the constructor should have set.
402    if (runtime->IsActiveTransaction()) {
403      InitPeer<true>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
404    } else {
405      InitPeer<false>(soa, thread_is_daemon, thread_group, thread_name.get(), thread_priority);
406    }
407    peer_thread_name.reset(GetThreadName(soa));
408  }
409  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
410  if (peer_thread_name.get() != nullptr) {
411    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
412  }
413}
414
415template<bool kTransactionActive>
416void Thread::InitPeer(ScopedObjectAccess& soa, jboolean thread_is_daemon, jobject thread_group,
417                      jobject thread_name, jint thread_priority) {
418  soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
419      SetBoolean<kTransactionActive>(opeer_, thread_is_daemon);
420  soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
421      SetObject<kTransactionActive>(opeer_, soa.Decode<mirror::Object*>(thread_group));
422  soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
423      SetObject<kTransactionActive>(opeer_, soa.Decode<mirror::Object*>(thread_name));
424  soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
425      SetInt<kTransactionActive>(opeer_, thread_priority);
426}
427
428void Thread::SetThreadName(const char* name) {
429  name_->assign(name);
430  ::art::SetThreadName(name);
431  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
432}
433
434void Thread::InitStackHwm() {
435  void* stack_base;
436  size_t stack_size;
437  GetThreadStack(pthread_self_, &stack_base, &stack_size);
438
439  // TODO: include this in the thread dumps; potentially useful in SIGQUIT output?
440  VLOG(threads) << StringPrintf("Native stack is at %p (%s)", stack_base, PrettySize(stack_size).c_str());
441
442  stack_begin_ = reinterpret_cast<byte*>(stack_base);
443  stack_size_ = stack_size;
444
445  if (stack_size_ <= kStackOverflowReservedBytes) {
446    LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << stack_size_ << " bytes)";
447  }
448
449  // TODO: move this into the Linux GetThreadStack implementation.
450#if !defined(__APPLE__)
451  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
452  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
453  // will be broken because we'll die long before we get close to 2GB.
454  bool is_main_thread = (::art::GetTid() == getpid());
455  if (is_main_thread) {
456    rlimit stack_limit;
457    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
458      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
459    }
460    if (stack_limit.rlim_cur == RLIM_INFINITY) {
461      // Find the default stack size for new threads...
462      pthread_attr_t default_attributes;
463      size_t default_stack_size;
464      CHECK_PTHREAD_CALL(pthread_attr_init, (&default_attributes), "default stack size query");
465      CHECK_PTHREAD_CALL(pthread_attr_getstacksize, (&default_attributes, &default_stack_size),
466                         "default stack size query");
467      CHECK_PTHREAD_CALL(pthread_attr_destroy, (&default_attributes), "default stack size query");
468
469      // ...and use that as our limit.
470      size_t old_stack_size = stack_size_;
471      stack_size_ = default_stack_size;
472      stack_begin_ += (old_stack_size - stack_size_);
473      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
474                    << " to " << PrettySize(stack_size_)
475                    << " with base " << reinterpret_cast<void*>(stack_begin_);
476    }
477  }
478#endif
479
480  // Set stack_end_ to the bottom of the stack, reserving space for stack overflow handling.
481  ResetDefaultStackEnd();
482
483  // Sanity check.
484  int stack_variable;
485  CHECK_GT(&stack_variable, reinterpret_cast<void*>(stack_end_));
486}
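// Sketch (not part of thread.cc) of the two queries InitStackHwm combines on the main
// thread: the current RLIMIT_STACK soft limit, and the default stack size a newly created
// pthread would receive.
#if 0  // illustrative only, not compiled
#include <pthread.h>
#include <sys/resource.h>
#include <cstddef>

static bool QueryStackLimits(rlim_t* soft_limit, size_t* default_thread_stack) {
  rlimit stack_limit;
  if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
    return false;
  }
  *soft_limit = stack_limit.rlim_cur;  // RLIM_INFINITY means "unlimited".
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_getstacksize(&attr, default_thread_stack);
  pthread_attr_destroy(&attr);
  return true;
}
#endif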
487
488void Thread::ShortDump(std::ostream& os) const {
489  os << "Thread[";
490  if (GetThreadId() != 0) {
491    // If we're in kStarting, we won't have a thin lock id or tid yet.
492    os << GetThreadId()
493             << ",tid=" << GetTid() << ',';
494  }
495  os << GetState()
496           << ",Thread*=" << this
497           << ",peer=" << opeer_
498           << ",\"" << *name_ << "\""
499           << "]";
500}
501
502void Thread::Dump(std::ostream& os) const {
503  DumpState(os);
504  DumpStack(os);
505}
506
507mirror::String* Thread::GetThreadName(const ScopedObjectAccessUnchecked& soa) const {
508  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
509  return (opeer_ != nullptr) ? reinterpret_cast<mirror::String*>(f->GetObject(opeer_)) : nullptr;
510}
511
512void Thread::GetThreadName(std::string& name) const {
513  name.assign(*name_);
514}
515
516uint64_t Thread::GetCpuMicroTime() const {
517#if defined(HAVE_POSIX_CLOCKS)
518  clockid_t cpu_clock_id;
519  pthread_getcpuclockid(pthread_self_, &cpu_clock_id);
520  timespec now;
521  clock_gettime(cpu_clock_id, &now);
522  return static_cast<uint64_t>(now.tv_sec) * 1000000LL + now.tv_nsec / 1000LL;
523#else
524  UNIMPLEMENTED(WARNING);
525  return -1;
526#endif
527}
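// Sketch (not part of thread.cc) of the per-thread CPU clock read in GetCpuMicroTime,
// expressed with plain POSIX calls: resolve the calling thread's CPU-time clock and
// convert the resulting timespec to microseconds.
#if 0  // illustrative only, not compiled
#include <pthread.h>
#include <time.h>
#include <cstdint>

static uint64_t SelfCpuMicros() {
  clockid_t cpu_clock_id;
  pthread_getcpuclockid(pthread_self(), &cpu_clock_id);
  timespec now;
  clock_gettime(cpu_clock_id, &now);
  return static_cast<uint64_t>(now.tv_sec) * 1000000u + now.tv_nsec / 1000u;
}
#endif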
528
529void Thread::AtomicSetFlag(ThreadFlag flag) {
530  android_atomic_or(flag, &state_and_flags_.as_int);
531}
532
533void Thread::AtomicClearFlag(ThreadFlag flag) {
534  android_atomic_and(-1 ^ flag, &state_and_flags_.as_int);
535}
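// Equivalent flag twiddling (not part of thread.cc) written against std::atomic, for
// comparison with the cutils atomics used above: set a bit with fetch_or, clear it by
// and-ing with the complement. Names here are assumptions for the example.
#if 0  // illustrative only, not compiled
#include <atomic>
#include <cstdint>

enum ExampleFlag : uint32_t { kSuspendBit = 1u << 0, kCheckpointBit = 1u << 1 };

static std::atomic<uint32_t> example_flags{0};

static void SetFlag(ExampleFlag f) {
  example_flags.fetch_or(f, std::memory_order_seq_cst);
}
static void ClearFlag(ExampleFlag f) {
  example_flags.fetch_and(~static_cast<uint32_t>(f), std::memory_order_seq_cst);
}
#endif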
536
537// Attempt to rectify locks so that we dump thread list with required locks before exiting.
538static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
539  LOG(ERROR) << *thread << " suspend count already zero.";
540  Locks::thread_suspend_count_lock_->Unlock(self);
541  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
542    Locks::mutator_lock_->SharedTryLock(self);
543    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
544      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
545    }
546  }
547  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
548    Locks::thread_list_lock_->TryLock(self);
549    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
550      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
551    }
552  }
553  std::ostringstream ss;
554  Runtime::Current()->GetThreadList()->DumpLocked(ss);
555  LOG(FATAL) << ss.str();
556}
557
558void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
559  DCHECK(delta == -1 || delta == +1 || delta == -debug_suspend_count_)
560      << delta << " " << debug_suspend_count_ << " " << this;
561  DCHECK_GE(suspend_count_, debug_suspend_count_) << this;
562  Locks::thread_suspend_count_lock_->AssertHeld(self);
563  if (this != self && !IsSuspended()) {
564    Locks::thread_list_lock_->AssertHeld(self);
565  }
566  if (UNLIKELY(delta < 0 && suspend_count_ <= 0)) {
567    UnsafeLogFatalForSuspendCount(self, this);
568    return;
569  }
570
571  suspend_count_ += delta;
572  if (for_debugger) {
573    debug_suspend_count_ += delta;
574  }
575
576  if (suspend_count_ == 0) {
577    AtomicClearFlag(kSuspendRequest);
578  } else {
579    AtomicSetFlag(kSuspendRequest);
580  }
581}
582
583void Thread::RunCheckpointFunction() {
584  Closure *checkpoints[kMaxCheckpoints];
585
586  // Grab the suspend_count lock and copy the current set of
587  // checkpoints.  Then clear the list and the flag.  The RequestCheckpoint
588  // function will also grab this lock so we prevent a race between setting
589  // the kCheckpointRequest flag and clearing it.
590  {
591    MutexLock mu(this, *Locks::thread_suspend_count_lock_);
592    for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
593      checkpoints[i] = checkpoint_functions_[i];
594      checkpoint_functions_[i] = nullptr;
595    }
596    AtomicClearFlag(kCheckpointRequest);
597  }
598
599  // Outside the lock, run all the checkpoint functions that
600  // we collected.
601  bool found_checkpoint = false;
602  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
603    if (checkpoints[i] != nullptr) {
604      ATRACE_BEGIN("Checkpoint function");
605      checkpoints[i]->Run(this);
606      ATRACE_END();
607      found_checkpoint = true;
608    }
609  }
610  CHECK(found_checkpoint);
611}
612
613bool Thread::RequestCheckpoint(Closure* function) {
614  union StateAndFlags old_state_and_flags;
615  old_state_and_flags.as_int = state_and_flags_.as_int;
616  if (old_state_and_flags.as_struct.state != kRunnable) {
617    return false;  // Fail, thread is suspended and so can't run a checkpoint.
618  }
619
620  uint32_t available_checkpoint = kMaxCheckpoints;
621  for (uint32_t i = 0 ; i < kMaxCheckpoints; ++i) {
622    if (checkpoint_functions_[i] == nullptr) {
623      available_checkpoint = i;
624      break;
625    }
626  }
627  if (available_checkpoint == kMaxCheckpoints) {
628    // No checkpoint slot is available, so we can't run a checkpoint.
629    return false;
630  }
631  checkpoint_functions_[available_checkpoint] = function;
632
633  // Checkpoint function installed; now install the flag bit.
634  // We must be runnable to request a checkpoint.
635  DCHECK_EQ(old_state_and_flags.as_struct.state, kRunnable);
636  union StateAndFlags new_state_and_flags;
637  new_state_and_flags.as_int = old_state_and_flags.as_int;
638  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
639  int succeeded = android_atomic_acquire_cas(old_state_and_flags.as_int, new_state_and_flags.as_int,
640                                         &state_and_flags_.as_int);
641  if (UNLIKELY(succeeded != 0)) {
642    // The thread changed state before the checkpoint was installed.
643    CHECK_EQ(checkpoint_functions_[available_checkpoint], function);
644    checkpoint_functions_[available_checkpoint] = nullptr;
645  } else {
646    CHECK_EQ(ReadFlag(kCheckpointRequest), true);
647  }
648  return succeeded == 0;
649}
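// Sketch (not part of thread.cc) of the compare-and-swap step in RequestCheckpoint, using
// std::atomic: the flag bit is installed only if the packed state/flags word still holds
// the value we sampled, i.e. the thread has not left the runnable state in the meantime.
#if 0  // illustrative only, not compiled
#include <atomic>
#include <cstdint>

static bool TryInstallBit(std::atomic<uint32_t>& state_and_flags, uint32_t observed,
                          uint32_t bit) {
  uint32_t desired = observed | bit;
  // Returns false (leaving the word untouched) if another thread changed it first.
  return state_and_flags.compare_exchange_strong(observed, desired,
                                                 std::memory_order_acquire);
}
#endif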
650
651void Thread::FullSuspendCheck() {
652  VLOG(threads) << this << " self-suspending";
653  ATRACE_BEGIN("Full suspend check");
654  // Make thread appear suspended to other threads, release mutator_lock_.
655  TransitionFromRunnableToSuspended(kSuspended);
656  // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
657  TransitionFromSuspendedToRunnable();
658  ATRACE_END();
659  VLOG(threads) << this << " self-reviving";
660}
661
662void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
663  std::string group_name;
664  int priority;
665  bool is_daemon = false;
666  Thread* self = Thread::Current();
667
668  if (self != nullptr && thread != nullptr && thread->opeer_ != nullptr) {
669    ScopedObjectAccessUnchecked soa(self);
670    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(thread->opeer_);
671    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(thread->opeer_);
672
673    mirror::Object* thread_group =
674        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->opeer_);
675
676    if (thread_group != nullptr) {
677      mirror::ArtField* group_name_field =
678          soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
679      mirror::String* group_name_string =
680          reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
681      group_name = (group_name_string != nullptr) ? group_name_string->ToModifiedUtf8() : "<null>";
682    }
683  } else {
684    priority = GetNativePriority();
685  }
686
687  std::string scheduler_group_name(GetSchedulerGroupName(tid));
688  if (scheduler_group_name.empty()) {
689    scheduler_group_name = "default";
690  }
691
692  if (thread != nullptr) {
693    os << '"' << *thread->name_ << '"';
694    if (is_daemon) {
695      os << " daemon";
696    }
697    os << " prio=" << priority
698       << " tid=" << thread->GetThreadId()
699       << " " << thread->GetState();
700    if (thread->IsStillStarting()) {
701      os << " (still starting up)";
702    }
703    os << "\n";
704  } else {
705    os << '"' << ::art::GetThreadName(tid) << '"'
706       << " prio=" << priority
707       << " (not attached)\n";
708  }
709
710  if (thread != nullptr) {
711    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
712    os << "  | group=\"" << group_name << "\""
713       << " sCount=" << thread->suspend_count_
714       << " dsCount=" << thread->debug_suspend_count_
715       << " obj=" << reinterpret_cast<void*>(thread->opeer_)
716       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
717  }
718
719  os << "  | sysTid=" << tid
720     << " nice=" << getpriority(PRIO_PROCESS, tid)
721     << " cgrp=" << scheduler_group_name;
722  if (thread != nullptr) {
723    int policy;
724    sched_param sp;
725    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->pthread_self_, &policy, &sp), __FUNCTION__);
726    os << " sched=" << policy << "/" << sp.sched_priority
727       << " handle=" << reinterpret_cast<void*>(thread->pthread_self_);
728  }
729  os << "\n";
730
731  // Grab the scheduler stats for this thread.
732  std::string scheduler_stats;
733  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
734    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
735  } else {
736    scheduler_stats = "0 0 0";
737  }
738
739  char native_thread_state = '?';
740  int utime = 0;
741  int stime = 0;
742  int task_cpu = 0;
743  GetTaskStats(tid, &native_thread_state, &utime, &stime, &task_cpu);
744
745  os << "  | state=" << native_thread_state
746     << " schedstat=( " << scheduler_stats << " )"
747     << " utm=" << utime
748     << " stm=" << stime
749     << " core=" << task_cpu
750     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
751  if (thread != nullptr) {
752    os << "  | stack=" << reinterpret_cast<void*>(thread->stack_begin_) << "-" << reinterpret_cast<void*>(thread->stack_end_)
753       << " stackSize=" << PrettySize(thread->stack_size_) << "\n";
754  }
755}
756
757void Thread::DumpState(std::ostream& os) const {
758  Thread::DumpState(os, this, GetTid());
759}
760
761struct StackDumpVisitor : public StackVisitor {
762  StackDumpVisitor(std::ostream& os, Thread* thread, Context* context, bool can_allocate)
763      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
764      : StackVisitor(thread, context), os(os), thread(thread), can_allocate(can_allocate),
765        last_method(nullptr), last_line_number(0), repetition_count(0), frame_count(0) {
766  }
767
768  virtual ~StackDumpVisitor() {
769    if (frame_count == 0) {
770      os << "  (no managed stack frames)\n";
771    }
772  }
773
774  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
775    mirror::ArtMethod* m = GetMethod();
776    if (m->IsRuntimeMethod()) {
777      return true;
778    }
779    const int kMaxRepetition = 3;
780    mirror::Class* c = m->GetDeclaringClass();
781    mirror::DexCache* dex_cache = c->GetDexCache();
782    int line_number = -1;
783    if (dex_cache != nullptr) {  // be tolerant of bad input
784      const DexFile& dex_file = *dex_cache->GetDexFile();
785      line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
786    }
787    if (line_number == last_line_number && last_method == m) {
788      ++repetition_count;
789    } else {
790      if (repetition_count >= kMaxRepetition) {
791        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
792      }
793      repetition_count = 0;
794      last_line_number = line_number;
795      last_method = m;
796    }
797    if (repetition_count < kMaxRepetition) {
798      os << "  at " << PrettyMethod(m, false);
799      if (m->IsNative()) {
800        os << "(Native method)";
801      } else {
802        mh.ChangeMethod(m);
803        const char* source_file(mh.GetDeclaringClassSourceFile());
804        os << "(" << (source_file != nullptr ? source_file : "unavailable")
805           << ":" << line_number << ")";
806      }
807      os << "\n";
808      if (frame_count == 0) {
809        Monitor::DescribeWait(os, thread);
810      }
811      if (can_allocate) {
812        Monitor::VisitLocks(this, DumpLockedObject, &os);
813      }
814    }
815
816    ++frame_count;
817    return true;
818  }
819
820  static void DumpLockedObject(mirror::Object* o, void* context)
821      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
822    std::ostream& os = *reinterpret_cast<std::ostream*>(context);
823    os << "  - locked <" << o << "> (a " << PrettyTypeOf(o) << ")\n";
824  }
825
826  std::ostream& os;
827  const Thread* thread;
828  const bool can_allocate;
829  MethodHelper mh;
830  mirror::ArtMethod* last_method;
831  int last_line_number;
832  int repetition_count;
833  int frame_count;
834};
835
836static bool ShouldShowNativeStack(const Thread* thread)
837    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
838  ThreadState state = thread->GetState();
839
840  // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
841  if (state > kWaiting && state < kStarting) {
842    return true;
843  }
844
845  // In an Object.wait variant or Thread.sleep? That's not interesting.
846  if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
847    return false;
848  }
849
850  // In some other native method? That's interesting.
851  // We don't just check kNative because native methods will be in state kSuspended if they're
852  // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
853  // thread-startup states if it's early enough in their life cycle (http://b/7432159).
854  mirror::ArtMethod* current_method = thread->GetCurrentMethod(nullptr);
855  return current_method != nullptr && current_method->IsNative();
856}
857
858void Thread::DumpStack(std::ostream& os) const {
859  // TODO: we call this code when dying but may not have suspended the thread ourselves. The
860  //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
861  //       the race with the thread_suspend_count_lock_).
862  // No point dumping for an abort in debug builds where we'll hit the not suspended check in stack.
863  bool dump_for_abort = (gAborting > 0) && !kIsDebugBuild;
864  if (this == Thread::Current() || IsSuspended() || dump_for_abort) {
865    // If we're currently in native code, dump that stack before dumping the managed stack.
866    if (dump_for_abort || ShouldShowNativeStack(this)) {
867      DumpKernelStack(os, GetTid(), "  kernel: ", false);
868      DumpNativeStack(os, GetTid(), "  native: ", false);
869    }
870    UniquePtr<Context> context(Context::Create());
871    StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(), !throwing_OutOfMemoryError_);
872    dumper.WalkStack();
873  } else {
874    os << "Not able to dump stack of thread that isn't suspended";
875  }
876}
877
878void Thread::ThreadExitCallback(void* arg) {
879  Thread* self = reinterpret_cast<Thread*>(arg);
880  if (self->thread_exit_check_count_ == 0) {
881    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's going to use a pthread_key_create destructor?): " << *self;
882    CHECK(is_started_);
883    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
884    self->thread_exit_check_count_ = 1;
885  } else {
886    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
887  }
888}
889
890void Thread::Startup() {
891  CHECK(!is_started_);
892  is_started_ = true;
893  {
894    // MutexLock to keep annotalysis happy.
895    //
896    // Note we use nullptr for the thread because Thread::Current can
897    // return garbage since (is_started_ == true) and
898    // Thread::pthread_key_self_ is not yet initialized.
899    // This was seen on glibc.
900    MutexLock mu(nullptr, *Locks::thread_suspend_count_lock_);
901    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
902                                         *Locks::thread_suspend_count_lock_);
903  }
904
905  // Allocate a TLS slot.
906  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");
907
908  // Double-check the TLS slot allocation.
909  if (pthread_getspecific(pthread_key_self_) != nullptr) {
910    LOG(FATAL) << "Newly-created pthread TLS slot is not nullptr";
911  }
912}
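// Sketch (not part of thread.cc) of the pthread TLS machinery set up in Startup: create a
// key with a destructor that runs at native thread exit, publish a value with
// pthread_setspecific, and read it back with pthread_getspecific (nullptr until set).
#if 0  // illustrative only, not compiled
#include <pthread.h>
#include <cstdio>

static pthread_key_t g_self_key;

static void AtThreadExit(void* value) {
  std::fprintf(stderr, "thread exiting with value %p still attached\n", value);
}

static void SetupKey() {
  pthread_key_create(&g_self_key, AtThreadExit);
  pthread_setspecific(g_self_key, static_cast<void*>(&g_self_key));
  void* current = pthread_getspecific(g_self_key);  // non-null once set on this thread
  (void)current;
}
#endif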
913
914void Thread::FinishStartup() {
915  Runtime* runtime = Runtime::Current();
916  CHECK(runtime->IsStarted());
917
918  // Finish attaching the main thread.
919  ScopedObjectAccess soa(Thread::Current());
920  Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
921
922  Runtime::Current()->GetClassLinker()->RunRootClinits();
923}
924
925void Thread::Shutdown() {
926  CHECK(is_started_);
927  is_started_ = false;
928  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
929  MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
930  if (resume_cond_ != nullptr) {
931    delete resume_cond_;
932    resume_cond_ = nullptr;
933  }
934}
935
936Thread::Thread(bool daemon)
937    : suspend_count_(0),
938      card_table_(nullptr),
939      exception_(nullptr),
940      stack_end_(nullptr),
941      managed_stack_(),
942      jni_env_(nullptr),
943      self_(nullptr),
944      opeer_(nullptr),
945      jpeer_(nullptr),
946      stack_begin_(nullptr),
947      stack_size_(0),
948      thin_lock_thread_id_(0),
949      stack_trace_sample_(nullptr),
950      trace_clock_base_(0),
951      tid_(0),
952      wait_mutex_(new Mutex("a thread wait mutex")),
953      wait_cond_(new ConditionVariable("a thread wait condition variable", *wait_mutex_)),
954      wait_monitor_(nullptr),
955      interrupted_(false),
956      wait_next_(nullptr),
957      monitor_enter_object_(nullptr),
958      top_sirt_(nullptr),
959      runtime_(nullptr),
960      class_loader_override_(nullptr),
961      long_jump_context_(nullptr),
962      throwing_OutOfMemoryError_(false),
963      debug_suspend_count_(0),
964      debug_invoke_req_(new DebugInvokeReq),
965      single_step_control_(new SingleStepControl),
966      deoptimization_shadow_frame_(nullptr),
967      instrumentation_stack_(new std::deque<instrumentation::InstrumentationStackFrame>),
968      name_(new std::string(kThreadNameDuringStartup)),
969      daemon_(daemon),
970      pthread_self_(0),
971      no_thread_suspension_(0),
972      last_no_thread_suspension_cause_(nullptr),
973      thread_exit_check_count_(0),
974      thread_local_start_(nullptr),
975      thread_local_pos_(nullptr),
976      thread_local_end_(nullptr),
977      thread_local_objects_(0),
978      thread_local_alloc_stack_top_(nullptr),
979      thread_local_alloc_stack_end_(nullptr) {
980  CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
981  state_and_flags_.as_struct.flags = 0;
982  state_and_flags_.as_struct.state = kNative;
983  memset(&held_mutexes_[0], 0, sizeof(held_mutexes_));
984  memset(rosalloc_runs_, 0, sizeof(rosalloc_runs_));
985  for (uint32_t i = 0; i < kMaxCheckpoints; ++i) {
986    checkpoint_functions_[i] = nullptr;
987  }
988}
989
990bool Thread::IsStillStarting() const {
991  // You might think you can check whether the state is kStarting, but for much of thread startup,
992  // the thread is in kNative; it might also be in kVmWait.
993  // You might think you can check whether the peer is nullptr, but the peer is actually created and
994  // assigned fairly early on, and needs to be.
995  // It turns out that the last thing to change is the thread name; that's a good proxy for "has
996  // this thread _ever_ entered kRunnable".
997  return (jpeer_ == nullptr && opeer_ == nullptr) || (*name_ == kThreadNameDuringStartup);
998}
999
1000void Thread::AssertNoPendingException() const {
1001  if (UNLIKELY(IsExceptionPending())) {
1002    ScopedObjectAccess soa(Thread::Current());
1003    mirror::Throwable* exception = GetException(nullptr);
1004    LOG(FATAL) << "No pending exception expected: " << exception->Dump();
1005  }
1006}
1007
1008static void MonitorExitVisitor(mirror::Object** object, void* arg, uint32_t /*thread_id*/,
1009                               RootType /*root_type*/)
1010    NO_THREAD_SAFETY_ANALYSIS {
1011  Thread* self = reinterpret_cast<Thread*>(arg);
1012  mirror::Object* entered_monitor = *object;
1013  if (self->HoldsLock(entered_monitor)) {
1014    LOG(WARNING) << "Calling MonitorExit on object "
1015                 << object << " (" << PrettyTypeOf(entered_monitor) << ")"
1016                 << " left locked by native thread "
1017                 << *Thread::Current() << " which is detaching";
1018    entered_monitor->MonitorExit(self);
1019  }
1020}
1021
1022void Thread::Destroy() {
1023  Thread* self = this;
1024  DCHECK_EQ(self, Thread::Current());
1025
1026  if (opeer_ != nullptr) {
1027    ScopedObjectAccess soa(self);
1028    // We may need to call user-supplied managed code, do this before final clean-up.
1029    HandleUncaughtExceptions(soa);
1030    RemoveFromThreadGroup(soa);
1031
1032    // this.nativePeer = 0;
1033    if (Runtime::Current()->IsActiveTransaction()) {
1034      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)->SetLong<true>(opeer_, 0);
1035    } else {
1036      soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)->SetLong<false>(opeer_, 0);
1037    }
1038    Dbg::PostThreadDeath(self);
1039
1040    // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
1041    // who is waiting.
1042    mirror::Object* lock =
1043        soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(opeer_);
1044    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
1045    if (lock != nullptr) {
1046      SirtRef<mirror::Object> sirt_obj(self, lock);
1047      ObjectLock<mirror::Object> locker(self, &sirt_obj);
1048      locker.Notify();
1049    }
1050  }
1051
1052  // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
1053  if (jni_env_ != nullptr) {
1054    jni_env_->monitors.VisitRoots(MonitorExitVisitor, self, 0, kRootVMInternal);
1055  }
1056}
1057
1058Thread::~Thread() {
1059  if (jni_env_ != nullptr && jpeer_ != nullptr) {
1060    // If pthread_create fails we don't have a jni env here.
1061    jni_env_->DeleteGlobalRef(jpeer_);
1062    jpeer_ = nullptr;
1063  }
1064  opeer_ = nullptr;
1065
1066  bool initialized = (jni_env_ != nullptr);  // Did Thread::Init run?
1067  if (initialized) {
1068    delete jni_env_;
1069    jni_env_ = nullptr;
1070  }
1071  CHECK_NE(GetState(), kRunnable);
1072  CHECK_NE(ReadFlag(kCheckpointRequest), true);
1073  CHECK(checkpoint_functions_[0] == nullptr);
1074  CHECK(checkpoint_functions_[1] == nullptr);
1075  CHECK(checkpoint_functions_[2] == nullptr);
1076
1077  // We may be deleting a stillborn thread.
1078  SetStateUnsafe(kTerminated);
1079
1080  delete wait_cond_;
1081  delete wait_mutex_;
1082
1083  if (long_jump_context_ != nullptr) {
1084    delete long_jump_context_;
1085  }
1086
1087  if (initialized) {
1088    CleanupCpu();
1089  }
1090
1091  delete debug_invoke_req_;
1092  delete single_step_control_;
1093  delete instrumentation_stack_;
1094  delete name_;
1095  delete stack_trace_sample_;
1096
1097  Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
1098
1099  TearDownAlternateSignalStack();
1100}
1101
1102void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
1103  if (!IsExceptionPending()) {
1104    return;
1105  }
1106  ScopedLocalRef<jobject> peer(jni_env_, soa.AddLocalReference<jobject>(opeer_));
1107  ScopedThreadStateChange tsc(this, kNative);
1108
1109  // Get and clear the exception.
1110  ScopedLocalRef<jthrowable> exception(jni_env_, jni_env_->ExceptionOccurred());
1111  jni_env_->ExceptionClear();
1112
1113  // If the thread has its own handler, use that.
1114  ScopedLocalRef<jobject> handler(jni_env_,
1115                                  jni_env_->GetObjectField(peer.get(),
1116                                                           WellKnownClasses::java_lang_Thread_uncaughtHandler));
1117  if (handler.get() == nullptr) {
1118    // Otherwise use the thread group's default handler.
1119    handler.reset(jni_env_->GetObjectField(peer.get(), WellKnownClasses::java_lang_Thread_group));
1120  }
1121
1122  // Call the handler.
1123  jni_env_->CallVoidMethod(handler.get(),
1124                           WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException,
1125                           peer.get(), exception.get());
1126
1127  // If the handler threw, clear that exception too.
1128  jni_env_->ExceptionClear();
1129}
1130
1131void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
1132  // this.group.removeThread(this);
1133  // group can be null if we're in the compiler or a test.
1134  mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(opeer_);
1135  if (ogroup != nullptr) {
1136    ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
1137    ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(opeer_));
1138    ScopedThreadStateChange tsc(soa.Self(), kNative);
1139    jni_env_->CallVoidMethod(group.get(), WellKnownClasses::java_lang_ThreadGroup_removeThread,
1140                             peer.get());
1141  }
1142}
1143
1144size_t Thread::NumSirtReferences() {
1145  size_t count = 0;
1146  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
1147    count += cur->NumberOfReferences();
1148  }
1149  return count;
1150}
1151
1152bool Thread::SirtContains(jobject obj) const {
1153  StackReference<mirror::Object>* sirt_entry =
1154      reinterpret_cast<StackReference<mirror::Object>*>(obj);
1155  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
1156    if (cur->Contains(sirt_entry)) {
1157      return true;
1158    }
1159  }
1160  // JNI code invoked from portable code uses shadow frames rather than the SIRT.
1161  return managed_stack_.ShadowFramesContain(sirt_entry);
1162}
1163
1164void Thread::SirtVisitRoots(RootCallback* visitor, void* arg, uint32_t thread_id) {
1165  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
1166    size_t num_refs = cur->NumberOfReferences();
1167    for (size_t j = 0; j < num_refs; ++j) {
1168      mirror::Object* object = cur->GetReference(j);
1169      if (object != nullptr) {
1170        mirror::Object* old_obj = object;
1171        visitor(&object, arg, thread_id, kRootNativeStack);
1172        if (old_obj != object) {
1173          cur->SetReference(j, object);
1174        }
1175      }
1176    }
1177  }
1178}
1179
1180mirror::Object* Thread::DecodeJObject(jobject obj) const {
1181  Locks::mutator_lock_->AssertSharedHeld(this);
1182  if (obj == nullptr) {
1183    return nullptr;
1184  }
1185  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
1186  IndirectRefKind kind = GetIndirectRefKind(ref);
1187  mirror::Object* result;
1188  // The "kinds" below are sorted by the frequency we expect to encounter them.
1189  if (kind == kLocal) {
1190    IndirectReferenceTable& locals = jni_env_->locals;
1191    result = const_cast<mirror::Object*>(locals.Get(ref));
1192  } else if (kind == kSirtOrInvalid) {
1193    // TODO: make stack indirect reference table lookup more efficient.
1194    // Check if this is a local reference in the SIRT.
1195    if (LIKELY(SirtContains(obj))) {
1196      // Read from SIRT.
1197      result = reinterpret_cast<StackReference<mirror::Object>*>(obj)->AsMirrorPtr();
1198    } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) {
1199      // Assume an invalid local reference is actually a direct pointer.
1200      result = reinterpret_cast<mirror::Object*>(obj);
1201    } else {
1202      result = kInvalidIndirectRefObject;
1203    }
1204  } else if (kind == kGlobal) {
1205    JavaVMExt* vm = Runtime::Current()->GetJavaVM();
1206    IndirectReferenceTable& globals = vm->globals;
1207    ReaderMutexLock mu(const_cast<Thread*>(this), vm->globals_lock);
1208    result = const_cast<mirror::Object*>(globals.Get(ref));
1209  } else {
1210    DCHECK_EQ(kind, kWeakGlobal);
1211    result = Runtime::Current()->GetJavaVM()->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
1212    if (result == kClearedJniWeakGlobal) {
1213      // This is a special case where it's okay to return nullptr.
1214      return nullptr;
1215    }
1216  }
1217
1218  if (UNLIKELY(result == nullptr)) {
1219    JniAbortF(nullptr, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
1220  } else {
1221    if (result != kInvalidIndirectRefObject) {
1222      VerifyObject(result);
1223    }
1224  }
1225  return result;
1226}
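// The reference kinds DecodeJObject distinguishes correspond to the standard JNI reference
// APIs; a sketch (not part of thread.cc) of creating and releasing each kind from native
// code, with an assumed JNIEnv and object in hand.
#if 0  // illustrative only, not compiled
#include <jni.h>

static void ReferenceKinds(JNIEnv* env, jobject some_object) {
  jobject local = env->NewLocalRef(some_object);    // resolved via the local ref table
  jobject global = env->NewGlobalRef(some_object);  // resolved via the global ref table
  jweak weak = env->NewWeakGlobalRef(some_object);  // may resolve to a cleared sentinel
  env->DeleteWeakGlobalRef(weak);
  env->DeleteGlobalRef(global);
  env->DeleteLocalRef(local);
}
#endif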
1227
1228// Implements java.lang.Thread.interrupted.
1229bool Thread::Interrupted() {
1230  MutexLock mu(Thread::Current(), *wait_mutex_);
1231  bool interrupted = interrupted_;
1232  interrupted_ = false;
1233  return interrupted;
1234}
1235
1236// Implements java.lang.Thread.isInterrupted.
1237bool Thread::IsInterrupted() {
1238  MutexLock mu(Thread::Current(), *wait_mutex_);
1239  return interrupted_;
1240}
1241
1242void Thread::Interrupt() {
1243  Thread* self = Thread::Current();
1244  MutexLock mu(self, *wait_mutex_);
1245  if (interrupted_) {
1246    return;
1247  }
1248  interrupted_ = true;
1249  NotifyLocked(self);
1250}
1251
1252void Thread::Notify() {
1253  Thread* self = Thread::Current();
1254  MutexLock mu(self, *wait_mutex_);
1255  NotifyLocked(self);
1256}
1257
1258void Thread::NotifyLocked(Thread* self) {
1259  if (wait_monitor_ != nullptr) {
1260    wait_cond_->Signal(self);
1261  }
1262}
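// The interrupt protocol above in miniature (not part of thread.cc), using standard C++
// primitives: the flag is read and written under a mutex, and interrupting signals the
// condition variable so a waiter parked on it re-checks its predicate.
#if 0  // illustrative only, not compiled
#include <condition_variable>
#include <mutex>

class InterruptibleWaiter {
 public:
  bool ConsumeInterrupt() {  // Thread.interrupted() semantics: read and clear.
    std::lock_guard<std::mutex> lock(mu_);
    bool was = interrupted_;
    interrupted_ = false;
    return was;
  }
  void Interrupt() {  // Thread.interrupt() semantics: set and wake any waiter.
    std::lock_guard<std::mutex> lock(mu_);
    if (!interrupted_) {
      interrupted_ = true;
      cond_.notify_one();
    }
  }

 private:
  std::mutex mu_;
  std::condition_variable cond_;
  bool interrupted_ = false;
};
#endif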
1263
1264class CountStackDepthVisitor : public StackVisitor {
1265 public:
1266  explicit CountStackDepthVisitor(Thread* thread)
1267      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1268      : StackVisitor(thread, nullptr),
1269        depth_(0), skip_depth_(0), skipping_(true) {}
1270
1271  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1272    // We want to skip frames up to and including the exception's constructor.
1273    // Note we also skip the frame if it doesn't have a method (namely the callee
1274    // save frame)
1275    mirror::ArtMethod* m = GetMethod();
1276    if (skipping_ && !m->IsRuntimeMethod() &&
1277        !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
1278      skipping_ = false;
1279    }
1280    if (!skipping_) {
1281      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
1282        ++depth_;
1283      }
1284    } else {
1285      ++skip_depth_;
1286    }
1287    return true;
1288  }
1289
1290  int GetDepth() const {
1291    return depth_;
1292  }
1293
1294  int GetSkipDepth() const {
1295    return skip_depth_;
1296  }
1297
1298 private:
1299  uint32_t depth_;
1300  uint32_t skip_depth_;
1301  bool skipping_;
1302};
1303
1304class BuildInternalStackTraceVisitor : public StackVisitor {
1305 public:
1306  explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
1307      : StackVisitor(thread, nullptr), self_(self),
1308        skip_depth_(skip_depth), count_(0), dex_pc_trace_(nullptr), method_trace_(nullptr) {}
1309
1310  bool Init(int depth)
1311      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1312    // Allocate method trace with an extra slot that will hold the PC trace
1313    SirtRef<mirror::ObjectArray<mirror::Object> >
1314        method_trace(self_,
1315                     Runtime::Current()->GetClassLinker()->AllocObjectArray<mirror::Object>(self_,
1316                                                                                            depth + 1));
1317    if (method_trace.get() == nullptr) {
1318      return false;
1319    }
1320    mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth);
1321    if (dex_pc_trace == nullptr) {
1322      return false;
1323    }
1324    // Save PC trace in last element of method trace, also places it into the
1325    // object graph.
1326    // We are called from native: use non-transactional mode.
1327    method_trace->Set<false>(depth, dex_pc_trace);
1328    // Set the Object*s and assert that no thread suspension is now possible.
1329    const char* last_no_suspend_cause =
1330        self_->StartAssertNoThreadSuspension("Building internal stack trace");
1331    CHECK(last_no_suspend_cause == nullptr) << last_no_suspend_cause;
1332    method_trace_ = method_trace.get();
1333    dex_pc_trace_ = dex_pc_trace;
1334    return true;
1335  }
1336
1337  virtual ~BuildInternalStackTraceVisitor() {
1338    if (method_trace_ != nullptr) {
1339      self_->EndAssertNoThreadSuspension(nullptr);
1340    }
1341  }
1342
1343  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1344    if (method_trace_ == nullptr || dex_pc_trace_ == nullptr) {
1345      return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
1346    }
1347    if (skip_depth_ > 0) {
1348      skip_depth_--;
1349      return true;
1350    }
1351    mirror::ArtMethod* m = GetMethod();
1352    if (m->IsRuntimeMethod()) {
1353      return true;  // Ignore runtime frames (in particular callee save).
1354    }
1355    // TODO dedup this code.
1356    if (Runtime::Current()->IsActiveTransaction()) {
1357      method_trace_->Set<true>(count_, m);
1358      dex_pc_trace_->Set<true>(count_, m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc());
1359    } else {
1360      method_trace_->Set<false>(count_, m);
1361      dex_pc_trace_->Set<false>(count_, m->IsProxyMethod() ? DexFile::kDexNoIndex : GetDexPc());
1362    }
1363    ++count_;
1364    return true;
1365  }
1366
1367  mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
1368    return method_trace_;
1369  }
1370
1371 private:
1372  Thread* const self_;
1373  // How many more frames to skip.
1374  int32_t skip_depth_;
1375  // Current position down stack trace.
1376  uint32_t count_;
1377  // Array of dex PC values.
1378  mirror::IntArray* dex_pc_trace_;
1379  // An array of the methods on the stack, the last entry is a reference to the PC trace.
1380  mirror::ObjectArray<mirror::Object>* method_trace_;
1381};
1382
1383jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const {
1384  // Compute depth of stack
1385  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
1386  count_visitor.WalkStack();
1387  int32_t depth = count_visitor.GetDepth();
1388  int32_t skip_depth = count_visitor.GetSkipDepth();
1389
1390  // Build internal stack trace.
1391  BuildInternalStackTraceVisitor build_trace_visitor(soa.Self(), const_cast<Thread*>(this),
1392                                                     skip_depth);
1393  if (!build_trace_visitor.Init(depth)) {
1394    return nullptr;  // Allocation failed.
1395  }
1396  build_trace_visitor.WalkStack();
1397  mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
1398  if (kIsDebugBuild) {
1399    for (int32_t i = 0; i < trace->GetLength(); ++i) {
1400      CHECK(trace->Get(i) != nullptr);
1401    }
1402  }
1403  return soa.AddLocalReference<jobjectArray>(trace);
1404}
1405
1406jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
1407    jobjectArray output_array, int* stack_depth) {
1408  // Transition into runnable state to work on Object*/Array*
1409  ScopedObjectAccess soa(env);
1410  // Decode the internal stack trace into the depth, method trace and PC trace
1411  int32_t depth = soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal)->GetLength() - 1;
1412
1413  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1414
1415  jobjectArray result;
1416
1417  if (output_array != nullptr) {
1418    // Reuse the array we were given.
1419    result = output_array;
1420    // ...adjusting the number of frames we'll write to not exceed the array length.
1421    const int32_t traces_length =
1422        soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->GetLength();
1423    depth = std::min(depth, traces_length);
1424  } else {
1425    // Create java_trace array and place in local reference table
1426    mirror::ObjectArray<mirror::StackTraceElement>* java_traces =
1427        class_linker->AllocStackTraceElementArray(soa.Self(), depth);
1428    if (java_traces == nullptr) {
1429      return nullptr;
1430    }
1431    result = soa.AddLocalReference<jobjectArray>(java_traces);
1432  }
1433
1434  if (stack_depth != nullptr) {
1435    *stack_depth = depth;
1436  }
1437
1438  for (int32_t i = 0; i < depth; ++i) {
1439    mirror::ObjectArray<mirror::Object>* method_trace =
1440          soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
1441    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
1442    mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i));
1443    MethodHelper mh(method);
1444    int32_t line_number;
1445    SirtRef<mirror::String> class_name_object(soa.Self(), nullptr);
1446    SirtRef<mirror::String> source_name_object(soa.Self(), nullptr);
1447    if (method->IsProxyMethod()) {
1448      line_number = -1;
1449      class_name_object.reset(method->GetDeclaringClass()->GetName());
1450      // source_name_object intentionally left null for proxy methods
1451    } else {
1452      mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
1453      uint32_t dex_pc = pc_trace->Get(i);
1454      line_number = mh.GetLineNumFromDexPC(dex_pc);
1455      // Allocate element, potentially triggering GC
1456      // TODO: reuse class_name_object via Class::name_?
1457      const char* descriptor = mh.GetDeclaringClassDescriptor();
1458      CHECK(descriptor != nullptr);
1459      std::string class_name(PrettyDescriptor(descriptor));
1460      class_name_object.reset(mirror::String::AllocFromModifiedUtf8(soa.Self(), class_name.c_str()));
1461      if (class_name_object.get() == nullptr) {
1462        return nullptr;
1463      }
1464      const char* source_file = mh.GetDeclaringClassSourceFile();
1465      if (source_file != nullptr) {
1466        source_name_object.reset(mirror::String::AllocFromModifiedUtf8(soa.Self(), source_file));
1467        if (source_name_object.get() == nullptr) {
1468          return nullptr;
1469        }
1470      }
1471    }
1472    const char* method_name = mh.GetName();
1473    CHECK(method_name != nullptr);
1474    SirtRef<mirror::String> method_name_object(soa.Self(),
1475                                               mirror::String::AllocFromModifiedUtf8(soa.Self(),
1476                                                                                     method_name));
1477    if (method_name_object.get() == nullptr) {
1478      return nullptr;
1479    }
1480    mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(
1481        soa.Self(), class_name_object, method_name_object, source_name_object, line_number);
1482    if (obj == nullptr) {
1483      return nullptr;
1484    }
1485    // We are called from native: use non-transactional mode.
1486    soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result)->Set<false>(i, obj);
1487  }
1488  return result;
1489}
1490
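// Convenience wrapper that formats the detail message with printf-style arguments. A hypothetical
// call site (self, throw_location, length and index are illustrative) might look like:
//   self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
//                            "length=%d; index=%d", length, index);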
1491void Thread::ThrowNewExceptionF(const ThrowLocation& throw_location,
1492                                const char* exception_class_descriptor, const char* fmt, ...) {
1493  va_list args;
1494  va_start(args, fmt);
1495  ThrowNewExceptionV(throw_location, exception_class_descriptor,
1496                     fmt, args);
1497  va_end(args);
1498}
1499
1500void Thread::ThrowNewExceptionV(const ThrowLocation& throw_location,
1501                                const char* exception_class_descriptor,
1502                                const char* fmt, va_list ap) {
1503  std::string msg;
1504  StringAppendV(&msg, fmt, ap);
1505  ThrowNewException(throw_location, exception_class_descriptor, msg.c_str());
1506}
1507
1508void Thread::ThrowNewException(const ThrowLocation& throw_location, const char* exception_class_descriptor,
1509                               const char* msg) {
1510  AssertNoPendingException();  // Callers should either clear or call ThrowNewWrappedException.
1511  ThrowNewWrappedException(throw_location, exception_class_descriptor, msg);
1512}
1513
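// Allocates and throws an exception of the given type, wrapping any currently pending exception
// as its cause. A constructor is chosen based on whether a message and a cause are present; if
// the runtime is not started (the common case in the compiler) the detail message and cause are
// set directly instead of invoking <init>. Falls back to the pre-allocated OutOfMemoryError if
// the exception object itself cannot be allocated.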
1514void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
1515                                      const char* exception_class_descriptor,
1516                                      const char* msg) {
1517  DCHECK_EQ(this, Thread::Current());
1518  // Ensure we don't forget arguments over object allocation.
1519  SirtRef<mirror::Object> saved_throw_this(this, throw_location.GetThis());
1520  SirtRef<mirror::ArtMethod> saved_throw_method(this, throw_location.GetMethod());
1521  // Ignore the cause throw location. TODO: should we report this as a re-throw?
1522  SirtRef<mirror::Throwable> cause(this, GetException(nullptr));
1523  ClearException();
1524  Runtime* runtime = Runtime::Current();
1525
1526  mirror::ClassLoader* cl = nullptr;
1527  if (saved_throw_method.get() != nullptr) {
1528    cl = saved_throw_method.get()->GetDeclaringClass()->GetClassLoader();
1529  }
1530  SirtRef<mirror::ClassLoader> class_loader(this, cl);
1531  SirtRef<mirror::Class>
1532      exception_class(this, runtime->GetClassLinker()->FindClass(exception_class_descriptor,
1533                                                                 class_loader));
1534  if (UNLIKELY(exception_class.get() == nullptr)) {
1535    CHECK(IsExceptionPending());
1536    LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
1537    return;
1538  }
1539
1540  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(exception_class, true, true))) {
1541    DCHECK(IsExceptionPending());
1542    return;
1543  }
1544  DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
1545  SirtRef<mirror::Throwable> exception(this,
1546                                down_cast<mirror::Throwable*>(exception_class->AllocObject(this)));
1547
1548  // If we couldn't allocate the exception, throw the pre-allocated out of memory exception.
1549  if (exception.get() == nullptr) {
1550    ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
1551                                         throw_location.GetDexPc());
1552    SetException(gc_safe_throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1553    return;
1554  }
1555
1556  // Choose an appropriate constructor and set up the arguments.
1557  const char* signature;
1558  const char* shorty;
1559  SirtRef<mirror::String> msg_string(this, nullptr);
1560  if (msg != nullptr) {
1561    // Ensure we remember this and the method over the String allocation.
1562    msg_string.reset(mirror::String::AllocFromModifiedUtf8(this, msg));
1563    if (UNLIKELY(msg_string.get() == nullptr)) {
1564      CHECK(IsExceptionPending());  // OOME.
1565      return;
1566    }
1567    if (cause.get() == nullptr) {
1568      shorty = "VL";
1569      signature = "(Ljava/lang/String;)V";
1570    } else {
1571      shorty = "VLL";
1572      signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
1573    }
1574  } else {
1575    if (cause.get() == nullptr) {
1576      shorty = "V";
1577      signature = "()V";
1578    } else {
1579      shorty = "VL";
1580      signature = "(Ljava/lang/Throwable;)V";
1581    }
1582  }
1583  mirror::ArtMethod* exception_init_method =
1584      exception_class->FindDeclaredDirectMethod("<init>", signature);
1585
1586  CHECK(exception_init_method != nullptr) << "No <init>" << signature << " in "
1587      << PrettyDescriptor(exception_class_descriptor);
1588
1589  if (UNLIKELY(!runtime->IsStarted())) {
1590    // Something is trying to throw an exception without a started runtime, which is the common
1591    // case in the compiler. We won't be able to invoke the constructor of the exception, so set
1592    // the exception fields directly.
1593    if (msg != nullptr) {
1594      exception->SetDetailMessage(msg_string.get());
1595    }
1596    if (cause.get() != nullptr) {
1597      exception->SetCause(cause.get());
1598    }
1599    ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
1600                                         throw_location.GetDexPc());
1601    SetException(gc_safe_throw_location, exception.get());
1602  } else {
1603    ArgArray args(shorty, strlen(shorty));
1604    args.Append(exception.get());
1605    if (msg != nullptr) {
1606      args.Append(msg_string.get());
1607    }
1608    if (cause.get() != nullptr) {
1609      args.Append(cause.get());
1610    }
1611    JValue result;
1612    exception_init_method->Invoke(this, args.GetArray(), args.GetNumBytes(), &result, shorty);
1613    if (LIKELY(!IsExceptionPending())) {
1614      ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
1615                                           throw_location.GetDexPc());
1616      SetException(gc_safe_throw_location, exception.get());
1617    }
1618  }
1619}
1620
1621void Thread::ThrowOutOfMemoryError(const char* msg) {
1622  LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
1623      msg, (throwing_OutOfMemoryError_ ? " (recursive case)" : ""));
1624  ThrowLocation throw_location = GetCurrentLocationForThrow();
1625  if (!throwing_OutOfMemoryError_) {
1626    throwing_OutOfMemoryError_ = true;
1627    ThrowNewException(throw_location, "Ljava/lang/OutOfMemoryError;", msg);
1628    throwing_OutOfMemoryError_ = false;
1629  } else {
1630    Dump(LOG(ERROR));  // The pre-allocated OOME has no stack, so help out and log one.
1631    SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1632  }
1633}
1634
1635Thread* Thread::CurrentFromGdb() {
1636  return Thread::Current();
1637}
1638
1639void Thread::DumpFromGdb() const {
1640  std::ostringstream ss;
1641  Dump(ss);
1642  std::string str(ss.str());
1643  // Log to stderr for debugging command-line processes.
1644  std::cerr << str;
1645#ifdef HAVE_ANDROID_OS
1646  // Log to logcat for debugging framework processes.
1647  LOG(INFO) << str;
1648#endif
1649}
1650
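// Maps each Thread entry point offset to its name for DumpThreadOffset below. The entries must be
// listed in the same order as the entry point fields in Thread; DumpThreadOffset checks each
// expected offset as it scans the table.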
1651struct EntryPointInfo {
1652  uint32_t offset;
1653  const char* name;
1654};
1655#define INTERPRETER_ENTRY_POINT_INFO(x) { INTERPRETER_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
1656#define JNI_ENTRY_POINT_INFO(x)         { JNI_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
1657#define PORTABLE_ENTRY_POINT_INFO(x)    { PORTABLE_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
1658#define QUICK_ENTRY_POINT_INFO(x)       { QUICK_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
1659static const EntryPointInfo gThreadEntryPointInfo[] = {
1660  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge),
1661  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge),
1662  JNI_ENTRY_POINT_INFO(pDlsymLookup),
1663  PORTABLE_ENTRY_POINT_INFO(pPortableImtConflictTrampoline),
1664  PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampoline),
1665  PORTABLE_ENTRY_POINT_INFO(pPortableToInterpreterBridge),
1666  QUICK_ENTRY_POINT_INFO(pAllocArray),
1667  QUICK_ENTRY_POINT_INFO(pAllocArrayResolved),
1668  QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck),
1669  QUICK_ENTRY_POINT_INFO(pAllocObject),
1670  QUICK_ENTRY_POINT_INFO(pAllocObjectResolved),
1671  QUICK_ENTRY_POINT_INFO(pAllocObjectInitialized),
1672  QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck),
1673  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray),
1674  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck),
1675  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial),
1676  QUICK_ENTRY_POINT_INFO(pCheckCast),
1677  QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage),
1678  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess),
1679  QUICK_ENTRY_POINT_INFO(pInitializeType),
1680  QUICK_ENTRY_POINT_INFO(pResolveString),
1681  QUICK_ENTRY_POINT_INFO(pSet32Instance),
1682  QUICK_ENTRY_POINT_INFO(pSet32Static),
1683  QUICK_ENTRY_POINT_INFO(pSet64Instance),
1684  QUICK_ENTRY_POINT_INFO(pSet64Static),
1685  QUICK_ENTRY_POINT_INFO(pSetObjInstance),
1686  QUICK_ENTRY_POINT_INFO(pSetObjStatic),
1687  QUICK_ENTRY_POINT_INFO(pGet32Instance),
1688  QUICK_ENTRY_POINT_INFO(pGet32Static),
1689  QUICK_ENTRY_POINT_INFO(pGet64Instance),
1690  QUICK_ENTRY_POINT_INFO(pGet64Static),
1691  QUICK_ENTRY_POINT_INFO(pGetObjInstance),
1692  QUICK_ENTRY_POINT_INFO(pGetObjStatic),
1693  QUICK_ENTRY_POINT_INFO(pAputObjectWithNullAndBoundCheck),
1694  QUICK_ENTRY_POINT_INFO(pAputObjectWithBoundCheck),
1695  QUICK_ENTRY_POINT_INFO(pAputObject),
1696  QUICK_ENTRY_POINT_INFO(pHandleFillArrayData),
1697  QUICK_ENTRY_POINT_INFO(pJniMethodStart),
1698  QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized),
1699  QUICK_ENTRY_POINT_INFO(pJniMethodEnd),
1700  QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized),
1701  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference),
1702  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
1703  QUICK_ENTRY_POINT_INFO(pLockObject),
1704  QUICK_ENTRY_POINT_INFO(pUnlockObject),
1705  QUICK_ENTRY_POINT_INFO(pCmpgDouble),
1706  QUICK_ENTRY_POINT_INFO(pCmpgFloat),
1707  QUICK_ENTRY_POINT_INFO(pCmplDouble),
1708  QUICK_ENTRY_POINT_INFO(pCmplFloat),
1709  QUICK_ENTRY_POINT_INFO(pFmod),
1710  QUICK_ENTRY_POINT_INFO(pSqrt),
1711  QUICK_ENTRY_POINT_INFO(pL2d),
1712  QUICK_ENTRY_POINT_INFO(pFmodf),
1713  QUICK_ENTRY_POINT_INFO(pL2f),
1714  QUICK_ENTRY_POINT_INFO(pD2iz),
1715  QUICK_ENTRY_POINT_INFO(pF2iz),
1716  QUICK_ENTRY_POINT_INFO(pIdivmod),
1717  QUICK_ENTRY_POINT_INFO(pD2l),
1718  QUICK_ENTRY_POINT_INFO(pF2l),
1719  QUICK_ENTRY_POINT_INFO(pLdiv),
1720  QUICK_ENTRY_POINT_INFO(pLmod),
1721  QUICK_ENTRY_POINT_INFO(pLmul),
1722  QUICK_ENTRY_POINT_INFO(pShlLong),
1723  QUICK_ENTRY_POINT_INFO(pShrLong),
1724  QUICK_ENTRY_POINT_INFO(pUshrLong),
1725  QUICK_ENTRY_POINT_INFO(pIndexOf),
1726  QUICK_ENTRY_POINT_INFO(pMemcmp16),
1727  QUICK_ENTRY_POINT_INFO(pStringCompareTo),
1728  QUICK_ENTRY_POINT_INFO(pMemcpy),
1729  QUICK_ENTRY_POINT_INFO(pQuickImtConflictTrampoline),
1730  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline),
1731  QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge),
1732  QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
1733  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
1734  QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
1735  QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
1736  QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
1737  QUICK_ENTRY_POINT_INFO(pCheckSuspend),
1738  QUICK_ENTRY_POINT_INFO(pTestSuspend),
1739  QUICK_ENTRY_POINT_INFO(pDeliverException),
1740  QUICK_ENTRY_POINT_INFO(pThrowArrayBounds),
1741  QUICK_ENTRY_POINT_INFO(pThrowDivZero),
1742  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod),
1743  QUICK_ENTRY_POINT_INFO(pThrowNullPointer),
1744  QUICK_ENTRY_POINT_INFO(pThrowStackOverflow),
1745};
1746#undef QUICK_ENTRY_POINT_INFO
#undef PORTABLE_ENTRY_POINT_INFO
#undef JNI_ENTRY_POINT_INFO
#undef INTERPRETER_ENTRY_POINT_INFO
1747
1748void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) {
1749  CHECK_EQ(size_of_pointers, 4U);  // TODO: support 64-bit targets.
1750
1751#define DO_THREAD_OFFSET(x) \
1752    if (offset == static_cast<uint32_t>(OFFSETOF_VOLATILE_MEMBER(Thread, x))) { \
1753      os << # x; \
1754      return; \
1755    }
1756  DO_THREAD_OFFSET(state_and_flags_);
1757  DO_THREAD_OFFSET(card_table_);
1758  DO_THREAD_OFFSET(exception_);
1759  DO_THREAD_OFFSET(opeer_);
1760  DO_THREAD_OFFSET(jni_env_);
1761  DO_THREAD_OFFSET(self_);
1762  DO_THREAD_OFFSET(stack_end_);
1763  DO_THREAD_OFFSET(suspend_count_);
1764  DO_THREAD_OFFSET(thin_lock_thread_id_);
1765  // DO_THREAD_OFFSET(top_of_managed_stack_);
1766  // DO_THREAD_OFFSET(top_of_managed_stack_pc_);
1767  DO_THREAD_OFFSET(top_sirt_);
1768#undef DO_THREAD_OFFSET
1769
1770  size_t entry_point_count = arraysize(gThreadEntryPointInfo);
1771  CHECK_EQ(entry_point_count * size_of_pointers,
1772           sizeof(InterpreterEntryPoints) + sizeof(JniEntryPoints) + sizeof(PortableEntryPoints) +
1773           sizeof(QuickEntryPoints));
1774  uint32_t expected_offset = OFFSETOF_MEMBER(Thread, interpreter_entrypoints_);
1775  for (size_t i = 0; i < entry_point_count; ++i) {
1776    CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name;
1777    expected_offset += size_of_pointers;
1778    if (gThreadEntryPointInfo[i].offset == offset) {
1779      os << gThreadEntryPointInfo[i].name;
1780      return;
1781    }
1782  }
1783  os << offset;
1784}
1785
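// Delivers the pending exception, or performs a deoptimization when the exception is the magic
// value -1, by locating the catch handler and long-jumping to it. Never returns.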
1786void Thread::QuickDeliverException() {
1787  // Get exception from thread.
1788  ThrowLocation throw_location;
1789  mirror::Throwable* exception = GetException(&throw_location);
1790  CHECK(exception != nullptr);
1791  // Don't leave exception visible while we try to find the handler, which may cause class
1792  // resolution.
1793  ClearException();
1794  bool is_deoptimization = (exception == reinterpret_cast<mirror::Throwable*>(-1));
1795  if (kDebugExceptionDelivery) {
1796    if (!is_deoptimization) {
1797      mirror::String* msg = exception->GetDetailMessage();
1798      std::string str_msg(msg != nullptr ? msg->ToModifiedUtf8() : "");
1799      DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
1800                << ": " << str_msg << "\n");
1801    } else {
1802      DumpStack(LOG(INFO) << "Deoptimizing: ");
1803    }
1804  }
1805  CatchFinder catch_finder(this, throw_location, exception, is_deoptimization);
1806  catch_finder.FindCatch();
1807  catch_finder.UpdateInstrumentationStack();
1808  catch_finder.DoLongJump();
1809  LOG(FATAL) << "UNREACHABLE";
1810}
1811
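// Returns a Context usable for a long jump, reusing the thread's cached long_jump_context_ when
// one is available. Callers hand the context back via ReleaseLongJumpContext.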
1812Context* Thread::GetLongJumpContext() {
1813  Context* result = long_jump_context_;
1814  if (result == nullptr) {
1815    result = Context::Create();
1816  } else {
1817    long_jump_context_ = nullptr;  // Avoid context being shared.
1818    result->Reset();
1819  }
1820  return result;
1821}
1822
1823struct CurrentMethodVisitor : public StackVisitor {
1824  CurrentMethodVisitor(Thread* thread, Context* context)
1825      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1826      : StackVisitor(thread, context), this_object_(nullptr), method_(nullptr), dex_pc_(0) {}
1827  virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1828    mirror::ArtMethod* m = GetMethod();
1829    if (m->IsRuntimeMethod()) {
1830      // Continue if this is a runtime method.
1831      return true;
1832    }
1833    if (context_ != nullptr) {
1834      this_object_ = GetThisObject();
1835    }
1836    method_ = m;
1837    dex_pc_ = GetDexPc();
1838    return false;
1839  }
1840  mirror::Object* this_object_;
1841  mirror::ArtMethod* method_;
1842  uint32_t dex_pc_;
1843};
1844
1845mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc) const {
1846  CurrentMethodVisitor visitor(const_cast<Thread*>(this), nullptr);
1847  visitor.WalkStack(false);
1848  if (dex_pc != nullptr) {
1849    *dex_pc = visitor.dex_pc_;
1850  }
1851  return visitor.method_;
1852}
1853
1854ThrowLocation Thread::GetCurrentLocationForThrow() {
1855  Context* context = GetLongJumpContext();
1856  CurrentMethodVisitor visitor(this, context);
1857  visitor.WalkStack(false);
1858  ReleaseLongJumpContext(context);
1859  return ThrowLocation(visitor.this_object_, visitor.method_, visitor.dex_pc_);
1860}
1861
1862bool Thread::HoldsLock(mirror::Object* object) {
1863  if (object == nullptr) {
1864    return false;
1865  }
1866  return object->GetLockOwnerThreadId() == thin_lock_thread_id_;
1867}
1868
1869// RootVisitor parameters are: (mirror::Object** obj, size_t vreg, const StackVisitor* visitor).
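// The visitor may update the reference through the slot it is given; changed values are written
// back, which allows stack references to be relocated (e.g. by a moving collector).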
1870template <typename RootVisitor>
1871class ReferenceMapVisitor : public StackVisitor {
1872 public:
1873  ReferenceMapVisitor(Thread* thread, Context* context, const RootVisitor& visitor)
1874      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1875      : StackVisitor(thread, context), visitor_(visitor) {}
1876
1877  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
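    // Disabled by default; change the condition below to true to log every frame visited.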
1878    if (false) {
1879      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
1880          << StringPrintf("@ PC:%04x", GetDexPc());
1881    }
1882    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
1883    if (shadow_frame != nullptr) {
1884      mirror::ArtMethod* m = shadow_frame->GetMethod();
1885      size_t num_regs = shadow_frame->NumberOfVRegs();
1886      if (m->IsNative() || shadow_frame->HasReferenceArray()) {
1887        // SIRT for JNI methods or the reference array for the interpreter.
1888        for (size_t reg = 0; reg < num_regs; ++reg) {
1889          mirror::Object* ref = shadow_frame->GetVRegReference(reg);
1890          if (ref != nullptr) {
1891            mirror::Object* new_ref = ref;
1892            visitor_(&new_ref, reg, this);
1893            if (new_ref != ref) {
1894              shadow_frame->SetVRegReference(reg, new_ref);
1895            }
1896          }
1897        }
1898      } else {
1899        // Java method.
1900        // The portable path uses the DexGcMap stored in Method.native_gc_map_.
1901        const uint8_t* gc_map = m->GetNativeGcMap();
1902        CHECK(gc_map != nullptr) << PrettyMethod(m);
1903        verifier::DexPcToReferenceMap dex_gc_map(gc_map);
1904        uint32_t dex_pc = GetDexPc();
1905        const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
1906        DCHECK(reg_bitmap != nullptr);
1907        num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
1908        for (size_t reg = 0; reg < num_regs; ++reg) {
1909          if (TestBitmap(reg, reg_bitmap)) {
1910            mirror::Object* ref = shadow_frame->GetVRegReference(reg);
1911            if (ref != nullptr) {
1912              mirror::Object* new_ref = ref;
1913              visitor_(&new_ref, reg, this);
1914              if (new_ref != ref) {
1915                shadow_frame->SetVRegReference(reg, new_ref);
1916              }
1917            }
1918          }
1919        }
1920      }
1921    } else {
1922      mirror::ArtMethod* m = GetMethod();
1923      // Process register map (which native and runtime methods don't have).
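      // A live reference is either held in a spilled core register (described by the vmap table)
      // or in the frame's dex register area; both cases are handled below.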
1924      if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
1925        const uint8_t* native_gc_map = m->GetNativeGcMap();
1926        CHECK(native_gc_map != nullptr) << PrettyMethod(m);
1927        mh_.ChangeMethod(m);
1928        const DexFile::CodeItem* code_item = mh_.GetCodeItem();
1929        DCHECK(code_item != nullptr) << PrettyMethod(m);  // Can't be nullptr or how would we compile its instructions?
1930        NativePcOffsetToReferenceMap map(native_gc_map);
1931        size_t num_regs = std::min(map.RegWidth() * 8,
1932                                   static_cast<size_t>(code_item->registers_size_));
1933        if (num_regs > 0) {
1934          const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
1935          DCHECK(reg_bitmap != nullptr);
1936          const VmapTable vmap_table(m->GetVmapTable());
1937          uint32_t core_spills = m->GetCoreSpillMask();
1938          uint32_t fp_spills = m->GetFpSpillMask();
1939          size_t frame_size = m->GetFrameSizeInBytes();
1940          // For all dex registers in the bitmap
1941          mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
1942          DCHECK(cur_quick_frame != nullptr);
1943          for (size_t reg = 0; reg < num_regs; ++reg) {
1944            // Does this register hold a reference?
1945            if (TestBitmap(reg, reg_bitmap)) {
1946              uint32_t vmap_offset;
1947              if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
1948                int vmap_reg = vmap_table.ComputeRegister(core_spills, vmap_offset, kReferenceVReg);
1949                // This is sound as spilled GPRs will be word sized (i.e. 32 or 64 bits).
1950                mirror::Object** ref_addr = reinterpret_cast<mirror::Object**>(GetGPRAddress(vmap_reg));
1951                if (*ref_addr != nullptr) {
1952                  visitor_(ref_addr, reg, this);
1953                }
1954              } else {
1955                StackReference<mirror::Object>* ref_addr =
1956                    reinterpret_cast<StackReference<mirror::Object>*>(
1957                        GetVRegAddr(cur_quick_frame, code_item, core_spills, fp_spills, frame_size,
1958                                    reg));
1959                mirror::Object* ref = ref_addr->AsMirrorPtr();
1960                if (ref != nullptr) {
1961                  mirror::Object* new_ref = ref;
1962                  visitor_(&new_ref, reg, this);
1963                  if (ref != new_ref) {
1964                    ref_addr->Assign(new_ref);
1965                  }
1966                }
1967              }
1968            }
1969          }
1970        }
1971      }
1972    }
1973    return true;
1974  }
1975
1976 private:
1977  static bool TestBitmap(size_t reg, const uint8_t* reg_vector) {
1978    return ((reg_vector[reg / kBitsPerByte] >> (reg % kBitsPerByte)) & 0x01) != 0;
1979  }
1980
1981  // The visitor to invoke for each root we visit.
1982  const RootVisitor& visitor_;
1983
1984  // A method helper we keep around to avoid dex file/cache re-computations.
1985  MethodHelper mh_;
1986};
1987
1988class RootCallbackVisitor {
1989 public:
1990  RootCallbackVisitor(RootCallback* callback, void* arg, uint32_t tid)
1991     : callback_(callback), arg_(arg), tid_(tid) {}
1992
1993  void operator()(mirror::Object** obj, size_t, const StackVisitor*) const {
1994    callback_(obj, arg_, tid_, kRootJavaFrame);
1995  }
1996
1997 private:
1998  RootCallback* const callback_;
1999  void* const arg_;
2000  const uint32_t tid_;
2001};
2002
2003void Thread::SetClassLoaderOverride(mirror::ClassLoader* class_loader_override) {
2004  VerifyObject(class_loader_override);
2005  class_loader_override_ = class_loader_override;
2006}
2007
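// Reports every root held by this thread: the peer, any pending exception, the throw location,
// the class loader override, JNI locals and monitors, SIRT entries, references in each stack
// frame, and the instrumentation stack.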
2008void Thread::VisitRoots(RootCallback* visitor, void* arg) {
2009  uint32_t thread_id = GetThreadId();
2010  if (opeer_ != nullptr) {
2011    visitor(&opeer_, arg, thread_id, kRootThreadObject);
2012  }
2013  if (exception_ != nullptr) {
2014    visitor(reinterpret_cast<mirror::Object**>(&exception_), arg, thread_id, kRootNativeStack);
2015  }
2016  throw_location_.VisitRoots(visitor, arg);
2017  if (class_loader_override_ != nullptr) {
2018    visitor(reinterpret_cast<mirror::Object**>(&class_loader_override_), arg, thread_id,
2019            kRootNativeStack);
2020  }
2021  jni_env_->locals.VisitRoots(visitor, arg, thread_id, kRootJNILocal);
2022  jni_env_->monitors.VisitRoots(visitor, arg, thread_id, kRootJNIMonitor);
2023  SirtVisitRoots(visitor, arg, thread_id);
2024  // Visit roots on this thread's stack.
2025  Context* context = GetLongJumpContext();
2026  RootCallbackVisitor visitor_to_callback(visitor, arg, thread_id);
2027  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context, visitor_to_callback);
2028  mapper.WalkStack();
2029  ReleaseLongJumpContext(context);
2030  for (instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
2031    if (frame.this_object_ != nullptr) {
2032      visitor(&frame.this_object_, arg, thread_id, kRootJavaFrame);
2033    }
2034    DCHECK(frame.method_ != nullptr);
2035    visitor(reinterpret_cast<mirror::Object**>(&frame.method_), arg, thread_id, kRootJavaFrame);
2036  }
2037}
2038
2039static void VerifyRoot(mirror::Object** root, void* /*arg*/, uint32_t /*thread_id*/,
2040                       RootType /*root_type*/) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
2041  VerifyObject(*root);
2042}
2043
2044void Thread::VerifyStackImpl() {
2045  UniquePtr<Context> context(Context::Create());
2046  RootCallbackVisitor visitor_to_callback(VerifyRoot, Runtime::Current()->GetHeap(), GetThreadId());
2047  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitor_to_callback);
2048  mapper.WalkStack();
2049}
2050
2051// Set the stack end to the value to be used while handling a stack overflow.
2052void Thread::SetStackEndForStackOverflow() {
2053  // During stack overflow we allow use of the full stack.
2054  if (stack_end_ == stack_begin_) {
2055    // However, we seem to have already extended to use the full stack.
2056    LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
2057               << kStackOverflowReservedBytes << ")?";
2058    DumpStack(LOG(ERROR));
2059    LOG(FATAL) << "Recursive stack overflow.";
2060  }
2061
2062  stack_end_ = stack_begin_;
2063}
2064
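// Installs a new thread-local allocation buffer covering [start, end); the bump pointer is reset
// to start and the TLAB object count is cleared.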
2065void Thread::SetTlab(byte* start, byte* end) {
2066  DCHECK_LE(start, end);
2067  thread_local_start_ = start;
2068  thread_local_pos_  = thread_local_start_;
2069  thread_local_end_ = end;
2070  thread_local_objects_ = 0;
2071}
2072
2073std::ostream& operator<<(std::ostream& os, const Thread& thread) {
2074  thread.ShortDump(os);
2075  return os;
2076}
2077
2078}  // namespace art
2079