thread.cc revision 8438ed31e10f3881ed92f03877d5edaca7d5b48c
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_DALVIK

#include "thread.h"

#include <cutils/trace.h>
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
#include <sys/time.h>

#include <algorithm>
#include <bitset>
#include <cerrno>
#include <iostream>
#include <list>

#include "arch/context.h"
#include "base/mutex.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "cutils/atomic.h"
#include "cutils/atomic-inline.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "entrypoints/entrypoint_utils.h"
#include "gc_map.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "gc/space/space.h"
#include "invoke_arg_array_builder.h"
#include "jni_internal.h"
#include "mirror/art_field-inl.h"
#include "mirror/art_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "object_utils.h"
#include "reflection.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
#include "sirt_ref.h"
#include "stack.h"
#include "stack_indirect_reference_table.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "utils.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"
#include "vmap_table.h"
#include "well_known_classes.h"

namespace art {

bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = NULL;

static const char* kThreadNameDuringStartup = "<native thread without managed peer>";

void Thread::InitCardTable() {
  card_table_ = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
}

#if !defined(__APPLE__)
static void UnimplementedEntryPoint() {
  UNIMPLEMENTED(FATAL);
}
#endif

void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
                     PortableEntryPoints* ppoints, QuickEntryPoints* qpoints);

void Thread::InitTlsEntryPoints() {
#if !defined(__APPLE__)  // The Mac GCC is too old to accept this code.
  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&interpreter_entrypoints_);
  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(quick_entrypoints_));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
  }
  begin = reinterpret_cast<uintptr_t*>(&interpreter_entrypoints_);
  end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(portable_entrypoints_));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
  }
#endif
  InitEntryPoints(&interpreter_entrypoints_, &jni_entrypoints_, &portable_entrypoints_,
                  &quick_entrypoints_);
}
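
// Editor's note: the loops above deliberately poison-fill the entrypoint tables before
// InitEntryPoints overwrites them, so a call through a slot that was never wired up faults
// loudly in UnimplementedEntryPoint instead of jumping to garbage. A minimal sketch of the
// same pattern, with hypothetical names (not part of this file):
//
//   struct Table { void (*a)(); void (*b)(); };
//   void Unimplemented() { abort(); }  // loud, recognizable failure
//   Table t;
//   uintptr_t* it = reinterpret_cast<uintptr_t*>(&t);
//   uintptr_t* end = it + sizeof(t) / sizeof(uintptr_t);
//   for (; it != end; ++it) {
//     *it = reinterpret_cast<uintptr_t>(Unimplemented);
//   }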

void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
  deoptimization_shadow_frame_ = sf;
}

void Thread::SetDeoptimizationReturnValue(const JValue& ret_val) {
  deoptimization_return_value_.SetJ(ret_val.GetJ());
}

ShadowFrame* Thread::GetAndClearDeoptimizationShadowFrame(JValue* ret_val) {
  ShadowFrame* sf = deoptimization_shadow_frame_;
  deoptimization_shadow_frame_ = NULL;
  ret_val->SetJ(deoptimization_return_value_.GetJ());
  return sf;
}
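
// Editor's note: an illustrative round trip through the deoptimization hand-off above (the
// method names are real, the call sequence is a sketch, not a quote from the interpreter):
//
//   JValue ret;
//   ret.SetJ(42);
//   self->SetDeoptimizationShadowFrame(frame);  // stash interpreter state
//   self->SetDeoptimizationReturnValue(ret);
//   ...
//   JValue restored;
//   ShadowFrame* sf = self->GetAndClearDeoptimizationShadowFrame(&restored);
//   // sf == frame, restored.GetJ() == 42, and the stashed frame slot is now NULL.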

void Thread::InitTid() {
  tid_ = ::art::GetTid();
}

void Thread::InitAfterFork() {
  // One thread (us) survived the fork, but we have a new tid so we need to
  // update the value stashed in this Thread*.
  InitTid();
}

void* Thread::CreateCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  Runtime* runtime = Runtime::Current();
  if (runtime == NULL) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
    return NULL;
  }
  {
    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
    //       after self->Init().
    MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
    // If we got here, the runtime must not be shutting down (shutdown should never have started
    // while threads are being born).
    CHECK(!runtime->IsShuttingDown());
    self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
    Runtime::Current()->EndThreadBirth();
  }
  {
    ScopedObjectAccess soa(self);

    // Copy peer into self, deleting global reference when done.
    CHECK(self->jpeer_ != NULL);
    self->opeer_ = soa.Decode<mirror::Object*>(self->jpeer_);
    self->GetJniEnv()->DeleteGlobalRef(self->jpeer_);
    self->jpeer_ = NULL;

    {
      SirtRef<mirror::String> thread_name(self, self->GetThreadName(soa));
      self->SetThreadName(thread_name->ToModifiedUtf8().c_str());
    }
    Dbg::PostThreadStart(self);

    // Invoke the 'run' method of our java.lang.Thread.
    mirror::Object* receiver = self->opeer_;
    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
    mirror::ArtMethod* m =
        receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid));
    JValue result;
    ArgArray arg_array(NULL, 0);
    arg_array.Append(reinterpret_cast<uint32_t>(receiver));
    m->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
  }
  // Detach and delete self.
  Runtime::Current()->GetThreadList()->Unregister(self);

  return NULL;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa,
                                  mirror::Object* thread_peer) {
  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetInt(thread_peer)));
  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
  // to stop it from going away.
  if (kIsDebugBuild) {
    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
    if (result != NULL && !result->IsSuspended()) {
      Locks::thread_list_lock_->AssertHeld(soa.Self());
    }
  }
  return result;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, jobject java_thread) {
  return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread));
}

static size_t FixStackSize(size_t stack_size) {
  // A stack size of zero means "use the default".
  if (stack_size == 0) {
    stack_size = Runtime::Current()->GetDefaultStackSize();
  }

  // Dalvik used the bionic pthread default stack size for native threads,
  // so include that here to support apps that expect large native stacks.
  stack_size += 1 * MB;

  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
  if (stack_size < PTHREAD_STACK_MIN) {
    stack_size = PTHREAD_STACK_MIN;
  }

  // It's likely that callers are trying to ensure they have at least a certain amount of
  // stack space, so we should add our reserved space on top of what they requested, rather
  // than implicitly take it away from them.
  stack_size += Thread::kStackOverflowReservedBytes;

  // Some systems require the stack size to be a multiple of the system page size, so round up.
  stack_size = RoundUp(stack_size, kPageSize);

  return stack_size;
}
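
// Editor's note: a worked example of the sizing above, assuming a 4KB page size and, say,
// a 16KB kStackOverflowReservedBytes (the real constant lives in the headers). A request
// for 64KB becomes:
//
//   64KB (requested) + 1MB (bionic default)  = 1088KB
//   1088KB >= PTHREAD_STACK_MIN              -> no clamping
//   1088KB + 16KB (overflow reservation)     = 1104KB
//   RoundUp(1104KB, 4KB)                     = 1104KB (already page-aligned)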

void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
  CHECK(java_peer != NULL);
  Thread* self = static_cast<JNIEnvExt*>(env)->self;
  Runtime* runtime = Runtime::Current();

  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
  bool thread_start_during_shutdown = false;
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDown()) {
      thread_start_during_shutdown = true;
    } else {
      runtime->StartThreadBirth();
    }
  }
  if (thread_start_during_shutdown) {
    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
    return;
  }

  Thread* child_thread = new Thread(is_daemon);
  // Use global JNI ref to hold peer live while child thread starts.
  child_thread->jpeer_ = env->NewGlobalRef(java_peer);
  stack_size = FixStackSize(stack_size);

  // Thread.start is synchronized, so we know that nativePeer is 0, and that we're not racing
  // to assign it.
  env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
                   reinterpret_cast<jint>(child_thread));

  pthread_t new_pthread;
  pthread_attr_t attr;
  CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
  CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED");
  CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
  int pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, child_thread);
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");

  if (pthread_create_result != 0) {
    // pthread_create(3) failed, so clean up.
    {
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      runtime->EndThreadBirth();
    }
    // Manually delete the global reference since Thread::Init will not have been run.
    env->DeleteGlobalRef(child_thread->jpeer_);
    child_thread->jpeer_ = NULL;
    delete child_thread;
    child_thread = NULL;
    // TODO: remove from thread group?
    env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
    {
      std::string msg(StringPrintf("pthread_create (%s stack) failed: %s",
                                   PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
      ScopedObjectAccess soa(env);
      soa.Self()->ThrowOutOfMemoryError(msg.c_str());
    }
  }
}

void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
  // This function does all the initialization that must be run by the native thread it applies to.
  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
  // we can handshake with the corresponding native thread when it's ready.) Check this native
  // thread hasn't been through here already...
  CHECK(Thread::Current() == NULL);
  SetUpAlternateSignalStack();
  InitCpu();
  InitTlsEntryPoints();
  InitCardTable();
  InitTid();
  // Set pthread_self_ before the pthread_setspecific call that makes Thread::Current() work;
  // this avoids pthread_self_ ever being invalid when discovered from Thread::Current().
  pthread_self_ = pthread_self();
  CHECK(is_started_);
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
  DCHECK_EQ(Thread::Current(), this);

  thin_lock_id_ = thread_list->AllocThreadId(this);
  InitStackHwm();

  jni_env_ = new JNIEnvExt(this, java_vm);
  thread_list->Register(this);
}

Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                       bool create_peer) {
  Thread* self;
  Runtime* runtime = Runtime::Current();
  if (runtime == NULL) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
    return NULL;
  }
  {
    MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDown()) {
      LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
      return NULL;
    } else {
      Runtime::Current()->StartThreadBirth();
      self = new Thread(as_daemon);
      self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
      Runtime::Current()->EndThreadBirth();
    }
  }

  CHECK_NE(self->GetState(), kRunnable);
  self->SetState(kNative);

  // If we're the main thread, ClassLinker won't be created until after we're attached,
  // so that thread needs a two-stage attach. Regular threads don't need this hack.
  // In the compiler, all threads need this hack, because no-one's going to be getting
  // a native peer!
  if (create_peer) {
    self->CreatePeer(thread_name, as_daemon, thread_group);
  } else {
    // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
    if (thread_name != NULL) {
      self->name_->assign(thread_name);
      ::art::SetThreadName(thread_name);
    }
  }

  return self;
}

void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->IsStarted());
  JNIEnv* env = jni_env_;

  if (thread_group == NULL) {
    thread_group = runtime->GetMainThreadGroup();
  }
  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
  jint thread_priority = GetNativePriority();
  jboolean thread_is_daemon = as_daemon;

  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
  if (peer.get() == NULL) {
    CHECK(IsExceptionPending());
    return;
  }
  {
    ScopedObjectAccess soa(this);
    opeer_ = soa.Decode<mirror::Object*>(peer.get());
  }
  env->CallNonvirtualVoidMethod(peer.get(),
                                WellKnownClasses::java_lang_Thread,
                                WellKnownClasses::java_lang_Thread_init,
                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
  AssertNoPendingException();

  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());
  jni_env_->SetIntField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
                        reinterpret_cast<jint>(self));

  ScopedObjectAccess soa(self);
  SirtRef<mirror::String> peer_thread_name(soa.Self(), GetThreadName(soa));
  if (peer_thread_name.get() == NULL) {
    // The Thread constructor should have set the Thread.name to a
    // non-null value. However, because we can run without code
    // available (in the compiler, in tests), we manually assign the
    // fields the constructor should have set.
    soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
        SetBoolean(opeer_, thread_is_daemon);
    soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
        SetObject(opeer_, soa.Decode<mirror::Object*>(thread_group));
    soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
        SetObject(opeer_, soa.Decode<mirror::Object*>(thread_name.get()));
    soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
        SetInt(opeer_, thread_priority);
    peer_thread_name.reset(GetThreadName(soa));
  }
  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
  if (peer_thread_name.get() != NULL) {
    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
  }
}

void Thread::SetThreadName(const char* name) {
  name_->assign(name);
  ::art::SetThreadName(name);
  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
}

void Thread::InitStackHwm() {
  void* stack_base;
  size_t stack_size;
  GetThreadStack(pthread_self_, stack_base, stack_size);

  // TODO: include this in the thread dumps; potentially useful in SIGQUIT output?
  VLOG(threads) << StringPrintf("Native stack is at %p (%s)", stack_base, PrettySize(stack_size).c_str());

  stack_begin_ = reinterpret_cast<byte*>(stack_base);
  stack_size_ = stack_size;

  if (stack_size_ <= kStackOverflowReservedBytes) {
    LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << stack_size_ << " bytes)";
  }

  // TODO: move this into the Linux GetThreadStack implementation.
#if !defined(__APPLE__)
  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
  // will be broken because we'll die long before we get close to 2GB.
  bool is_main_thread = (::art::GetTid() == getpid());
  if (is_main_thread) {
    rlimit stack_limit;
    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
    }
    if (stack_limit.rlim_cur == RLIM_INFINITY) {
      // Find the default stack size for new threads...
      pthread_attr_t default_attributes;
      size_t default_stack_size;
      CHECK_PTHREAD_CALL(pthread_attr_init, (&default_attributes), "default stack size query");
      CHECK_PTHREAD_CALL(pthread_attr_getstacksize, (&default_attributes, &default_stack_size),
                         "default stack size query");
      CHECK_PTHREAD_CALL(pthread_attr_destroy, (&default_attributes), "default stack size query");

      // ...and use that as our limit.
      size_t old_stack_size = stack_size_;
      stack_size_ = default_stack_size;
      stack_begin_ += (old_stack_size - stack_size_);
      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
                    << " to " << PrettySize(stack_size_)
                    << " with base " << reinterpret_cast<void*>(stack_begin_);
    }
  }
#endif

  // Set stack_end_ to the bottom of the stack, saving space for stack overflow handling.
  ResetDefaultStackEnd();

  // Sanity check.
  int stack_variable;
  CHECK_GT(&stack_variable, reinterpret_cast<void*>(stack_end_));
}

void Thread::ShortDump(std::ostream& os) const {
  os << "Thread[";
  if (GetThinLockId() != 0) {
    // If we're in kStarting, we won't have a thin lock id or tid yet.
    os << GetThinLockId()
             << ",tid=" << GetTid() << ',';
  }
  os << GetState()
           << ",Thread*=" << this
           << ",peer=" << opeer_
           << ",\"" << *name_ << "\""
           << "]";
}

void Thread::Dump(std::ostream& os) const {
  DumpState(os);
  DumpStack(os);
}

mirror::String* Thread::GetThreadName(const ScopedObjectAccessUnchecked& soa) const {
  mirror::ArtField* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
  return (opeer_ != NULL) ? reinterpret_cast<mirror::String*>(f->GetObject(opeer_)) : NULL;
}

void Thread::GetThreadName(std::string& name) const {
  name.assign(*name_);
}

uint64_t Thread::GetCpuMicroTime() const {
#if defined(HAVE_POSIX_CLOCKS)
  clockid_t cpu_clock_id;
  pthread_getcpuclockid(pthread_self_, &cpu_clock_id);
  timespec now;
  clock_gettime(cpu_clock_id, &now);
  return static_cast<uint64_t>(now.tv_sec) * 1000000LL + now.tv_nsec / 1000LL;
#else
  UNIMPLEMENTED(WARNING);
  return -1;
#endif
}
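
// Editor's note: the conversion above folds a timespec into microseconds. For example,
// now = {tv_sec = 2, tv_nsec = 345678901} yields 2 * 1000000 + 345678901 / 1000 = 2345678us;
// the integer division simply discards the sub-microsecond remainder.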

void Thread::AtomicSetFlag(ThreadFlag flag) {
  android_atomic_or(flag, &state_and_flags_.as_int);
}

void Thread::AtomicClearFlag(ThreadFlag flag) {
  android_atomic_and(-1 ^ flag, &state_and_flags_.as_int);
}
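
// Editor's note: the two helpers above are ordinary bitmask updates applied atomically to
// state_and_flags_. Illustration (the flag value is an assumption for the example, not a
// quote from thread.h):
//
//   int flags = 0;
//   flags |= kSuspendRequest;         // AtomicSetFlag sets just that bit
//   flags &= (-1 ^ kSuspendRequest);  // AtomicClearFlag: -1 is all ones, so the XOR
//                                     // yields a mask with only that bit cleared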

// Attempt to rectify locks so that we dump the thread list with the required locks held
// before exiting.
static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
  LOG(ERROR) << *thread << " suspend count already zero.";
  Locks::thread_suspend_count_lock_->Unlock(self);
  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
    Locks::mutator_lock_->SharedTryLock(self);
    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
    }
  }
  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
    Locks::thread_list_lock_->TryLock(self);
    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
    }
  }
  std::ostringstream ss;
  Runtime::Current()->GetThreadList()->DumpLocked(ss);
  LOG(FATAL) << ss.str();
}

void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
  DCHECK(delta == -1 || delta == +1 || delta == -debug_suspend_count_)
      << delta << " " << debug_suspend_count_ << " " << this;
  DCHECK_GE(suspend_count_, debug_suspend_count_) << this;
  Locks::thread_suspend_count_lock_->AssertHeld(self);
  if (this != self && !IsSuspended()) {
    Locks::thread_list_lock_->AssertHeld(self);
  }
  if (UNLIKELY(delta < 0 && suspend_count_ <= 0)) {
    UnsafeLogFatalForSuspendCount(self, this);
    return;
  }

  suspend_count_ += delta;
  if (for_debugger) {
    debug_suspend_count_ += delta;
  }

  if (suspend_count_ == 0) {
    AtomicClearFlag(kSuspendRequest);
  } else {
    AtomicSetFlag(kSuspendRequest);
  }
}
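
// Editor's note: an illustrative debugger suspension using the accounting above. Starting
// from suspend_count_ == 0 and debug_suspend_count_ == 0:
//
//   thread->ModifySuspendCount(self, +1, true);  // counts become 1/1 and the
//                                                // kSuspendRequest flag is set
//   thread->ModifySuspendCount(self, -1, true);  // counts return to 0/0, the flag is
//                                                // cleared, and the thread may resume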

void Thread::RunCheckpointFunction() {
  CHECK(checkpoint_function_ != NULL);
  ATRACE_BEGIN("Checkpoint function");
  checkpoint_function_->Run(this);
  ATRACE_END();
}

bool Thread::RequestCheckpoint(Closure* function) {
  CHECK(!ReadFlag(kCheckpointRequest)) << "Already have a pending checkpoint request";
  checkpoint_function_ = function;
  union StateAndFlags old_state_and_flags = state_and_flags_;
  // We must be runnable to request a checkpoint.
  old_state_and_flags.as_struct.state = kRunnable;
  union StateAndFlags new_state_and_flags = old_state_and_flags;
  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
  int succeeded = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
                                         &state_and_flags_.as_int);
  return succeeded == 0;
}
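
// Editor's note: android_atomic_cmpxchg returns 0 on success, so RequestCheckpoint returns
// true only if this thread was still Runnable with unchanged flags when the compare-and-swap
// landed. A caller-side sketch (the closure name is illustrative):
//
//   if (!thread->RequestCheckpoint(&my_closure)) {
//     // The thread left the Runnable state first; fall back to suspending it.
//   }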

void Thread::FullSuspendCheck() {
  VLOG(threads) << this << " self-suspending";
  ATRACE_BEGIN("Full suspend check");
  // Make thread appear suspended to other threads, release mutator_lock_.
  TransitionFromRunnableToSuspended(kSuspended);
  // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
  TransitionFromSuspendedToRunnable();
  ATRACE_END();
  VLOG(threads) << this << " self-reviving";
}
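
// Editor's note: FullSuspendCheck is the cooperative suspension point: the round trip through
// TransitionFromRunnableToSuspended and back blocks if a suspend request (GC, debugger) is
// pending. An illustrative call site would be any long-running runtime loop:
//
//   for (;;) {
//     DoSomeWork();              // hypothetical unit of work
//     self->FullSuspendCheck();  // let a pending suspension win
//   }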

Thread* Thread::SuspendForDebugger(jobject peer, bool request_suspension, bool* timed_out) {
  static const useconds_t kTimeoutUs = 30 * 1000000;  // 30s.
  useconds_t total_delay_us = 0;
  useconds_t delay_us = 0;
  bool did_suspend_request = false;
  *timed_out = false;
  while (true) {
    Thread* thread;
    {
      ScopedObjectAccess soa(Thread::Current());
      Thread* self = soa.Self();
      MutexLock mu(self, *Locks::thread_list_lock_);
      thread = Thread::FromManagedThread(soa, peer);
      if (thread == NULL) {
        JNIEnv* env = self->GetJniEnv();
        ScopedLocalRef<jstring> scoped_name_string(env,
                                                   (jstring)env->GetObjectField(peer,
                                                              WellKnownClasses::java_lang_Thread_name));
        ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
        if (scoped_name_chars.c_str() == NULL) {
          LOG(WARNING) << "No such thread for suspend: " << peer;
          env->ExceptionClear();
        } else {
          LOG(WARNING) << "No such thread for suspend: " << peer << ":" << scoped_name_chars.c_str();
        }

        return NULL;
      }
      {
        MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
        if (request_suspension) {
          thread->ModifySuspendCount(soa.Self(), +1, true /* for_debugger */);
          request_suspension = false;
          did_suspend_request = true;
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised if this is the current thread
        // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
        // to just explicitly handle the current thread in the callers to this code.
        CHECK_NE(thread, soa.Self()) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          return thread;
        }
        if (total_delay_us >= kTimeoutUs) {
          LOG(ERROR) << "Thread suspension timed out: " << peer;
          if (did_suspend_request) {
            thread->ModifySuspendCount(soa.Self(), -1, true /* for_debugger */);
          }
          *timed_out = true;
          return NULL;
        }
      }
      // Release locks and come out of runnable state.
    }
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      BaseMutex* held_mutex = Thread::Current()->GetHeldMutex(static_cast<LockLevel>(i));
      if (held_mutex != NULL) {
        LOG(FATAL) << "Holding " << held_mutex->GetName()
            << " while sleeping for thread suspension";
      }
    }
    {
      useconds_t new_delay_us = delay_us * 2;
      CHECK_GE(new_delay_us, delay_us);
      if (new_delay_us < 500000) {  // Don't allow sleeping to be more than 0.5s.
        delay_us = new_delay_us;
      }
    }
    if (delay_us == 0) {
      sched_yield();
      // Default to 1 millisecond (note that this gets multiplied by 2 before the first sleep).
      delay_us = 500;
    } else {
      usleep(delay_us);
      total_delay_us += delay_us;
    }
  }
}

void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
  std::string group_name;
  int priority;
  bool is_daemon = false;
  Thread* self = Thread::Current();

  if (self != NULL && thread != NULL && thread->opeer_ != NULL) {
    ScopedObjectAccessUnchecked soa(self);
    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(thread->opeer_);
    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(thread->opeer_);

    mirror::Object* thread_group =
        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->opeer_);

    if (thread_group != NULL) {
      mirror::ArtField* group_name_field =
          soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
      mirror::String* group_name_string =
          reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
      group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : "<null>";
    }
  } else {
    priority = GetNativePriority();
  }

  std::string scheduler_group_name(GetSchedulerGroupName(tid));
  if (scheduler_group_name.empty()) {
    scheduler_group_name = "default";
  }

  if (thread != NULL) {
    os << '"' << *thread->name_ << '"';
    if (is_daemon) {
      os << " daemon";
    }
    os << " prio=" << priority
       << " tid=" << thread->GetThinLockId()
       << " " << thread->GetState();
    if (thread->IsStillStarting()) {
      os << " (still starting up)";
    }
    os << "\n";
  } else {
    os << '"' << ::art::GetThreadName(tid) << '"'
       << " prio=" << priority
       << " (not attached)\n";
  }

  if (thread != NULL) {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    os << "  | group=\"" << group_name << "\""
       << " sCount=" << thread->suspend_count_
       << " dsCount=" << thread->debug_suspend_count_
       << " obj=" << reinterpret_cast<void*>(thread->opeer_)
       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
  }

  os << "  | sysTid=" << tid
     << " nice=" << getpriority(PRIO_PROCESS, tid)
     << " cgrp=" << scheduler_group_name;
  if (thread != NULL) {
    int policy;
    sched_param sp;
    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->pthread_self_, &policy, &sp), __FUNCTION__);
    os << " sched=" << policy << "/" << sp.sched_priority
       << " handle=" << reinterpret_cast<void*>(thread->pthread_self_);
  }
  os << "\n";

  // Grab the scheduler stats for this thread.
  std::string scheduler_stats;
  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
  } else {
    scheduler_stats = "0 0 0";
  }

  char native_thread_state = '?';
  int utime = 0;
  int stime = 0;
  int task_cpu = 0;
  GetTaskStats(tid, native_thread_state, utime, stime, task_cpu);

  os << "  | state=" << native_thread_state
     << " schedstat=( " << scheduler_stats << " )"
     << " utm=" << utime
     << " stm=" << stime
     << " core=" << task_cpu
     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
  if (thread != NULL) {
    os << "  | stack=" << reinterpret_cast<void*>(thread->stack_begin_) << "-" << reinterpret_cast<void*>(thread->stack_end_)
       << " stackSize=" << PrettySize(thread->stack_size_) << "\n";
  }
}

void Thread::DumpState(std::ostream& os) const {
  Thread::DumpState(os, this, GetTid());
}

struct StackDumpVisitor : public StackVisitor {
  StackDumpVisitor(std::ostream& os, Thread* thread, Context* context, bool can_allocate)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), os(os), thread(thread), can_allocate(can_allocate),
        last_method(NULL), last_line_number(0), repetition_count(0), frame_count(0) {
  }

  virtual ~StackDumpVisitor() {
    if (frame_count == 0) {
      os << "  (no managed stack frames)\n";
    }
  }

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }
    const int kMaxRepetition = 3;
    mirror::Class* c = m->GetDeclaringClass();
    const mirror::DexCache* dex_cache = c->GetDexCache();
    int line_number = -1;
    if (dex_cache != NULL) {  // be tolerant of bad input
      const DexFile& dex_file = *dex_cache->GetDexFile();
      line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
    }
    if (line_number == last_line_number && last_method == m) {
      repetition_count++;
    } else {
      if (repetition_count >= kMaxRepetition) {
        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
      }
      repetition_count = 0;
      last_line_number = line_number;
      last_method = m;
    }
    if (repetition_count < kMaxRepetition) {
      os << "  at " << PrettyMethod(m, false);
      if (m->IsNative()) {
        os << "(Native method)";
      } else {
        mh.ChangeMethod(m);
        const char* source_file(mh.GetDeclaringClassSourceFile());
        os << "(" << (source_file != NULL ? source_file : "unavailable")
           << ":" << line_number << ")"
           << " <0x" << std::hex << GetDexPc() << ">";
      }
      os << "\n";
      if (frame_count == 0) {
        Monitor::DescribeWait(os, thread);
      }
      if (can_allocate) {
        Monitor::VisitLocks(this, DumpLockedObject, &os);
      }
    }

    ++frame_count;
    return true;
  }

  static void DumpLockedObject(mirror::Object* o, void* context)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    std::ostream& os = *reinterpret_cast<std::ostream*>(context);
    os << "  - locked <" << o << "> (a " << PrettyTypeOf(o) << ")\n";
  }

  std::ostream& os;
  const Thread* thread;
  const bool can_allocate;
  MethodHelper mh;
  mirror::ArtMethod* last_method;
  int last_line_number;
  int repetition_count;
  int frame_count;
};

static bool ShouldShowNativeStack(const Thread* thread)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ThreadState state = thread->GetState();

  // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
  if (state > kWaiting && state < kStarting) {
    return true;
  }

  // In an Object.wait variant or Thread.sleep? That's not interesting.
  if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
    return false;
  }

  // In some other native method? That's interesting.
  // We don't just check kNative because native methods will be in state kSuspended if they're
  // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
  // thread-startup states if it's early enough in their life cycle (http://b/7432159).
  mirror::ArtMethod* current_method = thread->GetCurrentMethod(NULL);
  return current_method != NULL && current_method->IsNative();
}

void Thread::DumpStack(std::ostream& os) const {
  // TODO: we call this code when dying but may not have suspended the thread ourself. The
  //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
  //       the race with the thread_suspend_count_lock_).
  // No point dumping for an abort in debug builds where we'll hit the not suspended check in stack.
  bool dump_for_abort = (gAborting > 0) && !kIsDebugBuild;
  if (this == Thread::Current() || IsSuspended() || dump_for_abort) {
    // If we're currently in native code, dump that stack before dumping the managed stack.
    if (dump_for_abort || ShouldShowNativeStack(this)) {
      DumpKernelStack(os, GetTid(), "  kernel: ", false);
      DumpNativeStack(os, GetTid(), "  native: ", false);
    }
    UniquePtr<Context> context(Context::Create());
    StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(), !throwing_OutOfMemoryError_);
    dumper.WalkStack();
  } else {
    os << "Not able to dump stack of thread that isn't suspended";
  }
}

void Thread::ThreadExitCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  if (self->thread_exit_check_count_ == 0) {
    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's going to use a pthread_key_create destructor?): " << *self;
    CHECK(is_started_);
    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
    self->thread_exit_check_count_ = 1;
  } else {
    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
  }
}

void Thread::Startup() {
  CHECK(!is_started_);
  is_started_ = true;
  {
    // MutexLock to keep annotalysis happy.
    //
    // Note we use NULL for the thread because Thread::Current can
    // return garbage since (is_started_ == true) and
    // Thread::pthread_key_self_ is not yet initialized.
    // This was seen on glibc.
    MutexLock mu(NULL, *Locks::thread_suspend_count_lock_);
    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
                                         *Locks::thread_suspend_count_lock_);
  }

  // Allocate a TLS slot.
  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");

  // Double-check the TLS slot allocation.
  if (pthread_getspecific(pthread_key_self_) != NULL) {
    LOG(FATAL) << "Newly-created pthread TLS slot is not NULL";
  }
}

void Thread::FinishStartup() {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->IsStarted());

  // Finish attaching the main thread.
  ScopedObjectAccess soa(Thread::Current());
  Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());

  Runtime::Current()->GetClassLinker()->RunRootClinits();
}

void Thread::Shutdown() {
  CHECK(is_started_);
  is_started_ = false;
  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
  MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
  if (resume_cond_ != NULL) {
    delete resume_cond_;
    resume_cond_ = NULL;
  }
}

Thread::Thread(bool daemon)
    : suspend_count_(0),
      card_table_(NULL),
      exception_(NULL),
      stack_end_(NULL),
      managed_stack_(),
      jni_env_(NULL),
      self_(NULL),
      opeer_(NULL),
      jpeer_(NULL),
      stack_begin_(NULL),
      stack_size_(0),
      stack_trace_sample_(NULL),
      trace_clock_base_(0),
      thin_lock_id_(0),
      tid_(0),
      wait_mutex_(new Mutex("a thread wait mutex")),
      wait_cond_(new ConditionVariable("a thread wait condition variable", *wait_mutex_)),
      wait_monitor_(NULL),
      interrupted_(false),
      wait_next_(NULL),
      monitor_enter_object_(NULL),
      top_sirt_(NULL),
      runtime_(NULL),
      class_loader_override_(NULL),
      long_jump_context_(NULL),
      throwing_OutOfMemoryError_(false),
      debug_suspend_count_(0),
      debug_invoke_req_(new DebugInvokeReq),
      deoptimization_shadow_frame_(NULL),
      instrumentation_stack_(new std::deque<instrumentation::InstrumentationStackFrame>),
      name_(new std::string(kThreadNameDuringStartup)),
      daemon_(daemon),
      pthread_self_(0),
      no_thread_suspension_(0),
      last_no_thread_suspension_cause_(NULL),
      checkpoint_function_(0),
      thread_exit_check_count_(0) {
  CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
  state_and_flags_.as_struct.flags = 0;
  state_and_flags_.as_struct.state = kNative;
  memset(&held_mutexes_[0], 0, sizeof(held_mutexes_));
}

bool Thread::IsStillStarting() const {
  // You might think you can check whether the state is kStarting, but for much of thread startup,
  // the thread is in kNative; it might also be in kVmWait.
  // You might think you can check whether the peer is NULL, but the peer is actually created and
  // assigned fairly early on, and needs to be.
  // It turns out that the last thing to change is the thread name; that's a good proxy for "has
  // this thread _ever_ entered kRunnable".
  return (jpeer_ == NULL && opeer_ == NULL) || (*name_ == kThreadNameDuringStartup);
}

void Thread::AssertNoPendingException() const {
  if (UNLIKELY(IsExceptionPending())) {
    ScopedObjectAccess soa(Thread::Current());
    mirror::Throwable* exception = GetException(NULL);
    LOG(FATAL) << "No pending exception expected: " << exception->Dump();
  }
}

static void MonitorExitVisitor(const mirror::Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS {
  Thread* self = reinterpret_cast<Thread*>(arg);
  mirror::Object* entered_monitor = const_cast<mirror::Object*>(object);
  if (self->HoldsLock(entered_monitor)) {
    LOG(WARNING) << "Calling MonitorExit on object "
                 << object << " (" << PrettyTypeOf(object) << ")"
                 << " left locked by native thread "
                 << *Thread::Current() << " which is detaching";
    entered_monitor->MonitorExit(self);
  }
}

void Thread::Destroy() {
  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());

  if (opeer_ != NULL) {
    ScopedObjectAccess soa(self);
    // We may need to call user-supplied managed code, do this before final clean-up.
    HandleUncaughtExceptions(soa);
    RemoveFromThreadGroup(soa);

    // this.nativePeer = 0;
    soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)->SetInt(opeer_, 0);
    Dbg::PostThreadDeath(self);

    // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
    // who is waiting.
    mirror::Object* lock =
        soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(opeer_);
    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
    if (lock != NULL) {
      ObjectLock locker(self, lock);
      locker.Notify();
    }
  }

  // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
  if (jni_env_ != NULL) {
    jni_env_->monitors.VisitRoots(MonitorExitVisitor, self);
  }
}

Thread::~Thread() {
  if (jni_env_ != NULL && jpeer_ != NULL) {
    // If pthread_create fails we don't have a jni env here.
    jni_env_->DeleteGlobalRef(jpeer_);
    jpeer_ = NULL;
  }
  opeer_ = NULL;

  delete jni_env_;
  jni_env_ = NULL;

  CHECK_NE(GetState(), kRunnable);
  // We may be deleting a stillborn thread.
  SetStateUnsafe(kTerminated);

  delete wait_cond_;
  delete wait_mutex_;

  if (long_jump_context_ != NULL) {
    delete long_jump_context_;
  }

  delete debug_invoke_req_;
  delete instrumentation_stack_;
  delete name_;
  delete stack_trace_sample_;

  TearDownAlternateSignalStack();
}

void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
  if (!IsExceptionPending()) {
    return;
  }
  ScopedLocalRef<jobject> peer(jni_env_, soa.AddLocalReference<jobject>(opeer_));
  ScopedThreadStateChange tsc(this, kNative);

  // Get and clear the exception.
  ScopedLocalRef<jthrowable> exception(jni_env_, jni_env_->ExceptionOccurred());
  jni_env_->ExceptionClear();

  // If the thread has its own handler, use that.
  ScopedLocalRef<jobject> handler(jni_env_,
                                  jni_env_->GetObjectField(peer.get(),
                                                           WellKnownClasses::java_lang_Thread_uncaughtHandler));
  if (handler.get() == NULL) {
    // Otherwise use the thread group's default handler.
    handler.reset(jni_env_->GetObjectField(peer.get(), WellKnownClasses::java_lang_Thread_group));
  }

  // Call the handler.
  jni_env_->CallVoidMethod(handler.get(),
                           WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException,
                           peer.get(), exception.get());

  // If the handler threw, clear that exception too.
  jni_env_->ExceptionClear();
}

void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
  // this.group.removeThread(this);
  // group can be null if we're in the compiler or a test.
  mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(opeer_);
  if (ogroup != NULL) {
    ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
    ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(opeer_));
    ScopedThreadStateChange tsc(soa.Self(), kNative);
    jni_env_->CallVoidMethod(group.get(), WellKnownClasses::java_lang_ThreadGroup_removeThread,
                             peer.get());
  }
}

size_t Thread::NumSirtReferences() {
  size_t count = 0;
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
    count += cur->NumberOfReferences();
  }
  return count;
}

bool Thread::SirtContains(jobject obj) const {
  mirror::Object** sirt_entry = reinterpret_cast<mirror::Object**>(obj);
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
    if (cur->Contains(sirt_entry)) {
      return true;
    }
  }
  // JNI code invoked from portable code uses shadow frames rather than the SIRT.
  return managed_stack_.ShadowFramesContain(sirt_entry);
}

void Thread::SirtVisitRoots(RootVisitor* visitor, void* arg) {
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
    size_t num_refs = cur->NumberOfReferences();
    for (size_t j = 0; j < num_refs; j++) {
      mirror::Object* object = cur->GetReference(j);
      if (object != NULL) {
        visitor(object, arg);
      }
    }
  }
}
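
// Editor's note: a minimal visitor for the walk above, assuming the
// void(const mirror::Object*, void*) shape implied by the call (compare MonitorExitVisitor
// earlier in this file):
//
//   static void CountRoot(const mirror::Object* /*root*/, void* arg) {
//     ++*reinterpret_cast<size_t*>(arg);
//   }
//   size_t n = 0;
//   self->SirtVisitRoots(CountRoot, &n);  // n == number of live SIRT references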

mirror::Object* Thread::DecodeJObject(jobject obj) const {
  Locks::mutator_lock_->AssertSharedHeld(this);
  if (obj == NULL) {
    return NULL;
  }
  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
  IndirectRefKind kind = GetIndirectRefKind(ref);
  mirror::Object* result;
  // The "kinds" below are sorted by the frequency we expect to encounter them.
  if (kind == kLocal) {
    IndirectReferenceTable& locals = jni_env_->locals;
    result = const_cast<mirror::Object*>(locals.Get(ref));
  } else if (kind == kSirtOrInvalid) {
    // TODO: make stack indirect reference table lookup more efficient
    // Check if this is a local reference in the SIRT
    if (LIKELY(SirtContains(obj))) {
      result = *reinterpret_cast<mirror::Object**>(obj);  // Read from SIRT
    } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) {
      // Assume an invalid local reference is actually a direct pointer.
      result = reinterpret_cast<mirror::Object*>(obj);
    } else {
      result = kInvalidIndirectRefObject;
    }
  } else if (kind == kGlobal) {
    JavaVMExt* vm = Runtime::Current()->GetJavaVM();
    IndirectReferenceTable& globals = vm->globals;
    ReaderMutexLock mu(const_cast<Thread*>(this), vm->globals_lock);
    result = const_cast<mirror::Object*>(globals.Get(ref));
  } else {
    DCHECK_EQ(kind, kWeakGlobal);
    JavaVMExt* vm = Runtime::Current()->GetJavaVM();
    IndirectReferenceTable& weak_globals = vm->weak_globals;
    ReaderMutexLock mu(const_cast<Thread*>(this), vm->weak_globals_lock);
    result = const_cast<mirror::Object*>(weak_globals.Get(ref));
    if (result == kClearedJniWeakGlobal) {
      // This is a special case where it's okay to return NULL.
      return NULL;
    }
  }

  if (UNLIKELY(result == NULL)) {
    JniAbortF(NULL, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
  } else {
    if (kIsDebugBuild && (result != kInvalidIndirectRefObject)) {
      Runtime::Current()->GetHeap()->VerifyObject(result);
    }
  }
  return result;
}
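
// Editor's note: DecodeJObject dispatches on the IndirectRefKind packed into the jobject's
// bits (kLocal, kSirtOrInvalid, kGlobal, kWeakGlobal). For example, a reference fresh from
// NewGlobalRef takes the kGlobal branch and is looked up in the VM-wide globals table under
// globals_lock, while a pointer into the current SIRT short-circuits to a direct stack read.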

// Implements java.lang.Thread.interrupted.
bool Thread::Interrupted() {
  MutexLock mu(Thread::Current(), *wait_mutex_);
  bool interrupted = interrupted_;
  interrupted_ = false;
  return interrupted;
}

// Implements java.lang.Thread.isInterrupted.
bool Thread::IsInterrupted() {
  MutexLock mu(Thread::Current(), *wait_mutex_);
  return interrupted_;
}
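
// Editor's note: the pair above mirrors the Java contract - Thread.interrupted() reads and
// clears the flag, while Thread#isInterrupted() only reads it. Illustration:
//
//   t->Interrupt();      // sets interrupted_ and wakes any waiter
//   t->IsInterrupted();  // true; flag untouched
//   t->Interrupted();    // true; flag cleared as a side effect
//   t->IsInterrupted();  // now false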

void Thread::Interrupt() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *wait_mutex_);
  if (interrupted_) {
    return;
  }
  interrupted_ = true;
  NotifyLocked(self);
}

void Thread::Notify() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *wait_mutex_);
  NotifyLocked(self);
}

void Thread::NotifyLocked(Thread* self) {
  if (wait_monitor_ != NULL) {
    wait_cond_->Signal(self);
  }
}

class CountStackDepthVisitor : public StackVisitor {
 public:
  explicit CountStackDepthVisitor(Thread* thread)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, NULL),
        depth_(0), skip_depth_(0), skipping_(true) {}

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // We want to skip frames up to and including the exception's constructor.
    // Note we also skip the frame if it doesn't have a method (namely the callee
    // save frame)
    mirror::ArtMethod* m = GetMethod();
    if (skipping_ && !m->IsRuntimeMethod() &&
        !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
      skipping_ = false;
    }
    if (!skipping_) {
      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
        ++depth_;
      }
    } else {
      ++skip_depth_;
    }
    return true;
  }

  int GetDepth() const {
    return depth_;
  }

  int GetSkipDepth() const {
    return skip_depth_;
  }

 private:
  uint32_t depth_;
  uint32_t skip_depth_;
  bool skipping_;
};

class BuildInternalStackTraceVisitor : public StackVisitor {
 public:
  explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
      : StackVisitor(thread, NULL), self_(self),
        skip_depth_(skip_depth), count_(0), dex_pc_trace_(NULL), method_trace_(NULL) {}

  bool Init(int depth)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Allocate method trace with an extra slot that will hold the PC trace
    SirtRef<mirror::ObjectArray<mirror::Object> >
        method_trace(self_,
                     Runtime::Current()->GetClassLinker()->AllocObjectArray<mirror::Object>(self_,
                                                                                            depth + 1));
    if (method_trace.get() == NULL) {
      return false;
    }
    mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth);
    if (dex_pc_trace == NULL) {
      return false;
    }
    // Save PC trace in last element of method trace, also places it into the
    // object graph.
    method_trace->Set(depth, dex_pc_trace);
    // Set the Object*s and assert that no thread suspension is now possible.
    const char* last_no_suspend_cause =
        self_->StartAssertNoThreadSuspension("Building internal stack trace");
    CHECK(last_no_suspend_cause == NULL) << last_no_suspend_cause;
    method_trace_ = method_trace.get();
    dex_pc_trace_ = dex_pc_trace;
    return true;
  }

  virtual ~BuildInternalStackTraceVisitor() {
    if (method_trace_ != NULL) {
      self_->EndAssertNoThreadSuspension(NULL);
    }
  }

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method_trace_ == NULL || dex_pc_trace_ == NULL) {
      return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
    }
    if (skip_depth_ > 0) {
      skip_depth_--;
      return true;
    }
    mirror::ArtMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;  // Ignore runtime frames (in particular callee save).
    }
    method_trace_->Set(count_, m);
    dex_pc_trace_->Set(count_, GetDexPc());
    ++count_;
    return true;
  }

  mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
    return method_trace_;
  }

 private:
  Thread* const self_;
  // How many more frames to skip.
  int32_t skip_depth_;
  // Current position down stack trace.
  uint32_t count_;
  // Array of dex PC values.
  mirror::IntArray* dex_pc_trace_;
  // An array of the methods on the stack, the last entry is a reference to the PC trace.
  mirror::ObjectArray<mirror::Object>* method_trace_;
};
1359
1360jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const {
1361  // Compute depth of stack
1362  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
1363  count_visitor.WalkStack();
1364  int32_t depth = count_visitor.GetDepth();
1365  int32_t skip_depth = count_visitor.GetSkipDepth();
1366
1367  // Build internal stack trace.
1368  BuildInternalStackTraceVisitor build_trace_visitor(soa.Self(), const_cast<Thread*>(this),
1369                                                     skip_depth);
1370  if (!build_trace_visitor.Init(depth)) {
1371    return NULL;  // Allocation failed.
1372  }
1373  build_trace_visitor.WalkStack();
1374  mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
1375  if (kIsDebugBuild) {
1376    for (int32_t i = 0; i < trace->GetLength(); ++i) {
1377      CHECK(trace->Get(i) != NULL);
1378    }
1379  }
1380  return soa.AddLocalReference<jobjectArray>(trace);
1381}
1382
1383jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
1384    jobjectArray output_array, int* stack_depth) {
1385  // Transition into runnable state to work on Object*/Array*
1386  ScopedObjectAccess soa(env);
1387  // Decode the internal stack trace into the depth, method trace and PC trace
1388  mirror::ObjectArray<mirror::Object>* method_trace =
1389      soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
1390  int32_t depth = method_trace->GetLength() - 1;
1391  mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
1392
1393  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1394
1395  jobjectArray result;
1396  mirror::ObjectArray<mirror::StackTraceElement>* java_traces;
1397  if (output_array != NULL) {
1398    // Reuse the array we were given.
1399    result = output_array;
1400    java_traces = soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(output_array);
1401    // ...adjusting the number of frames we'll write to not exceed the array length.
1402    depth = std::min(depth, java_traces->GetLength());
1403  } else {
1404    // Create java_trace array and place in local reference table
1405    java_traces = class_linker->AllocStackTraceElementArray(soa.Self(), depth);
1406    if (java_traces == NULL) {
1407      return NULL;
1408    }
1409    result = soa.AddLocalReference<jobjectArray>(java_traces);
1410  }
1411
1412  if (stack_depth != NULL) {
1413    *stack_depth = depth;
1414  }
1415
1416  MethodHelper mh;
1417  for (int32_t i = 0; i < depth; ++i) {
1418    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
1419    mirror::ArtMethod* method = down_cast<mirror::ArtMethod*>(method_trace->Get(i));
1420    mh.ChangeMethod(method);
1421    uint32_t dex_pc = pc_trace->Get(i);
1422    int32_t line_number = mh.GetLineNumFromDexPC(dex_pc);
1423    // Allocate element, potentially triggering GC
1424    // TODO: reuse class_name_object via Class::name_?
1425    const char* descriptor = mh.GetDeclaringClassDescriptor();
1426    CHECK(descriptor != NULL);
1427    std::string class_name(PrettyDescriptor(descriptor));
1428    SirtRef<mirror::String> class_name_object(soa.Self(),
1429                                              mirror::String::AllocFromModifiedUtf8(soa.Self(),
1430                                                                                    class_name.c_str()));
1431    if (class_name_object.get() == NULL) {
1432      return NULL;
1433    }
1434    const char* method_name = mh.GetName();
1435    CHECK(method_name != NULL);
1436    SirtRef<mirror::String> method_name_object(soa.Self(),
1437                                               mirror::String::AllocFromModifiedUtf8(soa.Self(),
1438                                                                                     method_name));
1439    if (method_name_object.get() == NULL) {
1440      return NULL;
1441    }
1442    const char* source_file = mh.GetDeclaringClassSourceFile();
1443    SirtRef<mirror::String> source_name_object(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(),
1444                                                                                                 source_file));
1445    mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(soa.Self(),
1446                                                                      class_name_object.get(),
1447                                                                      method_name_object.get(),
1448                                                                      source_name_object.get(),
1449                                                                      line_number);
1450    if (obj == NULL) {
1451      return NULL;
1452    }
1453#ifdef MOVING_GARBAGE_COLLECTOR
1454    // Re-read after potential GC
1455    java_traces = soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(result);
1456    method_trace = soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
1457    pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));
1458#endif
1459    java_traces->Set(i, obj);
1460  }
1461  return result;
1462}
1463
1464void Thread::ThrowNewExceptionF(const ThrowLocation& throw_location,
1465                                const char* exception_class_descriptor, const char* fmt, ...) {
1466  va_list args;
1467  va_start(args, fmt);
1468  ThrowNewExceptionV(throw_location, exception_class_descriptor,
1469                     fmt, args);
1470  va_end(args);
1471}
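// An illustrative call (hypothetical descriptor and arguments) showing the
// printf-style contract of ThrowNewExceptionF:
//   self->ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
//                            "length=%d; index=%d", length, index);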
1472
1473void Thread::ThrowNewExceptionV(const ThrowLocation& throw_location,
1474                                const char* exception_class_descriptor,
1475                                const char* fmt, va_list ap) {
1476  std::string msg;
1477  StringAppendV(&msg, fmt, ap);
1478  ThrowNewException(throw_location, exception_class_descriptor, msg.c_str());
1479}
1480
1481void Thread::ThrowNewException(const ThrowLocation& throw_location, const char* exception_class_descriptor,
1482                               const char* msg) {
1483  AssertNoPendingException();  // Callers should either clear or call ThrowNewWrappedException.
1484  ThrowNewWrappedException(throw_location, exception_class_descriptor, msg);
1485}
1486
1487void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
1488                                      const char* exception_class_descriptor,
1489                                      const char* msg) {
1490  DCHECK_EQ(this, Thread::Current());
1491  // Ensure we don't forget arguments over object allocation.
1492  SirtRef<mirror::Object> saved_throw_this(this, throw_location.GetThis());
1493  SirtRef<mirror::ArtMethod> saved_throw_method(this, throw_location.GetMethod());
1494  // Ignore the cause throw location. TODO: should we report this as a re-throw?
1495  SirtRef<mirror::Throwable> cause(this, GetException(NULL));
1496  ClearException();
1497  Runtime* runtime = Runtime::Current();
1498
1499  mirror::ClassLoader* cl = NULL;
1500  if (throw_location.GetMethod() != NULL) {
1501    cl = throw_location.GetMethod()->GetDeclaringClass()->GetClassLoader();
1502  }
1503  SirtRef<mirror::Class>
1504      exception_class(this, runtime->GetClassLinker()->FindClass(exception_class_descriptor, cl));
1505  if (UNLIKELY(exception_class.get() == NULL)) {
1506    CHECK(IsExceptionPending());
1507    LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
1508    return;
1509  }
1510
1511  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(exception_class.get(), true, true))) {
1512    DCHECK(IsExceptionPending());
1513    return;
1514  }
1515  DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
1516  SirtRef<mirror::Throwable> exception(this,
1517                                down_cast<mirror::Throwable*>(exception_class->AllocObject(this)));
1518
1519  // Choose an appropriate constructor and set up the arguments.
1520  const char* signature;
1521  SirtRef<mirror::String> msg_string(this, NULL);
1522  if (msg != NULL) {
1523    // Ensure we remember this and the method over the String allocation.
1524    msg_string.reset(mirror::String::AllocFromModifiedUtf8(this, msg));
1525    if (UNLIKELY(msg_string.get() == NULL)) {
1526      CHECK(IsExceptionPending());  // OOME.
1527      return;
1528    }
1529    if (cause.get() == NULL) {
1530      signature = "(Ljava/lang/String;)V";
1531    } else {
1532      signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
1533    }
1534  } else {
1535    if (cause.get() == NULL) {
1536      signature = "()V";
1537    } else {
1538      signature = "(Ljava/lang/Throwable;)V";
1539    }
1540  }
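  // Summarizing the choice above, (msg, cause) selects the constructor:
  //   (msg,  cause) -> "(Ljava/lang/String;Ljava/lang/Throwable;)V"
  //   (msg,  NULL)  -> "(Ljava/lang/String;)V"
  //   (NULL, cause) -> "(Ljava/lang/Throwable;)V"
  //   (NULL, NULL)  -> "()V"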
1541  mirror::ArtMethod* exception_init_method =
1542      exception_class->FindDeclaredDirectMethod("<init>", signature);
1543
1544  CHECK(exception_init_method != NULL) << "No <init>" << signature << " in "
1545      << PrettyDescriptor(exception_class_descriptor);
1546
1547  if (UNLIKELY(!runtime->IsStarted())) {
1548    // Something is trying to throw an exception without a started runtime, which is the common
1549    // case in the compiler. We won't be able to invoke the constructor of the exception, so set
1550    // the exception fields directly.
1551    if (msg != NULL) {
1552      exception->SetDetailMessage(msg_string.get());
1553    }
1554    if (cause.get() != NULL) {
1555      exception->SetCause(cause.get());
1556    }
1557    ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
1558                                         throw_location.GetDexPc());
1559    SetException(gc_safe_throw_location, exception.get());
1560  } else {
1561    ArgArray args("VLL", 3);
1562    args.Append(reinterpret_cast<uint32_t>(exception.get()));
1563    if (msg != NULL) {
1564      args.Append(reinterpret_cast<uint32_t>(msg_string.get()));
1565    }
1566    if (cause.get() != NULL) {
1567      args.Append(reinterpret_cast<uint32_t>(cause.get()));
1568    }
1569    JValue result;
1570    exception_init_method->Invoke(this, args.GetArray(), args.GetNumBytes(), &result, 'V');
1571    if (LIKELY(!IsExceptionPending())) {
1572      ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
1573                                           throw_location.GetDexPc());
1574      SetException(gc_safe_throw_location, exception.get());
1575    }
1576  }
1577}
1578
1579void Thread::ThrowOutOfMemoryError(const char* msg) {
1580  LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
1581      msg, (throwing_OutOfMemoryError_ ? " (recursive case)" : ""));
1582  ThrowLocation throw_location = GetCurrentLocationForThrow();
1583  if (!throwing_OutOfMemoryError_) {
1584    throwing_OutOfMemoryError_ = true;
1585    ThrowNewException(throw_location, "Ljava/lang/OutOfMemoryError;", msg);
1586    throwing_OutOfMemoryError_ = false;
1587  } else {
1588    Dump(LOG(ERROR));  // The pre-allocated OOME has no stack, so help out and log one.
1589    SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1590  }
1591}
1592
1593Thread* Thread::CurrentFromGdb() {
1594  return Thread::Current();
1595}
1596
1597void Thread::DumpFromGdb() const {
1598  std::ostringstream ss;
1599  Dump(ss);
1600  std::string str(ss.str());
1601  // log to stderr for debugging command line processes
1602  std::cerr << str;
1603#ifdef HAVE_ANDROID_OS
1604  // log to logcat for debugging frameworks processes
1605  LOG(INFO) << str;
1606#endif
1607}
1608
1609struct EntryPointInfo {
1610  uint32_t offset;
1611  const char* name;
1612};
1613#define INTERPRETER_ENTRY_POINT_INFO(x) { INTERPRETER_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
1614#define JNI_ENTRY_POINT_INFO(x)         { JNI_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
1615#define PORTABLE_ENTRY_POINT_INFO(x)    { PORTABLE_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
1616#define QUICK_ENTRY_POINT_INFO(x)       { QUICK_ENTRYPOINT_OFFSET(x).Uint32Value(), #x }
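// For example, QUICK_ENTRY_POINT_INFO(pAllocArray) expands to
// { QUICK_ENTRYPOINT_OFFSET(pAllocArray).Uint32Value(), "pAllocArray" },
// pairing each entry point's byte offset within Thread with a printable name.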
1617static const EntryPointInfo gThreadEntryPointInfo[] = {
1618  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToInterpreterBridge),
1619  INTERPRETER_ENTRY_POINT_INFO(pInterpreterToCompiledCodeBridge),
1620  JNI_ENTRY_POINT_INFO(pDlsymLookup),
1621  PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampoline),
1622  PORTABLE_ENTRY_POINT_INFO(pPortableToInterpreterBridge),
1623  QUICK_ENTRY_POINT_INFO(pAllocArray),
1624  QUICK_ENTRY_POINT_INFO(pAllocArrayWithAccessCheck),
1625  QUICK_ENTRY_POINT_INFO(pAllocObject),
1626  QUICK_ENTRY_POINT_INFO(pAllocObjectWithAccessCheck),
1627  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArray),
1628  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayWithAccessCheck),
1629  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivial),
1630  QUICK_ENTRY_POINT_INFO(pCanPutArrayElement),
1631  QUICK_ENTRY_POINT_INFO(pCheckCast),
1632  QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage),
1633  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess),
1634  QUICK_ENTRY_POINT_INFO(pInitializeType),
1635  QUICK_ENTRY_POINT_INFO(pResolveString),
1636  QUICK_ENTRY_POINT_INFO(pSet32Instance),
1637  QUICK_ENTRY_POINT_INFO(pSet32Static),
1638  QUICK_ENTRY_POINT_INFO(pSet64Instance),
1639  QUICK_ENTRY_POINT_INFO(pSet64Static),
1640  QUICK_ENTRY_POINT_INFO(pSetObjInstance),
1641  QUICK_ENTRY_POINT_INFO(pSetObjStatic),
1642  QUICK_ENTRY_POINT_INFO(pGet32Instance),
1643  QUICK_ENTRY_POINT_INFO(pGet32Static),
1644  QUICK_ENTRY_POINT_INFO(pGet64Instance),
1645  QUICK_ENTRY_POINT_INFO(pGet64Static),
1646  QUICK_ENTRY_POINT_INFO(pGetObjInstance),
1647  QUICK_ENTRY_POINT_INFO(pGetObjStatic),
1648  QUICK_ENTRY_POINT_INFO(pHandleFillArrayData),
1649  QUICK_ENTRY_POINT_INFO(pJniMethodStart),
1650  QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized),
1651  QUICK_ENTRY_POINT_INFO(pJniMethodEnd),
1652  QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized),
1653  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference),
1654  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
1655  QUICK_ENTRY_POINT_INFO(pLockObject),
1656  QUICK_ENTRY_POINT_INFO(pUnlockObject),
1657  QUICK_ENTRY_POINT_INFO(pCmpgDouble),
1658  QUICK_ENTRY_POINT_INFO(pCmpgFloat),
1659  QUICK_ENTRY_POINT_INFO(pCmplDouble),
1660  QUICK_ENTRY_POINT_INFO(pCmplFloat),
1661  QUICK_ENTRY_POINT_INFO(pFmod),
1662  QUICK_ENTRY_POINT_INFO(pSqrt),
1663  QUICK_ENTRY_POINT_INFO(pL2d),
1664  QUICK_ENTRY_POINT_INFO(pFmodf),
1665  QUICK_ENTRY_POINT_INFO(pL2f),
1666  QUICK_ENTRY_POINT_INFO(pD2iz),
1667  QUICK_ENTRY_POINT_INFO(pF2iz),
1668  QUICK_ENTRY_POINT_INFO(pIdivmod),
1669  QUICK_ENTRY_POINT_INFO(pD2l),
1670  QUICK_ENTRY_POINT_INFO(pF2l),
1671  QUICK_ENTRY_POINT_INFO(pLdiv),
1672  QUICK_ENTRY_POINT_INFO(pLdivmod),
1673  QUICK_ENTRY_POINT_INFO(pLmul),
1674  QUICK_ENTRY_POINT_INFO(pShlLong),
1675  QUICK_ENTRY_POINT_INFO(pShrLong),
1676  QUICK_ENTRY_POINT_INFO(pUshrLong),
1677  QUICK_ENTRY_POINT_INFO(pIndexOf),
1678  QUICK_ENTRY_POINT_INFO(pMemcmp16),
1679  QUICK_ENTRY_POINT_INFO(pStringCompareTo),
1680  QUICK_ENTRY_POINT_INFO(pMemcpy),
1681  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampoline),
1682  QUICK_ENTRY_POINT_INFO(pQuickToInterpreterBridge),
1683  QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
1684  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
1685  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
1686  QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
1687  QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
1688  QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
1689  QUICK_ENTRY_POINT_INFO(pCheckSuspend),
1690  QUICK_ENTRY_POINT_INFO(pTestSuspend),
1691  QUICK_ENTRY_POINT_INFO(pDeliverException),
1692  QUICK_ENTRY_POINT_INFO(pThrowArrayBounds),
1693  QUICK_ENTRY_POINT_INFO(pThrowDivZero),
1694  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethod),
1695  QUICK_ENTRY_POINT_INFO(pThrowNullPointer),
1696  QUICK_ENTRY_POINT_INFO(pThrowStackOverflow),
1697};
#undef INTERPRETER_ENTRY_POINT_INFO
#undef JNI_ENTRY_POINT_INFO
#undef PORTABLE_ENTRY_POINT_INFO
1698#undef QUICK_ENTRY_POINT_INFO
1699
1700void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) {
1701  CHECK_EQ(size_of_pointers, 4U);  // TODO: support 64-bit targets.
1702
1703#define DO_THREAD_OFFSET(x) \
1704    if (offset == static_cast<uint32_t>(OFFSETOF_VOLATILE_MEMBER(Thread, x))) { \
1705      os << # x; \
1706      return; \
1707    }
1708  DO_THREAD_OFFSET(state_and_flags_);
1709  DO_THREAD_OFFSET(card_table_);
1710  DO_THREAD_OFFSET(exception_);
1711  DO_THREAD_OFFSET(opeer_);
1712  DO_THREAD_OFFSET(jni_env_);
1713  DO_THREAD_OFFSET(self_);
1714  DO_THREAD_OFFSET(stack_end_);
1715  DO_THREAD_OFFSET(suspend_count_);
1716  DO_THREAD_OFFSET(thin_lock_id_);
1717  // DO_THREAD_OFFSET(top_of_managed_stack_);
1718  // DO_THREAD_OFFSET(top_of_managed_stack_pc_);
1719  DO_THREAD_OFFSET(top_sirt_);
1720#undef DO_THREAD_OFFSET
1721
1722  size_t entry_point_count = arraysize(gThreadEntryPointInfo);
1723  CHECK_EQ(entry_point_count * size_of_pointers,
1724           sizeof(InterpreterEntryPoints) + sizeof(JniEntryPoints) + sizeof(PortableEntryPoints) +
1725           sizeof(QuickEntryPoints));
1726  uint32_t expected_offset = OFFSETOF_MEMBER(Thread, interpreter_entrypoints_);
1727  for (size_t i = 0; i < entry_point_count; ++i) {
1728    CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name;
1729    expected_offset += size_of_pointers;
1730    if (gThreadEntryPointInfo[i].offset == offset) {
1731      os << gThreadEntryPointInfo[i].name;
1732      return;
1733    }
1734  }
1735  os << offset;
1736}
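// An illustrative (hypothetical) call, useful when annotating disassembly:
//   std::ostringstream os;
//   Thread::DumpThreadOffset(os, OFFSETOF_VOLATILE_MEMBER(Thread, exception_), 4U);
//   // os.str() is now "exception_" rather than a raw byte offset.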
1737
1738static const bool kDebugExceptionDelivery = false;
1739class CatchBlockStackVisitor : public StackVisitor {
1740 public:
1741  CatchBlockStackVisitor(Thread* self, const ThrowLocation& throw_location,
1742                         mirror::Throwable* exception, bool is_deoptimization)
1743      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1744      : StackVisitor(self, self->GetLongJumpContext()),
1745        self_(self), exception_(exception), is_deoptimization_(is_deoptimization),
1746        to_find_(is_deoptimization ? NULL : exception->GetClass()), throw_location_(throw_location),
1747        handler_quick_frame_(NULL), handler_quick_frame_pc_(0), handler_dex_pc_(0),
1748        native_method_count_(0), clear_exception_(false),
1749        method_tracing_active_(is_deoptimization ||
1750                               Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
1751        instrumentation_frames_to_pop_(0), top_shadow_frame_(NULL), prev_shadow_frame_(NULL) {
1752    // Exception not in root sets, can't allow GC.
1753    last_no_assert_suspension_cause_ = self->StartAssertNoThreadSuspension("Finding catch block");
1754  }
1755
1756  ~CatchBlockStackVisitor() {
1757    LOG(FATAL) << "UNREACHABLE";  // Expected to take long jump.
1758  }
1759
1760  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1761    mirror::ArtMethod* method = GetMethod();
1762    if (method == NULL) {
1763      // This is the upcall; we remember the frame and last pc so that we may long jump to them.
1764      handler_quick_frame_pc_ = GetCurrentQuickFramePc();
1765      handler_quick_frame_ = GetCurrentQuickFrame();
1766      return false;  // End stack walk.
1767    } else {
1768      if (UNLIKELY(method_tracing_active_ &&
1769                   GetQuickInstrumentationExitPc() == GetReturnPc())) {
1770        // Keep count of the number of unwinds during instrumentation.
1771        instrumentation_frames_to_pop_++;
1772      }
1773      if (method->IsRuntimeMethod()) {
1774        // Ignore callee save method.
1775        DCHECK(method->IsCalleeSaveMethod());
1776        return true;
1777      } else if (is_deoptimization_) {
1778        return HandleDeoptimization(method);
1779      } else {
1780        return HandleTryItems(method);
1781      }
1782    }
1783  }
1784
1785  bool HandleTryItems(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1786    uint32_t dex_pc = DexFile::kDexNoIndex;
1787    if (method->IsNative()) {
1788      native_method_count_++;
1789    } else {
1790      dex_pc = GetDexPc();
1791    }
1792    if (dex_pc != DexFile::kDexNoIndex) {
1793      uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc, &clear_exception_);
1794      if (found_dex_pc != DexFile::kDexNoIndex) {
1795        handler_dex_pc_ = found_dex_pc;
1796        handler_quick_frame_pc_ = method->ToNativePc(found_dex_pc);
1797        handler_quick_frame_ = GetCurrentQuickFrame();
1798        return false;  // End stack walk.
1799      }
1800    }
1801    return true;  // Continue stack walk.
1802  }
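  // Note on the walk above: native frames never yield a dex pc, so they cannot
  // match a try item; they are only counted (native_method_count_) so that the
  // corresponding SIRTs can be popped when the long jump unwinds past them.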
1803
1804  bool HandleDeoptimization(mirror::ArtMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1805    MethodHelper mh(m);
1806    const DexFile::CodeItem* code_item = mh.GetCodeItem();
1807    CHECK(code_item != NULL);
1808    uint16_t num_regs = code_item->registers_size_;
1809    uint32_t dex_pc = GetDexPc();
1810    const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
1811    uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
1812    ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, m, new_dex_pc);
1813    verifier::MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(),
1814                                      mh.GetClassDefIndex(), code_item,
1815                                      m->GetDexMethodIndex(), m, m->GetAccessFlags(), false, true);
1816    verifier.Verify();
1817    std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
1818    for (uint16_t reg = 0; reg < num_regs; reg++) {
1819      VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
1820      switch (kind) {
1821        case kUndefined:
1822          new_frame->SetVReg(reg, 0xEBADDE09);
1823          break;
1824        case kConstant:
1825          new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
1826          break;
1827        case kReferenceVReg:
1828          new_frame->SetVRegReference(reg,
1829                                      reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kind)));
1830          break;
1831        default:
1832          new_frame->SetVReg(reg, GetVReg(m, reg, kind));
1833          break;
1834      }
1835    }
1836    if (prev_shadow_frame_ != NULL) {
1837      prev_shadow_frame_->SetLink(new_frame);
1838    } else {
1839      top_shadow_frame_ = new_frame;
1840    }
1841    prev_shadow_frame_ = new_frame;
1842    return true;
1843  }
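  // The loop above rebuilds one ShadowFrame per visited compiled frame. The
  // first (innermost) frame becomes top_shadow_frame_, and each outer frame is
  // chained on via SetLink(); DoLongJump() later hands the chain to the
  // interpreter through SetDeoptimizationShadowFrame(). The 0xEBADDE09 constant
  // appears to be a poison value so that reads of undefined vregs stand out.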
1844
1845  void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1846    mirror::ArtMethod* catch_method = *handler_quick_frame_;
1847    if (catch_method == NULL) {
1848      if (kDebugExceptionDelivery) {
1849        LOG(INFO) << "Handler is upcall";
1850      }
1851    } else {
1852      CHECK(!is_deoptimization_);
1853      if (kDebugExceptionDelivery) {
1854        const DexFile& dex_file = *catch_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1855        int line_number = dex_file.GetLineNumFromPC(catch_method, handler_dex_pc_);
1856        LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
1857      }
1858    }
1859    if (clear_exception_) {
1860      // Exception was cleared as part of delivery.
1861      DCHECK(!self_->IsExceptionPending());
1862    } else {
1863      // Put exception back in root set with clear throw location.
1864      self_->SetException(ThrowLocation(), exception_);
1865    }
1866    self_->EndAssertNoThreadSuspension(last_no_assert_suspension_cause_);
1867    // Do instrumentation events after allowing thread suspension again.
1868    instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
1869    for (size_t i = 0; i < instrumentation_frames_to_pop_; ++i) {
1870      // We pop the instrumentation stack here so as not to corrupt it during the stack walk.
1871      if (i != instrumentation_frames_to_pop_ - 1 ||
          self_->GetInstrumentationStack()->front().method_ != catch_method) {
1872        // Don't pop the instrumentation frame of the catch handler.
1873        instrumentation->PopMethodForUnwind(self_, is_deoptimization_);
1874      }
1875    }
1876    if (!is_deoptimization_) {
1877      instrumentation->ExceptionCaughtEvent(self_, throw_location_, catch_method, handler_dex_pc_,
1878                                            exception_);
1879    } else {
1880      // TODO: proper return value.
1881      self_->SetDeoptimizationShadowFrame(top_shadow_frame_);
1882    }
1883    // Place context back on thread so it will be available when we continue.
1884    self_->ReleaseLongJumpContext(context_);
1885    context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
1886    CHECK_NE(handler_quick_frame_pc_, 0u);
1887    context_->SetPC(handler_quick_frame_pc_);
1888    context_->SmashCallerSaves();
1889    context_->DoLongJump();
1890  }
1891
1892 private:
1893  Thread* const self_;
1894  mirror::Throwable* const exception_;
1895  const bool is_deoptimization_;
1896  // The type of the exception catch block to find.
1897  mirror::Class* const to_find_;
1898  // Location of the throw.
1899  const ThrowLocation& throw_location_;
1900  // Quick frame with found handler or last frame if no handler found.
1901  mirror::ArtMethod** handler_quick_frame_;
1902  // PC to branch to for the handler.
1903  uintptr_t handler_quick_frame_pc_;
1904  // Associated dex PC.
1905  uint32_t handler_dex_pc_;
1906  // Number of native methods passed in crawl (equates to number of SIRTs to pop)
1907  uint32_t native_method_count_;
1908  // Should the exception be cleared as the catch block has no move-exception?
1909  bool clear_exception_;
1910  // Is method tracing active?
1911  const bool method_tracing_active_;
1912  // Support for nesting no thread suspension checks.
1913  const char* last_no_assert_suspension_cause_;
1914  // Number of frames to pop in long jump.
1915  size_t instrumentation_frames_to_pop_;
1916  ShadowFrame* top_shadow_frame_;
1917  ShadowFrame* prev_shadow_frame_;
1918};
1919
1920void Thread::QuickDeliverException() {
1921  // Get exception from thread.
1922  ThrowLocation throw_location;
1923  mirror::Throwable* exception = GetException(&throw_location);
1924  CHECK(exception != NULL);
1926  // Don't leave exception visible while we try to find the handler, which may cause class
1927  // resolution.
1928  ClearException();
1929  bool is_deoptimization = (exception == reinterpret_cast<mirror::Throwable*>(-1));
1930  if (kDebugExceptionDelivery) {
1931    if (!is_deoptimization) {
1932      mirror::String* msg = exception->GetDetailMessage();
1933      std::string str_msg(msg != NULL ? msg->ToModifiedUtf8() : "");
1934      DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
1935                << ": " << str_msg << "\n");
1936    } else {
1937      DumpStack(LOG(INFO) << "Deoptimizing: ");
1938    }
1939  }
1940  CatchBlockStackVisitor catch_finder(this, throw_location, exception, is_deoptimization);
1941  catch_finder.WalkStack(true);
1942  catch_finder.DoLongJump();
1943  LOG(FATAL) << "UNREACHABLE";
1944}
1945
1946Context* Thread::GetLongJumpContext() {
1947  Context* result = long_jump_context_;
1948  if (result == NULL) {
1949    result = Context::Create();
1950  } else {
1951    long_jump_context_ = NULL;  // Avoid context being shared.
1952    result->Reset();
1953  }
1954  return result;
1955}
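// Illustrative pairing (mirroring GetCurrentLocationForThrow below): callers
// take the cached context, walk the stack, then hand the context back:
//   Context* context = GetLongJumpContext();
//   CurrentMethodVisitor visitor(this, context);
//   visitor.WalkStack(false);
//   ReleaseLongJumpContext(context);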
1956
1957struct CurrentMethodVisitor : public StackVisitor {
1958  CurrentMethodVisitor(Thread* thread, Context* context)
1959      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1960      : StackVisitor(thread, context), this_object_(NULL), method_(NULL), dex_pc_(0) {}
1961  virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1962    mirror::ArtMethod* m = GetMethod();
1963    if (m->IsRuntimeMethod()) {
1964      // Continue if this is a runtime method.
1965      return true;
1966    }
1967    if (context_ != NULL) {
1968      this_object_ = GetThisObject();
1969    }
1970    method_ = m;
1971    dex_pc_ = GetDexPc();
1972    return false;
1973  }
1974  mirror::Object* this_object_;
1975  mirror::ArtMethod* method_;
1976  uint32_t dex_pc_;
1977};
1978
1979mirror::ArtMethod* Thread::GetCurrentMethod(uint32_t* dex_pc) const {
1980  CurrentMethodVisitor visitor(const_cast<Thread*>(this), NULL);
1981  visitor.WalkStack(false);
1982  if (dex_pc != NULL) {
1983    *dex_pc = visitor.dex_pc_;
1984  }
1985  return visitor.method_;
1986}
1987
1988ThrowLocation Thread::GetCurrentLocationForThrow() {
1989  Context* context = GetLongJumpContext();
1990  CurrentMethodVisitor visitor(this, context);
1991  visitor.WalkStack(false);
1992  ReleaseLongJumpContext(context);
1993  return ThrowLocation(visitor.this_object_, visitor.method_, visitor.dex_pc_);
1994}
1995
1996bool Thread::HoldsLock(mirror::Object* object) {
1997  if (object == NULL) {
1998    return false;
1999  }
2000  return object->GetThinLockId() == thin_lock_id_;
2001}
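// Illustrative caller (hypothetical): code that must already own a monitor can
// assert it, relying on the thin lock id comparison above:
//   DCHECK(soa.Self()->HoldsLock(object));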
2002
2003// RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
2004template <typename RootVisitor>
2005class ReferenceMapVisitor : public StackVisitor {
2006 public:
2007  ReferenceMapVisitor(Thread* thread, Context* context, const RootVisitor& visitor)
2008      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
2009      : StackVisitor(thread, context), visitor_(visitor) {}
2010
2011  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
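    // Debug aid: flip the constant below to true to log every frame whose roots
    // are visited.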
2012    if (false) {
2013      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
2014          << StringPrintf("@ PC:%04x", GetDexPc());
2015    }
2016    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
2017    if (shadow_frame != NULL) {
2018      mirror::ArtMethod* m = shadow_frame->GetMethod();
2019      size_t num_regs = shadow_frame->NumberOfVRegs();
2020      if (m->IsNative() || shadow_frame->HasReferenceArray()) {
2021        // SIRT for JNI or References for interpreter.
2022        for (size_t reg = 0; reg < num_regs; ++reg) {
2023          mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2024          if (ref != NULL) {
2025            visitor_(ref, reg, this);
2026          }
2027        }
2028      } else {
2029        // Java method.
2030        // The portable path uses a DexGcMap, stored in Method::native_gc_map_.
2031        const uint8_t* gc_map = m->GetNativeGcMap();
2032        CHECK(gc_map != NULL) << PrettyMethod(m);
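        // The first four bytes of the GC map encode, big-endian, the length of
        // the map data that follows.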
2033        uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
2034                                                       (gc_map[1] << 16) |
2035                                                       (gc_map[2] << 8) |
2036                                                       (gc_map[3] << 0));
2037        verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
2038        uint32_t dex_pc = GetDexPc();
2039        const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
2040        DCHECK(reg_bitmap != NULL);
2041        num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
2042        for (size_t reg = 0; reg < num_regs; ++reg) {
2043          if (TestBitmap(reg, reg_bitmap)) {
2044            mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2045            if (ref != NULL) {
2046              visitor_(ref, reg, this);
2047            }
2048          }
2049        }
2050      }
2051    } else {
2052      mirror::ArtMethod* m = GetMethod();
2053      // Process register map (which native and runtime methods don't have)
2054      if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
2055        const uint8_t* native_gc_map = m->GetNativeGcMap();
2056        CHECK(native_gc_map != NULL) << PrettyMethod(m);
2057        mh_.ChangeMethod(m);
2058        const DexFile::CodeItem* code_item = mh_.GetCodeItem();
2059        DCHECK(code_item != NULL) << PrettyMethod(m);  // Can't be NULL or how would we compile its instructions?
2060        NativePcOffsetToReferenceMap map(native_gc_map);
2061        size_t num_regs = std::min(map.RegWidth() * 8,
2062                                   static_cast<size_t>(code_item->registers_size_));
2063        if (num_regs > 0) {
2064          const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
2065          DCHECK(reg_bitmap != NULL);
2066          const VmapTable vmap_table(m->GetVmapTable());
2067          uint32_t core_spills = m->GetCoreSpillMask();
2068          uint32_t fp_spills = m->GetFpSpillMask();
2069          size_t frame_size = m->GetFrameSizeInBytes();
2070          // For all dex registers in the bitmap
2071          mirror::ArtMethod** cur_quick_frame = GetCurrentQuickFrame();
2072          DCHECK(cur_quick_frame != NULL);
2073          for (size_t reg = 0; reg < num_regs; ++reg) {
2074            // Does this register hold a reference?
2075            if (TestBitmap(reg, reg_bitmap)) {
2076              uint32_t vmap_offset;
2077              mirror::Object* ref;
2078              if (vmap_table.IsInContext(reg, kReferenceVReg, &vmap_offset)) {
2079                uintptr_t val = GetGPR(vmap_table.ComputeRegister(core_spills, vmap_offset,
2080                                                                  kReferenceVReg));
2081                ref = reinterpret_cast<mirror::Object*>(val);
2082              } else {
2083                ref = reinterpret_cast<mirror::Object*>(GetVReg(cur_quick_frame, code_item,
2084                                                                core_spills, fp_spills, frame_size,
2085                                                                reg));
2086              }
2087
2088              if (ref != NULL) {
2089                visitor_(ref, reg, this);
2090              }
2091            }
2092          }
2093        }
2094      }
2095    }
2096    return true;
2097  }
2098
2099 private:
2100  static bool TestBitmap(int reg, const uint8_t* reg_vector) {
2101    return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
2102  }
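  // For example, TestBitmap(10, v) inspects bit 2 of v[1]: 10 / 8 == 1 selects
  // the byte and 10 % 8 == 2 selects the bit within it.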
2103
2104  // Visitor for when we visit a root.
2105  const RootVisitor& visitor_;
2106
2107  // A method helper we keep around to avoid dex file/cache re-computations.
2108  MethodHelper mh_;
2109};
2110
2111class RootCallbackVisitor {
2112 public:
2113  RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {}
2114
2115  void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const {
2116    visitor_(obj, arg_);
2117  }
2118
2119 private:
2120  RootVisitor* visitor_;
2121  void* arg_;
2122};
2123
2124class VerifyCallbackVisitor {
2125 public:
2126  VerifyCallbackVisitor(VerifyRootVisitor* visitor, void* arg)
2127      : visitor_(visitor),
2128        arg_(arg) {
2129  }
2130
2131  void operator()(const mirror::Object* obj, size_t vreg, const StackVisitor* visitor) const {
2132    visitor_(obj, arg_, vreg, visitor);
2133  }
2134
2135 private:
2136  VerifyRootVisitor* const visitor_;
2137  void* const arg_;
2138};
2139
2140struct VerifyRootWrapperArg {
2141  VerifyRootVisitor* visitor;
2142  void* arg;
2143};
2144
2145static void VerifyRootWrapperCallback(const mirror::Object* root, void* arg) {
2146  VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
2147  wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
2148}
2149
2150void Thread::VerifyRoots(VerifyRootVisitor* visitor, void* arg) {
2151  // We need to map from a RootVisitor to VerifyRootVisitor, so pass in nulls for arguments we
2152  // don't have.
2153  VerifyRootWrapperArg wrapperArg;
2154  wrapperArg.arg = arg;
2155  wrapperArg.visitor = visitor;
2156
2157  if (opeer_ != NULL) {
2158    VerifyRootWrapperCallback(opeer_, &wrapperArg);
2159  }
2160  if (exception_ != NULL) {
2161    VerifyRootWrapperCallback(exception_, &wrapperArg);
2162  }
2163  throw_location_.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2164  if (class_loader_override_ != NULL) {
2165    VerifyRootWrapperCallback(class_loader_override_, &wrapperArg);
2166  }
2167  jni_env_->locals.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2168  jni_env_->monitors.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2169
2170  SirtVisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2171
2172  // Visit roots on this thread's stack
2173  Context* context = GetLongJumpContext();
2174  VerifyCallbackVisitor visitorToCallback(visitor, arg);
2175  ReferenceMapVisitor<VerifyCallbackVisitor> mapper(this, context, visitorToCallback);
2176  mapper.WalkStack();
2177  ReleaseLongJumpContext(context);
2178
2179  for (const instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
2182    mirror::Object* this_object = frame.this_object_;
2183    if (this_object != NULL) {
2184      VerifyRootWrapperCallback(this_object, &wrapperArg);
2185    }
2186    mirror::ArtMethod* method = frame.method_;
2187    VerifyRootWrapperCallback(method, &wrapperArg);
2188  }
2189}
2190
2191void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
2192  if (opeer_ != NULL) {
2193    visitor(opeer_, arg);
2194  }
2195  if (exception_ != NULL) {
2196    visitor(exception_, arg);
2197  }
2198  throw_location_.VisitRoots(visitor, arg);
2199  if (class_loader_override_ != NULL) {
2200    visitor(class_loader_override_, arg);
2201  }
2202  jni_env_->locals.VisitRoots(visitor, arg);
2203  jni_env_->monitors.VisitRoots(visitor, arg);
2204
2205  SirtVisitRoots(visitor, arg);
2206
2207  // Visit roots on this thread's stack
2208  Context* context = GetLongJumpContext();
2209  RootCallbackVisitor visitorToCallback(visitor, arg);
2210  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context, visitorToCallback);
2211  mapper.WalkStack();
2212  ReleaseLongJumpContext(context);
2213
2214  for (const instrumentation::InstrumentationStackFrame& frame : *GetInstrumentationStack()) {
2215    mirror::Object* this_object = frame.this_object_;
2216    if (this_object != NULL) {
2217      visitor(this_object, arg);
2218    }
2219    mirror::ArtMethod* method = frame.method_;
2220    visitor(method, arg);
2221  }
2222}
2223
2224static void VerifyObject(const mirror::Object* root, void* arg) {
2225  gc::Heap* heap = reinterpret_cast<gc::Heap*>(arg);
2226  heap->VerifyObject(root);
2227}
2228
2229void Thread::VerifyStackImpl() {
2230  UniquePtr<Context> context(Context::Create());
2231  RootCallbackVisitor visitorToCallback(VerifyObject, Runtime::Current()->GetHeap());
2232  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitorToCallback);
2233  mapper.WalkStack();
2234}
2235
2236// Set the stack end to the value to be used during a stack overflow.
2237void Thread::SetStackEndForStackOverflow() {
2238  // During stack overflow we allow use of the full stack.
2239  if (stack_end_ == stack_begin_) {
2240    // However, we seem to have already extended to use the full stack.
2241    LOG(ERROR) << "Need to increase kStackOverflowReservedBytes (currently "
2242               << kStackOverflowReservedBytes << ")?";
2243    DumpStack(LOG(ERROR));
2244    LOG(FATAL) << "Recursive stack overflow.";
2245  }
2246
2247  stack_end_ = stack_begin_;
2248}
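// A rough picture of the layout this relies on (assuming the usual ART setup,
// where stack_end_ = stack_begin_ + kStackOverflowReservedBytes and the stack
// grows downward toward stack_begin_):
//
//   low | stack_begin_ ... reserved bytes ... stack_end_ ... usable stack | high
//
// Lowering stack_end_ to stack_begin_ releases the reserved region so that the
// StackOverflowError can be constructed and delivered.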
2249
2250std::ostream& operator<<(std::ostream& os, const Thread& thread) {
2251  thread.ShortDump(os);
2252  return os;
2253}
2254
2255}  // namespace art
2256