thread.cc revision 7940e44f4517de5e2634a7e07d58d0fb26160513
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_DALVIK

#include "thread.h"

#include <cutils/trace.h>
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
#include <sys/time.h>

#include <algorithm>
#include <bitset>
#include <cerrno>
#include <iostream>
#include <list>

#include "base/mutex.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "cutils/atomic.h"
#include "cutils/atomic-inline.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "gc_map.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "gc/space/space.h"
#include "invoke_arg_array_builder.h"
#include "jni_internal.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "oat/runtime/context.h"
#include "object_utils.h"
#include "reflection.h"
#include "runtime.h"
#include "runtime_support.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
#include "sirt_ref.h"
#include "stack.h"
#include "stack_indirect_reference_table.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "utils.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"

namespace art {

bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = NULL;

static const char* kThreadNameDuringStartup = "<native thread without managed peer>";

void Thread::InitCardTable() {
  card_table_ = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
}

#if !defined(__APPLE__)
static void UnimplementedEntryPoint() {
  UNIMPLEMENTED(FATAL);
}
#endif

void Thread::InitFunctionPointers() {
#if !defined(__APPLE__) // The Mac GCC is too old to accept this code.
  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&entrypoints_);
  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(entrypoints_));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
  }
#endif
  InitEntryPoints(&entrypoints_);
}

void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
  deoptimization_shadow_frame_ = sf;
}

void Thread::SetDeoptimizationReturnValue(const JValue& ret_val) {
  deoptimization_return_value_.SetJ(ret_val.GetJ());
}

ShadowFrame* Thread::GetAndClearDeoptimizationShadowFrame(JValue* ret_val) {
  ShadowFrame* sf = deoptimization_shadow_frame_;
  deoptimization_shadow_frame_ = NULL;
  ret_val->SetJ(deoptimization_return_value_.GetJ());
  return sf;
}

void Thread::InitTid() {
  tid_ = ::art::GetTid();
}

void Thread::InitAfterFork() {
  // One thread (us) survived the fork, but we have a new tid so we need to
  // update the value stashed in this Thread*.
  InitTid();
}

void* Thread::CreateCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  Runtime* runtime = Runtime::Current();
  if (runtime == NULL) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
    return NULL;
  }
  {
    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
    //       after self->Init().
    MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
    // Check that if we got here we cannot be shutting down (as shutdown should never have started
    // while threads are being born).
    CHECK(!runtime->IsShuttingDown());
    self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
    Runtime::Current()->EndThreadBirth();
  }
  {
    ScopedObjectAccess soa(self);

    // Copy peer into self, deleting global reference when done.
    CHECK(self->jpeer_ != NULL);
    self->opeer_ = soa.Decode<mirror::Object*>(self->jpeer_);
    self->GetJniEnv()->DeleteGlobalRef(self->jpeer_);
    self->jpeer_ = NULL;

    {
      SirtRef<mirror::String> thread_name(self, self->GetThreadName(soa));
      self->SetThreadName(thread_name->ToModifiedUtf8().c_str());
    }
    Dbg::PostThreadStart(self);

    // Invoke the 'run' method of our java.lang.Thread.
    mirror::Object* receiver = self->opeer_;
    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
    mirror::AbstractMethod* m =
        receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid));
    JValue result;
    ArgArray arg_array(NULL, 0);
    arg_array.Append(reinterpret_cast<uint32_t>(receiver));
    m->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
  }
  // Detach and delete self.
  Runtime::Current()->GetThreadList()->Unregister(self);

  return NULL;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa,
                                  mirror::Object* thread_peer) {
  mirror::Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetInt(thread_peer)));
  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
  // to stop it from going away.
  if (kIsDebugBuild) {
    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
    if (result != NULL && !result->IsSuspended()) {
      Locks::thread_list_lock_->AssertHeld(soa.Self());
    }
  }
  return result;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, jobject java_thread) {
  return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread));
}

static size_t FixStackSize(size_t stack_size) {
  // A stack size of zero means "use the default".
  if (stack_size == 0) {
    stack_size = Runtime::Current()->GetDefaultStackSize();
  }

  // Dalvik used the bionic pthread default stack size for native threads,
  // so include that here to support apps that expect large native stacks.
  stack_size += 1 * MB;

  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
  if (stack_size < PTHREAD_STACK_MIN) {
    stack_size = PTHREAD_STACK_MIN;
  }

  // It's likely that callers are trying to ensure they have at least a certain amount of
  // stack space, so we should add our reserved space on top of what they requested, rather
  // than implicitly take it away from them.
  stack_size += Thread::kStackOverflowReservedBytes;

  // Some systems require the stack size to be a multiple of the system page size, so round up.
  stack_size = RoundUp(stack_size, kPageSize);

  return stack_size;
}

static void SigAltStack(stack_t* new_stack, stack_t* old_stack) {
  if (sigaltstack(new_stack, old_stack) == -1) {
    PLOG(FATAL) << "sigaltstack failed";
  }
}

static void SetUpAlternateSignalStack() {
  // Create and set an alternate signal stack.
  stack_t ss;
  ss.ss_sp = new uint8_t[SIGSTKSZ];
  ss.ss_size = SIGSTKSZ;
  ss.ss_flags = 0;
  CHECK(ss.ss_sp != NULL);
  SigAltStack(&ss, NULL);

  // Double-check that it worked.
  ss.ss_sp = NULL;
  SigAltStack(NULL, &ss);
  VLOG(threads) << "Alternate signal stack is " << PrettySize(ss.ss_size) << " at " << ss.ss_sp;
}

static void TearDownAlternateSignalStack() {
  // Get the pointer so we can free the memory.
  stack_t ss;
  SigAltStack(NULL, &ss);
  uint8_t* allocated_signal_stack = reinterpret_cast<uint8_t*>(ss.ss_sp);

  // Tell the kernel to stop using it.
  ss.ss_sp = NULL;
  ss.ss_flags = SS_DISABLE;
  ss.ss_size = SIGSTKSZ; // Avoid ENOMEM failure with Mac OS' buggy libc.
  SigAltStack(&ss, NULL);

  // Free it.
  delete[] allocated_signal_stack;
}

void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
  CHECK(java_peer != NULL);
  Thread* self = static_cast<JNIEnvExt*>(env)->self;
  Runtime* runtime = Runtime::Current();

  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
  bool thread_start_during_shutdown = false;
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDown()) {
      thread_start_during_shutdown = true;
    } else {
      runtime->StartThreadBirth();
    }
  }
  if (thread_start_during_shutdown) {
    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
    return;
  }

  Thread* child_thread = new Thread(is_daemon);
  // Use global JNI ref to hold peer live while child thread starts.
  child_thread->jpeer_ = env->NewGlobalRef(java_peer);
  stack_size = FixStackSize(stack_size);

  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to
  // assign it.
  env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
                   reinterpret_cast<jint>(child_thread));

  pthread_t new_pthread;
  pthread_attr_t attr;
  CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
  CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED");
  CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
  int pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, child_thread);
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");

  if (pthread_create_result != 0) {
    // pthread_create(3) failed, so clean up.
    {
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      runtime->EndThreadBirth();
    }
    // Manually delete the global reference since Thread::Init will not have been run.
    env->DeleteGlobalRef(child_thread->jpeer_);
    child_thread->jpeer_ = NULL;
    delete child_thread;
    child_thread = NULL;
    // TODO: remove from thread group?
    env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
    {
      std::string msg(StringPrintf("pthread_create (%s stack) failed: %s",
                                   PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
      ScopedObjectAccess soa(env);
      soa.Self()->ThrowOutOfMemoryError(msg.c_str());
    }
  }
}

void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
  // This function does all the initialization that must be run by the native thread it applies to.
  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
  // we can handshake with the corresponding native thread when it's ready.) Check this native
  // thread hasn't been through here already...
  CHECK(Thread::Current() == NULL);
  SetUpAlternateSignalStack();
  InitCpu();
  InitFunctionPointers();
  InitCardTable();
  InitTid();
  // Set pthread_self_ ahead of pthread_setspecific so that Thread::Current() works; this
  // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
  pthread_self_ = pthread_self();
  CHECK(is_started_);
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
  DCHECK_EQ(Thread::Current(), this);

  thin_lock_id_ = thread_list->AllocThreadId(this);
  InitStackHwm();

  jni_env_ = new JNIEnvExt(this, java_vm);
  thread_list->Register(this);
}

Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                       bool create_peer) {
  Thread* self;
  Runtime* runtime = Runtime::Current();
  if (runtime == NULL) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
    return NULL;
  }
  {
    MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDown()) {
      LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
      return NULL;
    } else {
      Runtime::Current()->StartThreadBirth();
      self = new Thread(as_daemon);
      self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
      Runtime::Current()->EndThreadBirth();
    }
  }

  CHECK_NE(self->GetState(), kRunnable);
  self->SetState(kNative);

  // If we're the main thread, ClassLinker won't be created until after we're attached,
  // so that thread needs a two-stage attach. Regular threads don't need this hack.
  // In the compiler, all threads need this hack, because no-one's going to be getting
  // a native peer!
  if (create_peer) {
    self->CreatePeer(thread_name, as_daemon, thread_group);
  } else {
    // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
    if (thread_name != NULL) {
      self->name_->assign(thread_name);
      ::art::SetThreadName(thread_name);
    }
  }

  return self;
}

void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->IsStarted());
  JNIEnv* env = jni_env_;

  if (thread_group == NULL) {
    thread_group = runtime->GetMainThreadGroup();
  }
  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
  jint thread_priority = GetNativePriority();
  jboolean thread_is_daemon = as_daemon;

  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
  if (peer.get() == NULL) {
    CHECK(IsExceptionPending());
    return;
  }
  {
    ScopedObjectAccess soa(this);
    opeer_ = soa.Decode<mirror::Object*>(peer.get());
  }
  env->CallNonvirtualVoidMethod(peer.get(),
                                WellKnownClasses::java_lang_Thread,
                                WellKnownClasses::java_lang_Thread_init,
                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
  AssertNoPendingException();

  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());
  jni_env_->SetIntField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
                        reinterpret_cast<jint>(self));

  ScopedObjectAccess soa(self);
  SirtRef<mirror::String> peer_thread_name(soa.Self(), GetThreadName(soa));
  if (peer_thread_name.get() == NULL) {
    // The Thread constructor should have set the Thread.name to a
    // non-null value. However, because we can run without code
    // available (in the compiler, in tests), we manually assign the
    // fields the constructor should have set.
    soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
        SetBoolean(opeer_, thread_is_daemon);
    soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
        SetObject(opeer_, soa.Decode<mirror::Object*>(thread_group));
    soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
        SetObject(opeer_, soa.Decode<mirror::Object*>(thread_name.get()));
    soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
        SetInt(opeer_, thread_priority);
    peer_thread_name.reset(GetThreadName(soa));
  }
  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
  if (peer_thread_name.get() != NULL) {
    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
  }
}

void Thread::SetThreadName(const char* name) {
  name_->assign(name);
  ::art::SetThreadName(name);
  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
}

void Thread::InitStackHwm() {
  void* stack_base;
  size_t stack_size;
  GetThreadStack(pthread_self_, stack_base, stack_size);

  // TODO: include this in the thread dumps; potentially useful in SIGQUIT output?
  VLOG(threads) << StringPrintf("Native stack is at %p (%s)", stack_base, PrettySize(stack_size).c_str());

  stack_begin_ = reinterpret_cast<byte*>(stack_base);
  stack_size_ = stack_size;

  if (stack_size_ <= kStackOverflowReservedBytes) {
    LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << stack_size_ << " bytes)";
  }

  // TODO: move this into the Linux GetThreadStack implementation.
#if !defined(__APPLE__)
  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
  // will be broken because we'll die long before we get close to 2GB.
  bool is_main_thread = (::art::GetTid() == getpid());
  if (is_main_thread) {
    rlimit stack_limit;
    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
    }
    if (stack_limit.rlim_cur == RLIM_INFINITY) {
      // Find the default stack size for new threads...
      pthread_attr_t default_attributes;
      size_t default_stack_size;
      CHECK_PTHREAD_CALL(pthread_attr_init, (&default_attributes), "default stack size query");
      CHECK_PTHREAD_CALL(pthread_attr_getstacksize, (&default_attributes, &default_stack_size),
                         "default stack size query");
      CHECK_PTHREAD_CALL(pthread_attr_destroy, (&default_attributes), "default stack size query");

      // ...and use that as our limit.
      size_t old_stack_size = stack_size_;
      stack_size_ = default_stack_size;
      stack_begin_ += (old_stack_size - stack_size_);
      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
                    << " to " << PrettySize(stack_size_)
                    << " with base " << reinterpret_cast<void*>(stack_begin_);
    }
  }
#endif

  // Set stack_end_ to the bottom of the stack, saving space for stack overflow handling.
  ResetDefaultStackEnd();

  // Sanity check.
  int stack_variable;
  CHECK_GT(&stack_variable, reinterpret_cast<void*>(stack_end_));
}

void Thread::ShortDump(std::ostream& os) const {
  os << "Thread[";
  if (GetThinLockId() != 0) {
    // If we're in kStarting, we won't have a thin lock id or tid yet.
    os << GetThinLockId()
             << ",tid=" << GetTid() << ',';
  }
  os << GetState()
           << ",Thread*=" << this
           << ",peer=" << opeer_
           << ",\"" << *name_ << "\""
           << "]";
}

void Thread::Dump(std::ostream& os) const {
  DumpState(os);
  DumpStack(os);
}

mirror::String* Thread::GetThreadName(const ScopedObjectAccessUnchecked& soa) const {
  mirror::Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
  return (opeer_ != NULL) ? reinterpret_cast<mirror::String*>(f->GetObject(opeer_)) : NULL;
}

void Thread::GetThreadName(std::string& name) const {
  name.assign(*name_);
}

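// Note: the flags share a single 32-bit word with the thread state (see the StateAndFlags union
// used in RequestCheckpoint below), so they are set and cleared atomically on the whole word.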
void Thread::AtomicSetFlag(ThreadFlag flag) {
  android_atomic_or(flag, &state_and_flags_.as_int);
}

void Thread::AtomicClearFlag(ThreadFlag flag) {
  android_atomic_and(-1 ^ flag, &state_and_flags_.as_int);
}

// Attempt to rectify locks so that we dump thread list with required locks before exiting.
static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
  LOG(ERROR) << *thread << " suspend count already zero.";
  Locks::thread_suspend_count_lock_->Unlock(self);
  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
    Locks::mutator_lock_->SharedTryLock(self);
    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
    }
  }
  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
    Locks::thread_list_lock_->TryLock(self);
    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
    }
  }
  std::ostringstream ss;
  Runtime::Current()->GetThreadList()->DumpLocked(ss);
  LOG(FATAL) << ss.str();
}

void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
  DCHECK(delta == -1 || delta == +1 || delta == -debug_suspend_count_)
      << delta << " " << debug_suspend_count_ << " " << this;
  DCHECK_GE(suspend_count_, debug_suspend_count_) << this;
  Locks::thread_suspend_count_lock_->AssertHeld(self);
  if (this != self && !IsSuspended()) {
    Locks::thread_list_lock_->AssertHeld(self);
  }
  if (UNLIKELY(delta < 0 && suspend_count_ <= 0)) {
    UnsafeLogFatalForSuspendCount(self, this);
    return;
  }

  suspend_count_ += delta;
  if (for_debugger) {
    debug_suspend_count_ += delta;
  }

  if (suspend_count_ == 0) {
    AtomicClearFlag(kSuspendRequest);
  } else {
    AtomicSetFlag(kSuspendRequest);
  }
}

void Thread::RunCheckpointFunction() {
  CHECK(checkpoint_function_ != NULL);
  ATRACE_BEGIN("Checkpoint function");
  checkpoint_function_->Run(this);
  ATRACE_END();
}

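// Note: returns true if the checkpoint request flag was installed; the compare-and-swap below
// fails (and false is returned) if the thread's state or flags changed, e.g. it is no longer
// runnable.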
bool Thread::RequestCheckpoint(Closure* function) {
  CHECK(!ReadFlag(kCheckpointRequest)) << "Already have a pending checkpoint request";
  checkpoint_function_ = function;
  union StateAndFlags old_state_and_flags = state_and_flags_;
  // We must be runnable to request a checkpoint.
  old_state_and_flags.as_struct.state = kRunnable;
  union StateAndFlags new_state_and_flags = old_state_and_flags;
  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
  int succeeded = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
                                         &state_and_flags_.as_int);
  return succeeded == 0;
}

void Thread::FullSuspendCheck() {
  VLOG(threads) << this << " self-suspending";
  ATRACE_BEGIN("Full suspend check");
  // Make thread appear suspended to other threads, release mutator_lock_.
  TransitionFromRunnableToSuspended(kSuspended);
  // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
  TransitionFromSuspendedToRunnable();
  ATRACE_END();
  VLOG(threads) << this << " self-reviving";
}

Thread* Thread::SuspendForDebugger(jobject peer, bool request_suspension, bool* timed_out) {
  static const useconds_t kTimeoutUs = 30 * 1000000; // 30s.
  useconds_t total_delay_us = 0;
  useconds_t delay_us = 0;
  bool did_suspend_request = false;
  *timed_out = false;
  while (true) {
    Thread* thread;
    {
      ScopedObjectAccess soa(Thread::Current());
      Thread* self = soa.Self();
      MutexLock mu(self, *Locks::thread_list_lock_);
      thread = Thread::FromManagedThread(soa, peer);
      if (thread == NULL) {
        JNIEnv* env = self->GetJniEnv();
        ScopedLocalRef<jstring> scoped_name_string(env,
                                                   (jstring)env->GetObjectField(peer,
                                                              WellKnownClasses::java_lang_Thread_name));
        ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
        if (scoped_name_chars.c_str() == NULL) {
          LOG(WARNING) << "No such thread for suspend: " << peer;
          env->ExceptionClear();
        } else {
          LOG(WARNING) << "No such thread for suspend: " << peer << ":" << scoped_name_chars.c_str();
        }

        return NULL;
      }
      {
        MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
        if (request_suspension) {
          thread->ModifySuspendCount(soa.Self(), +1, true /* for_debugger */);
          request_suspension = false;
          did_suspend_request = true;
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised if this is the current thread
        // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
        // to just explicitly handle the current thread in the callers to this code.
        CHECK_NE(thread, soa.Self()) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          return thread;
        }
        if (total_delay_us >= kTimeoutUs) {
          LOG(ERROR) << "Thread suspension timed out: " << peer;
          if (did_suspend_request) {
            thread->ModifySuspendCount(soa.Self(), -1, true /* for_debugger */);
          }
          *timed_out = true;
          return NULL;
        }
      }
      // Release locks and come out of runnable state.
    }
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      BaseMutex* held_mutex = Thread::Current()->GetHeldMutex(static_cast<LockLevel>(i));
      if (held_mutex != NULL) {
        LOG(FATAL) << "Holding " << held_mutex->GetName()
            << " while sleeping for thread suspension";
      }
    }
    {
      useconds_t new_delay_us = delay_us * 2;
      CHECK_GE(new_delay_us, delay_us);
      if (new_delay_us < 500000) {  // Don't allow sleeping to be more than 0.5s.
        delay_us = new_delay_us;
      }
    }
    if (delay_us == 0) {
      sched_yield();
      // Default to 1 millisecond (note that this gets multiplied by 2 before the first sleep).
      delay_us = 500;
    } else {
      usleep(delay_us);
      total_delay_us += delay_us;
    }
  }
}

void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
  std::string group_name;
  int priority;
  bool is_daemon = false;
  Thread* self = Thread::Current();

  if (self != NULL && thread != NULL && thread->opeer_ != NULL) {
    ScopedObjectAccessUnchecked soa(self);
    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(thread->opeer_);
    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(thread->opeer_);

    mirror::Object* thread_group =
        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->opeer_);

    if (thread_group != NULL) {
      mirror::Field* group_name_field =
          soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
      mirror::String* group_name_string =
          reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
      group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : "<null>";
    }
  } else {
    priority = GetNativePriority();
  }

  std::string scheduler_group_name(GetSchedulerGroupName(tid));
  if (scheduler_group_name.empty()) {
    scheduler_group_name = "default";
  }

  if (thread != NULL) {
    os << '"' << *thread->name_ << '"';
    if (is_daemon) {
      os << " daemon";
    }
    os << " prio=" << priority
       << " tid=" << thread->GetThinLockId()
       << " " << thread->GetState();
    if (thread->IsStillStarting()) {
      os << " (still starting up)";
    }
    os << "\n";
  } else {
    os << '"' << ::art::GetThreadName(tid) << '"'
       << " prio=" << priority
       << " (not attached)\n";
  }

  if (thread != NULL) {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    os << "  | group=\"" << group_name << "\""
       << " sCount=" << thread->suspend_count_
       << " dsCount=" << thread->debug_suspend_count_
       << " obj=" << reinterpret_cast<void*>(thread->opeer_)
       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
  }

  os << "  | sysTid=" << tid
     << " nice=" << getpriority(PRIO_PROCESS, tid)
     << " cgrp=" << scheduler_group_name;
  if (thread != NULL) {
    int policy;
    sched_param sp;
    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->pthread_self_, &policy, &sp), __FUNCTION__);
    os << " sched=" << policy << "/" << sp.sched_priority
       << " handle=" << reinterpret_cast<void*>(thread->pthread_self_);
  }
  os << "\n";

  // Grab the scheduler stats for this thread.
  std::string scheduler_stats;
  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
    scheduler_stats.resize(scheduler_stats.size() - 1); // Lose the trailing '\n'.
  } else {
    scheduler_stats = "0 0 0";
  }

  char native_thread_state = '?';
  int utime = 0;
  int stime = 0;
  int task_cpu = 0;
  GetTaskStats(tid, native_thread_state, utime, stime, task_cpu);

  os << "  | state=" << native_thread_state
     << " schedstat=( " << scheduler_stats << " )"
     << " utm=" << utime
     << " stm=" << stime
     << " core=" << task_cpu
     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
  if (thread != NULL) {
    os << "  | stack=" << reinterpret_cast<void*>(thread->stack_begin_) << "-" << reinterpret_cast<void*>(thread->stack_end_)
       << " stackSize=" << PrettySize(thread->stack_size_) << "\n";
  }
}

void Thread::DumpState(std::ostream& os) const {
  Thread::DumpState(os, this, GetTid());
}

struct StackDumpVisitor : public StackVisitor {
  StackDumpVisitor(std::ostream& os, Thread* thread, Context* context, bool can_allocate)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), os(os), thread(thread), can_allocate(can_allocate),
        last_method(NULL), last_line_number(0), repetition_count(0), frame_count(0) {
  }

  virtual ~StackDumpVisitor() {
    if (frame_count == 0) {
      os << "  (no managed stack frames)\n";
    }
  }

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::AbstractMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }
    const int kMaxRepetition = 3;
    mirror::Class* c = m->GetDeclaringClass();
    const mirror::DexCache* dex_cache = c->GetDexCache();
    int line_number = -1;
    if (dex_cache != NULL) {  // be tolerant of bad input
      const DexFile& dex_file = *dex_cache->GetDexFile();
      line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
    }
    if (line_number == last_line_number && last_method == m) {
      repetition_count++;
    } else {
      if (repetition_count >= kMaxRepetition) {
        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
      }
      repetition_count = 0;
      last_line_number = line_number;
      last_method = m;
    }
    if (repetition_count < kMaxRepetition) {
      os << "  at " << PrettyMethod(m, false);
      if (m->IsNative()) {
        os << "(Native method)";
      } else {
        mh.ChangeMethod(m);
        const char* source_file(mh.GetDeclaringClassSourceFile());
        os << "(" << (source_file != NULL ? source_file : "unavailable")
           << ":" << line_number << ")";
      }
      os << "\n";
      if (frame_count == 0) {
        Monitor::DescribeWait(os, thread);
      }
      if (can_allocate) {
        Monitor::VisitLocks(this, DumpLockedObject, &os);
      }
    }

    ++frame_count;
    return true;
  }

  static void DumpLockedObject(mirror::Object* o, void* context)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    std::ostream& os = *reinterpret_cast<std::ostream*>(context);
    os << "  - locked <" << o << "> (a " << PrettyTypeOf(o) << ")\n";
  }

  std::ostream& os;
  const Thread* thread;
  const bool can_allocate;
  MethodHelper mh;
  mirror::AbstractMethod* last_method;
  int last_line_number;
  int repetition_count;
  int frame_count;
};

static bool ShouldShowNativeStack(const Thread* thread)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ThreadState state = thread->GetState();

  // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
  if (state > kWaiting && state < kStarting) {
    return true;
  }

  // In an Object.wait variant or Thread.sleep? That's not interesting.
  if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
    return false;
  }

  // In some other native method? That's interesting.
  // We don't just check kNative because native methods will be in state kSuspended if they're
  // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
  // thread-startup states if it's early enough in their life cycle (http://b/7432159).
  mirror::AbstractMethod* current_method = thread->GetCurrentMethod(NULL);
  return current_method != NULL && current_method->IsNative();
}

void Thread::DumpStack(std::ostream& os) const {
  // TODO: we call this code when dying but may not have suspended the thread ourself. The
  //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
  //       the race with the thread_suspend_count_lock_).
  bool dump_for_abort = (gAborting > 0);
  if (this == Thread::Current() || IsSuspended() || dump_for_abort) {
    // If we're currently in native code, dump that stack before dumping the managed stack.
    if (dump_for_abort || ShouldShowNativeStack(this)) {
      DumpKernelStack(os, GetTid(), "  kernel: ", false);
      DumpNativeStack(os, GetTid(), "  native: ", false);
    }
    UniquePtr<Context> context(Context::Create());
    StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(), !throwing_OutOfMemoryError_);
    dumper.WalkStack();
  } else {
    os << "Not able to dump stack of thread that isn't suspended";
  }
}

909  Thread* self = reinterpret_cast<Thread*>(arg);
910  if (self->thread_exit_check_count_ == 0) {
911    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's going to use a pthread_key_create destructor?): " << *self;
912    CHECK(is_started_);
913    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
914    self->thread_exit_check_count_ = 1;
915  } else {
916    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
917  }
918}
919
920void Thread::Startup() {
921  CHECK(!is_started_);
922  is_started_ = true;
923  {
924    MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);  // Keep GCC happy.
925    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
926                                         *Locks::thread_suspend_count_lock_);
927  }
928
929  // Allocate a TLS slot.
930  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");
931
932  // Double-check the TLS slot allocation.
933  if (pthread_getspecific(pthread_key_self_) != NULL) {
934    LOG(FATAL) << "Newly-created pthread TLS slot is not NULL";
935  }
936}
937
938void Thread::FinishStartup() {
939  Runtime* runtime = Runtime::Current();
940  CHECK(runtime->IsStarted());
941
942  // Finish attaching the main thread.
943  ScopedObjectAccess soa(Thread::Current());
944  Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
945
946  Runtime::Current()->GetClassLinker()->RunRootClinits();
947}
948
949void Thread::Shutdown() {
950  CHECK(is_started_);
951  is_started_ = false;
952  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
953  MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
954  if (resume_cond_ != NULL) {
955    delete resume_cond_;
956    resume_cond_ = NULL;
957  }
958}
959
Thread::Thread(bool daemon)
    : suspend_count_(0),
      card_table_(NULL),
      exception_(NULL),
      stack_end_(NULL),
      managed_stack_(),
      jni_env_(NULL),
      self_(NULL),
      opeer_(NULL),
      jpeer_(NULL),
      stack_begin_(NULL),
      stack_size_(0),
      thin_lock_id_(0),
      tid_(0),
      wait_mutex_(new Mutex("a thread wait mutex")),
      wait_cond_(new ConditionVariable("a thread wait condition variable", *wait_mutex_)),
      wait_monitor_(NULL),
      interrupted_(false),
      wait_next_(NULL),
      monitor_enter_object_(NULL),
      top_sirt_(NULL),
      runtime_(NULL),
      class_loader_override_(NULL),
      long_jump_context_(NULL),
      throwing_OutOfMemoryError_(false),
      debug_suspend_count_(0),
      debug_invoke_req_(new DebugInvokeReq),
      deoptimization_shadow_frame_(NULL),
      instrumentation_stack_(new std::deque<instrumentation::InstrumentationStackFrame>),
      name_(new std::string(kThreadNameDuringStartup)),
      daemon_(daemon),
      pthread_self_(0),
      no_thread_suspension_(0),
      last_no_thread_suspension_cause_(NULL),
      checkpoint_function_(0),
      thread_exit_check_count_(0) {
  CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
  state_and_flags_.as_struct.flags = 0;
  state_and_flags_.as_struct.state = kNative;
  memset(&held_mutexes_[0], 0, sizeof(held_mutexes_));
}

bool Thread::IsStillStarting() const {
  // You might think you can check whether the state is kStarting, but for much of thread startup,
  // the thread is in kNative; it might also be in kVmWait.
  // You might think you can check whether the peer is NULL, but the peer is actually created and
  // assigned fairly early on, and needs to be.
  // It turns out that the last thing to change is the thread name; that's a good proxy for "has
  // this thread _ever_ entered kRunnable".
  return (jpeer_ == NULL && opeer_ == NULL) || (*name_ == kThreadNameDuringStartup);
}

void Thread::AssertNoPendingException() const {
  if (UNLIKELY(IsExceptionPending())) {
    ScopedObjectAccess soa(Thread::Current());
    mirror::Throwable* exception = GetException(NULL);
    LOG(FATAL) << "No pending exception expected: " << exception->Dump();
  }
}

static void MonitorExitVisitor(const mirror::Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS {
  Thread* self = reinterpret_cast<Thread*>(arg);
  mirror::Object* entered_monitor = const_cast<mirror::Object*>(object);
  if (self->HoldsLock(entered_monitor)) {
    LOG(WARNING) << "Calling MonitorExit on object "
                 << object << " (" << PrettyTypeOf(object) << ")"
                 << " left locked by native thread "
                 << *Thread::Current() << " which is detaching";
    entered_monitor->MonitorExit(self);
  }
}

void Thread::Destroy() {
  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());

  if (opeer_ != NULL) {
    ScopedObjectAccess soa(self);
    // We may need to call user-supplied managed code, do this before final clean-up.
    HandleUncaughtExceptions(soa);
    RemoveFromThreadGroup(soa);

    // this.nativePeer = 0;
    soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)->SetInt(opeer_, 0);
    Dbg::PostThreadDeath(self);

    // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
    // who is waiting.
    mirror::Object* lock =
        soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(opeer_);
    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
    if (lock != NULL) {
      ObjectLock locker(self, lock);
      locker.Notify();
    }
  }

  // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
  if (jni_env_ != NULL) {
    jni_env_->monitors.VisitRoots(MonitorExitVisitor, self);
  }
}

Thread::~Thread() {
  if (jni_env_ != NULL && jpeer_ != NULL) {
    // If pthread_create fails we don't have a jni env here.
    jni_env_->DeleteGlobalRef(jpeer_);
    jpeer_ = NULL;
  }
  opeer_ = NULL;

  delete jni_env_;
  jni_env_ = NULL;

  CHECK_NE(GetState(), kRunnable);
  // We may be deleting a stillborn thread.
  SetStateUnsafe(kTerminated);

  delete wait_cond_;
  delete wait_mutex_;

  if (long_jump_context_ != NULL) {
    delete long_jump_context_;
  }

  delete debug_invoke_req_;
  delete instrumentation_stack_;
  delete name_;

  TearDownAlternateSignalStack();
}

void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
  if (!IsExceptionPending()) {
    return;
  }
  ScopedLocalRef<jobject> peer(jni_env_, soa.AddLocalReference<jobject>(opeer_));
  ScopedThreadStateChange tsc(this, kNative);

  // Get and clear the exception.
  ScopedLocalRef<jthrowable> exception(jni_env_, jni_env_->ExceptionOccurred());
  jni_env_->ExceptionClear();

  // If the thread has its own handler, use that.
  ScopedLocalRef<jobject> handler(jni_env_,
                                  jni_env_->GetObjectField(peer.get(),
                                                           WellKnownClasses::java_lang_Thread_uncaughtHandler));
  if (handler.get() == NULL) {
    // Otherwise use the thread group's default handler.
    handler.reset(jni_env_->GetObjectField(peer.get(), WellKnownClasses::java_lang_Thread_group));
  }

  // Call the handler.
  jni_env_->CallVoidMethod(handler.get(),
                           WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException,
                           peer.get(), exception.get());

  // If the handler threw, clear that exception too.
  jni_env_->ExceptionClear();
}

void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
  // this.group.removeThread(this);
  // group can be null if we're in the compiler or a test.
  mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(opeer_);
  if (ogroup != NULL) {
    ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
    ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(opeer_));
    ScopedThreadStateChange tsc(soa.Self(), kNative);
    jni_env_->CallVoidMethod(group.get(), WellKnownClasses::java_lang_ThreadGroup_removeThread,
                             peer.get());
  }
}

size_t Thread::NumSirtReferences() {
  size_t count = 0;
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
    count += cur->NumberOfReferences();
  }
  return count;
}

bool Thread::SirtContains(jobject obj) const {
  mirror::Object** sirt_entry = reinterpret_cast<mirror::Object**>(obj);
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
    if (cur->Contains(sirt_entry)) {
      return true;
    }
  }
  // JNI code invoked from portable code uses shadow frames rather than the SIRT.
  return managed_stack_.ShadowFramesContain(sirt_entry);
}

void Thread::SirtVisitRoots(RootVisitor* visitor, void* arg) {
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
    size_t num_refs = cur->NumberOfReferences();
    for (size_t j = 0; j < num_refs; j++) {
      mirror::Object* object = cur->GetReference(j);
      if (object != NULL) {
        visitor(object, arg);
      }
    }
  }
}

mirror::Object* Thread::DecodeJObject(jobject obj) const {
  Locks::mutator_lock_->AssertSharedHeld(this);
  if (obj == NULL) {
    return NULL;
  }
  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
  IndirectRefKind kind = GetIndirectRefKind(ref);
  mirror::Object* result;
  // The "kinds" below are sorted by the frequency we expect to encounter them.
  if (kind == kLocal) {
    IndirectReferenceTable& locals = jni_env_->locals;
    result = const_cast<mirror::Object*>(locals.Get(ref));
  } else if (kind == kSirtOrInvalid) {
    // TODO: make stack indirect reference table lookup more efficient
    // Check if this is a local reference in the SIRT
    if (LIKELY(SirtContains(obj))) {
      result = *reinterpret_cast<mirror::Object**>(obj);  // Read from SIRT
    } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) {
      // Assume an invalid local reference is actually a direct pointer.
      result = reinterpret_cast<mirror::Object*>(obj);
    } else {
      result = kInvalidIndirectRefObject;
    }
  } else if (kind == kGlobal) {
    JavaVMExt* vm = Runtime::Current()->GetJavaVM();
    IndirectReferenceTable& globals = vm->globals;
    MutexLock mu(const_cast<Thread*>(this), vm->globals_lock);
    result = const_cast<mirror::Object*>(globals.Get(ref));
  } else {
    DCHECK_EQ(kind, kWeakGlobal);
    JavaVMExt* vm = Runtime::Current()->GetJavaVM();
    IndirectReferenceTable& weak_globals = vm->weak_globals;
    MutexLock mu(const_cast<Thread*>(this), vm->weak_globals_lock);
    result = const_cast<mirror::Object*>(weak_globals.Get(ref));
    if (result == kClearedJniWeakGlobal) {
      // This is a special case where it's okay to return NULL.
      return NULL;
    }
  }

  if (UNLIKELY(result == NULL)) {
    JniAbortF(NULL, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
  } else {
    if (kIsDebugBuild && (result != kInvalidIndirectRefObject)) {
      Runtime::Current()->GetHeap()->VerifyObject(result);
    }
  }
  return result;
}

// Implements java.lang.Thread.interrupted.
bool Thread::Interrupted() {
  MutexLock mu(Thread::Current(), *wait_mutex_);
  bool interrupted = interrupted_;
  interrupted_ = false;
  return interrupted;
}

// Implements java.lang.Thread.isInterrupted.
bool Thread::IsInterrupted() {
  MutexLock mu(Thread::Current(), *wait_mutex_);
  return interrupted_;
}

void Thread::Interrupt() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *wait_mutex_);
  if (interrupted_) {
    return;
  }
  interrupted_ = true;
  NotifyLocked(self);
}

void Thread::Notify() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *wait_mutex_);
  NotifyLocked(self);
}

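// Note: used by Notify() and Interrupt() above; callers must already hold wait_mutex_.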
void Thread::NotifyLocked(Thread* self) {
  if (wait_monitor_ != NULL) {
    wait_cond_->Signal(self);
  }
}

class CountStackDepthVisitor : public StackVisitor {
 public:
  CountStackDepthVisitor(Thread* thread)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, NULL),
        depth_(0), skip_depth_(0), skipping_(true) {}

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // We want to skip frames up to and including the exception's constructor.
    // Note we also skip the frame if it doesn't have a method (namely the callee
    // save frame)
    mirror::AbstractMethod* m = GetMethod();
    if (skipping_ && !m->IsRuntimeMethod() &&
        !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
      skipping_ = false;
    }
    if (!skipping_) {
      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
        ++depth_;
      }
    } else {
      ++skip_depth_;
    }
    return true;
  }

  int GetDepth() const {
    return depth_;
  }

  int GetSkipDepth() const {
    return skip_depth_;
  }

 private:
  uint32_t depth_;
  uint32_t skip_depth_;
  bool skipping_;
};

class BuildInternalStackTraceVisitor : public StackVisitor {
 public:
  explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
      : StackVisitor(thread, NULL), self_(self),
        skip_depth_(skip_depth), count_(0), dex_pc_trace_(NULL), method_trace_(NULL) {}

  bool Init(int depth)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Allocate method trace with an extra slot that will hold the PC trace
    SirtRef<mirror::ObjectArray<mirror::Object> >
        method_trace(self_,
                     Runtime::Current()->GetClassLinker()->AllocObjectArray<mirror::Object>(self_,
                                                                                            depth + 1));
    if (method_trace.get() == NULL) {
      return false;
    }
    mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth);
    if (dex_pc_trace == NULL) {
      return false;
    }
    // Save PC trace in last element of method trace, also places it into the
    // object graph.
    method_trace->Set(depth, dex_pc_trace);
    // Set the Object*s and assert that no thread suspension is now possible.
    const char* last_no_suspend_cause =
        self_->StartAssertNoThreadSuspension("Building internal stack trace");
    CHECK(last_no_suspend_cause == NULL) << last_no_suspend_cause;
    method_trace_ = method_trace.get();
    dex_pc_trace_ = dex_pc_trace;
    return true;
  }

  virtual ~BuildInternalStackTraceVisitor() {
    if (method_trace_ != NULL) {
      self_->EndAssertNoThreadSuspension(NULL);
    }
  }

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method_trace_ == NULL || dex_pc_trace_ == NULL) {
      return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError.
    }
    if (skip_depth_ > 0) {
      skip_depth_--;
      return true;
    }
    mirror::AbstractMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;  // Ignore runtime frames (in particular callee save).
    }
    method_trace_->Set(count_, m);
    dex_pc_trace_->Set(count_, GetDexPc());
    ++count_;
    return true;
  }

  mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
    return method_trace_;
  }

 private:
  Thread* const self_;
  // How many more frames to skip.
  int32_t skip_depth_;
  // Current position down stack trace.
  uint32_t count_;
  // Array of dex PC values.
  mirror::IntArray* dex_pc_trace_;
  // An array of the methods on the stack, the last entry is a reference to the PC trace.
  mirror::ObjectArray<mirror::Object>* method_trace_;
};

jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const {
  // Compute depth of stack
  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
  count_visitor.WalkStack();
  int32_t depth = count_visitor.GetDepth();
  int32_t skip_depth = count_visitor.GetSkipDepth();

  // Build internal stack trace.
  BuildInternalStackTraceVisitor build_trace_visitor(soa.Self(), const_cast<Thread*>(this),
                                                     skip_depth);
  if (!build_trace_visitor.Init(depth)) {
    return NULL;  // Allocation failed.
  }
  build_trace_visitor.WalkStack();
  mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
  if (kIsDebugBuild) {
    for (int32_t i = 0; i < trace->GetLength(); ++i) {
      CHECK(trace->Get(i) != NULL);
    }
  }
  return soa.AddLocalReference<jobjectArray>(trace);
}

jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
    jobjectArray output_array, int* stack_depth) {
  // Transition into runnable state to work on Object*/Array*
  ScopedObjectAccess soa(env);
  // Decode the internal stack trace into the depth, method trace and PC trace
  mirror::ObjectArray<mirror::Object>* method_trace =
      soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
  int32_t depth = method_trace->GetLength() - 1;
  mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();

  jobjectArray result;
  mirror::ObjectArray<mirror::StackTraceElement>* java_traces;
  if (output_array != NULL) {
    // Reuse the array we were given.
    result = output_array;
    java_traces = soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(output_array);
    // ...adjusting the number of frames we'll write to not exceed the array length.
    depth = std::min(depth, java_traces->GetLength());
  } else {
    // Create java_trace array and place in local reference table
    java_traces = class_linker->AllocStackTraceElementArray(soa.Self(), depth);
    if (java_traces == NULL) {
      return NULL;
    }
    result = soa.AddLocalReference<jobjectArray>(java_traces);
  }

  if (stack_depth != NULL) {
    *stack_depth = depth;
  }

  MethodHelper mh;
  for (int32_t i = 0; i < depth; ++i) {
    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
    mirror::AbstractMethod* method = down_cast<mirror::AbstractMethod*>(method_trace->Get(i));
    mh.ChangeMethod(method);
    uint32_t dex_pc = pc_trace->Get(i);
    int32_t line_number = mh.GetLineNumFromDexPC(dex_pc);
    // Allocate element, potentially triggering GC
    // TODO: reuse class_name_object via Class::name_?
    const char* descriptor = mh.GetDeclaringClassDescriptor();
    CHECK(descriptor != NULL);
    std::string class_name(PrettyDescriptor(descriptor));
    SirtRef<mirror::String> class_name_object(soa.Self(),
                                              mirror::String::AllocFromModifiedUtf8(soa.Self(),
                                                                                    class_name.c_str()));
    if (class_name_object.get() == NULL) {
      return NULL;
    }
    const char* method_name = mh.GetName();
    CHECK(method_name != NULL);
    SirtRef<mirror::String> method_name_object(soa.Self(),
                                               mirror::String::AllocFromModifiedUtf8(soa.Self(),
                                                                                     method_name));
    if (method_name_object.get() == NULL) {
      return NULL;
    }
    const char* source_file = mh.GetDeclaringClassSourceFile();
    SirtRef<mirror::String> source_name_object(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(),
                                                                                                 source_file));
    mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(soa.Self(),
                                                                      class_name_object.get(),
                                                                      method_name_object.get(),
                                                                      source_name_object.get(),
                                                                      line_number);
    if (obj == NULL) {
      return NULL;
    }
#ifdef MOVING_GARBAGE_COLLECTOR
    // Re-read after potential GC
    java_traces = Decode<ObjectArray<Object>*>(soa.Env(), result);
    method_trace = down_cast<ObjectArray<Object>*>(Decode<Object*>(soa.Env(), internal));
    pc_trace = down_cast<IntArray*>(method_trace->Get(depth));
#endif
    java_traces->Set(i, obj);
  }
  return result;
}

1467void Thread::ThrowNewExceptionF(const ThrowLocation& throw_location,
1468                                const char* exception_class_descriptor, const char* fmt, ...) {
1469  va_list args;
1470  va_start(args, fmt);
1471  ThrowNewExceptionV(throw_location, exception_class_descriptor,
1472                     fmt, args);
1473  va_end(args);
1474}
1475
1476void Thread::ThrowNewExceptionV(const ThrowLocation& throw_location,
1477                                const char* exception_class_descriptor,
1478                                const char* fmt, va_list ap) {
1479  std::string msg;
1480  StringAppendV(&msg, fmt, ap);
1481  ThrowNewException(throw_location, exception_class_descriptor, msg.c_str());
1482}
1483
1484void Thread::ThrowNewException(const ThrowLocation& throw_location, const char* exception_class_descriptor,
1485                               const char* msg) {
1486  AssertNoPendingException(); // Callers should either clear or call ThrowNewWrappedException.
1487  ThrowNewWrappedException(throw_location, exception_class_descriptor, msg);
1488}
1489
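// Allocates an instance of the named exception class and makes it this thread's pending
// exception, using any previously pending exception as its cause.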
1490void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
1491                                      const char* exception_class_descriptor,
1492                                      const char* msg) {
1493  DCHECK_EQ(this, Thread::Current());
1494  // Ensure the throw location's 'this' object and method aren't lost over object allocation.
1495  SirtRef<mirror::Object> saved_throw_this(this, throw_location.GetThis());
1496  SirtRef<mirror::AbstractMethod> saved_throw_method(this, throw_location.GetMethod());
1497  // Ignore the cause throw location. TODO: should we report this as a re-throw?
1498  SirtRef<mirror::Throwable> cause(this, GetException(NULL));
1499  ClearException();
1500  Runtime* runtime = Runtime::Current();
1501
1502  mirror::ClassLoader* cl = NULL;
1503  if (throw_location.GetMethod() != NULL) {
1504    cl = throw_location.GetMethod()->GetDeclaringClass()->GetClassLoader();
1505  }
1506  SirtRef<mirror::Class>
1507      exception_class(this, runtime->GetClassLinker()->FindClass(exception_class_descriptor, cl));
1508  if (UNLIKELY(exception_class.get() == NULL)) {
1509    CHECK(IsExceptionPending());
1510    LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
1511    return;
1512  }
1513
1514  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(exception_class.get(), true, true))) {
1515    DCHECK(IsExceptionPending());
1516    return;
1517  }
1518  DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
1519  SirtRef<mirror::Throwable> exception(this,
1520                                down_cast<mirror::Throwable*>(exception_class->AllocObject(this)));
1521
1522  // Choose an appropriate constructor and set up the arguments.
1523  const char* signature;
1524  SirtRef<mirror::String> msg_string(this, NULL);
1525  if (msg != NULL) {
1526    // Ensure we remember this and the method over the String allocation.
1527    msg_string.reset(mirror::String::AllocFromModifiedUtf8(this, msg));
1528    if (UNLIKELY(msg_string.get() == NULL)) {
1529      CHECK(IsExceptionPending());  // OOME.
1530      return;
1531    }
1532    if (cause.get() == NULL) {
1533      signature = "(Ljava/lang/String;)V";
1534    } else {
1535      signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
1536    }
1537  } else {
1538    if (cause.get() == NULL) {
1539      signature = "()V";
1540    } else {
1541      signature = "(Ljava/lang/Throwable;)V";
1542    }
1543  }
1544  mirror::AbstractMethod* exception_init_method =
1545      exception_class->FindDeclaredDirectMethod("<init>", signature);
1546
1547  CHECK(exception_init_method != NULL) << "No <init>" << signature << " in "
1548      << PrettyDescriptor(exception_class_descriptor);
1549
1550  if (UNLIKELY(!runtime->IsStarted())) {
1551    // Something is trying to throw an exception without a started runtime, which is the common
1552    // case in the compiler. We won't be able to invoke the constructor of the exception, so set
1553    // the exception fields directly.
1554    if (msg != NULL) {
1555      exception->SetDetailMessage(msg_string.get());
1556    }
1557    if (cause.get() != NULL) {
1558      exception->SetCause(cause.get());
1559    }
1560    ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
1561                                         throw_location.GetDexPc());
1562    SetException(gc_safe_throw_location, exception.get());
1563  } else {
1564    ArgArray args("VLL", 3);
1565    args.Append(reinterpret_cast<uint32_t>(exception.get()));
1566    if (msg != NULL) {
1567      args.Append(reinterpret_cast<uint32_t>(msg_string.get()));
1568    }
1569    if (cause.get() != NULL) {
1570      args.Append(reinterpret_cast<uint32_t>(cause.get()));
1571    }
1572    JValue result;
1573    exception_init_method->Invoke(this, args.GetArray(), args.GetNumBytes(), &result, 'V');
1574    if (LIKELY(!IsExceptionPending())) {
1575      ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
1576                                           throw_location.GetDexPc());
1577      SetException(gc_safe_throw_location, exception.get());
1578    }
1579  }
1580}
1581
1582void Thread::ThrowOutOfMemoryError(const char* msg) {
1583  LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
1584      msg, (throwing_OutOfMemoryError_ ? " (recursive case)" : ""));
1585  ThrowLocation throw_location = GetCurrentLocationForThrow();
1586  if (!throwing_OutOfMemoryError_) {
1587    throwing_OutOfMemoryError_ = true;
1588    ThrowNewException(throw_location, "Ljava/lang/OutOfMemoryError;", msg);
1589    throwing_OutOfMemoryError_ = false;
1590  } else {
1591    Dump(LOG(ERROR)); // The pre-allocated OOME has no stack, so help out and log one.
1592    SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1593  }
1594}
1595
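// Helpers intended to be invoked by hand from a debugger such as gdb.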
1596Thread* Thread::CurrentFromGdb() {
1597  return Thread::Current();
1598}
1599
1600void Thread::DumpFromGdb() const {
1601  std::ostringstream ss;
1602  Dump(ss);
1603  std::string str(ss.str());
1604  // Log to stderr for debugging command-line processes.
1605  std::cerr << str;
1606#ifdef HAVE_ANDROID_OS
1607  // Log to logcat for debugging framework processes.
1608  LOG(INFO) << str;
1609#endif
1610}
1611
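// Maps each entry point's offset within Thread::entrypoints_ to a printable name; used by
// DumpThreadOffset below.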
1612struct EntryPointInfo {
1613  uint32_t offset;
1614  const char* name;
1615};
1616#define ENTRY_POINT_INFO(x) { ENTRYPOINT_OFFSET(x), #x }
1617static const EntryPointInfo gThreadEntryPointInfo[] = {
1618  ENTRY_POINT_INFO(pAllocArrayFromCode),
1619  ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck),
1620  ENTRY_POINT_INFO(pAllocObjectFromCode),
1621  ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck),
1622  ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode),
1623  ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck),
1624  ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode),
1625  ENTRY_POINT_INFO(pCanPutArrayElementFromCode),
1626  ENTRY_POINT_INFO(pCheckCastFromCode),
1627  ENTRY_POINT_INFO(pInitializeStaticStorage),
1628  ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode),
1629  ENTRY_POINT_INFO(pInitializeTypeFromCode),
1630  ENTRY_POINT_INFO(pResolveStringFromCode),
1631  ENTRY_POINT_INFO(pSet32Instance),
1632  ENTRY_POINT_INFO(pSet32Static),
1633  ENTRY_POINT_INFO(pSet64Instance),
1634  ENTRY_POINT_INFO(pSet64Static),
1635  ENTRY_POINT_INFO(pSetObjInstance),
1636  ENTRY_POINT_INFO(pSetObjStatic),
1637  ENTRY_POINT_INFO(pGet32Instance),
1638  ENTRY_POINT_INFO(pGet32Static),
1639  ENTRY_POINT_INFO(pGet64Instance),
1640  ENTRY_POINT_INFO(pGet64Static),
1641  ENTRY_POINT_INFO(pGetObjInstance),
1642  ENTRY_POINT_INFO(pGetObjStatic),
1643  ENTRY_POINT_INFO(pHandleFillArrayDataFromCode),
1644  ENTRY_POINT_INFO(pJniMethodStart),
1645  ENTRY_POINT_INFO(pJniMethodStartSynchronized),
1646  ENTRY_POINT_INFO(pJniMethodEnd),
1647  ENTRY_POINT_INFO(pJniMethodEndSynchronized),
1648  ENTRY_POINT_INFO(pJniMethodEndWithReference),
1649  ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
1650  ENTRY_POINT_INFO(pLockObjectFromCode),
1651  ENTRY_POINT_INFO(pUnlockObjectFromCode),
1652  ENTRY_POINT_INFO(pCmpgDouble),
1653  ENTRY_POINT_INFO(pCmpgFloat),
1654  ENTRY_POINT_INFO(pCmplDouble),
1655  ENTRY_POINT_INFO(pCmplFloat),
1656  ENTRY_POINT_INFO(pFmod),
1657  ENTRY_POINT_INFO(pSqrt),
1658  ENTRY_POINT_INFO(pL2d),
1659  ENTRY_POINT_INFO(pFmodf),
1660  ENTRY_POINT_INFO(pL2f),
1661  ENTRY_POINT_INFO(pD2iz),
1662  ENTRY_POINT_INFO(pF2iz),
1663  ENTRY_POINT_INFO(pIdivmod),
1664  ENTRY_POINT_INFO(pD2l),
1665  ENTRY_POINT_INFO(pF2l),
1666  ENTRY_POINT_INFO(pLdiv),
1667  ENTRY_POINT_INFO(pLdivmod),
1668  ENTRY_POINT_INFO(pLmul),
1669  ENTRY_POINT_INFO(pShlLong),
1670  ENTRY_POINT_INFO(pShrLong),
1671  ENTRY_POINT_INFO(pUshrLong),
1672  ENTRY_POINT_INFO(pInterpreterToInterpreterEntry),
1673  ENTRY_POINT_INFO(pInterpreterToQuickEntry),
1674  ENTRY_POINT_INFO(pIndexOf),
1675  ENTRY_POINT_INFO(pMemcmp16),
1676  ENTRY_POINT_INFO(pStringCompareTo),
1677  ENTRY_POINT_INFO(pMemcpy),
1678  ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode),
1679  ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode),
1680  ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
1681  ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
1682  ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
1683  ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
1684  ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
1685  ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
1686  ENTRY_POINT_INFO(pCheckSuspendFromCode),
1687  ENTRY_POINT_INFO(pTestSuspendFromCode),
1688  ENTRY_POINT_INFO(pDeliverException),
1689  ENTRY_POINT_INFO(pThrowArrayBoundsFromCode),
1690  ENTRY_POINT_INFO(pThrowDivZeroFromCode),
1691  ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode),
1692  ENTRY_POINT_INFO(pThrowNullPointerFromCode),
1693  ENTRY_POINT_INFO(pThrowStackOverflowFromCode),
1694};
1695#undef ENTRY_POINT_INFO
1696
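// Writes a symbolic name for the given Thread offset (a member or an entry point) to os, or the
// raw offset if it is not recognized.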
1697void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) {
1698  CHECK_EQ(size_of_pointers, 4U); // TODO: support 64-bit targets.
1699
1700#define DO_THREAD_OFFSET(x) if (offset == static_cast<uint32_t>(OFFSETOF_VOLATILE_MEMBER(Thread, x))) { os << # x; return; }
1701  DO_THREAD_OFFSET(state_and_flags_);
1702  DO_THREAD_OFFSET(card_table_);
1703  DO_THREAD_OFFSET(exception_);
1704  DO_THREAD_OFFSET(opeer_);
1705  DO_THREAD_OFFSET(jni_env_);
1706  DO_THREAD_OFFSET(self_);
1707  DO_THREAD_OFFSET(stack_end_);
1708  DO_THREAD_OFFSET(suspend_count_);
1709  DO_THREAD_OFFSET(thin_lock_id_);
1710  //DO_THREAD_OFFSET(top_of_managed_stack_);
1711  //DO_THREAD_OFFSET(top_of_managed_stack_pc_);
1712  DO_THREAD_OFFSET(top_sirt_);
1713#undef DO_THREAD_OFFSET
1714
1715  size_t entry_point_count = arraysize(gThreadEntryPointInfo);
1716  CHECK_EQ(entry_point_count * size_of_pointers, sizeof(EntryPoints));
1717  uint32_t expected_offset = OFFSETOF_MEMBER(Thread, entrypoints_);
1718  for (size_t i = 0; i < entry_point_count; ++i) {
1719    CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name;
1720    expected_offset += size_of_pointers;
1721    if (gThreadEntryPointInfo[i].offset == offset) {
1722      os << gThreadEntryPointInfo[i].name;
1723      return;
1724    }
1725  }
1726  os << offset;
1727}
1728
1729static const bool kDebugExceptionDelivery = false;
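// Walks the stack from the throw site looking for a catch handler for the pending exception (or,
// when deoptimizing, building shadow frames for the interpreter), then long jumps to the handler.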
1730class CatchBlockStackVisitor : public StackVisitor {
1731 public:
1732  CatchBlockStackVisitor(Thread* self, const ThrowLocation& throw_location,
1733                         mirror::Throwable* exception, bool is_deoptimization)
1734      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1735      : StackVisitor(self, self->GetLongJumpContext()),
1736        self_(self), exception_(exception), is_deoptimization_(is_deoptimization),
1737        to_find_(is_deoptimization ? NULL : exception->GetClass()), throw_location_(throw_location),
1738        handler_quick_frame_(NULL), handler_quick_frame_pc_(0), handler_dex_pc_(0),
1739        native_method_count_(0),
1740        method_tracing_active_(is_deoptimization ||
1741                               Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
1742        instrumentation_frames_to_pop_(0), top_shadow_frame_(NULL), prev_shadow_frame_(NULL) {
1743    // The exception is not in the root set, so we can't allow GC while finding the catch block.
1744    last_no_assert_suspension_cause_ = self->StartAssertNoThreadSuspension("Finding catch block");
1745  }
1746
1747  ~CatchBlockStackVisitor() {
1748    LOG(FATAL) << "UNREACHABLE";  // Expected to take long jump.
1749  }
1750
1751  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1752    mirror::AbstractMethod* method = GetMethod();
1753    if (method == NULL) {
1754      // This is the upcall; remember the frame and last PC so that we can long jump to them.
1755      handler_quick_frame_pc_ = GetCurrentQuickFramePc();
1756      handler_quick_frame_ = GetCurrentQuickFrame();
1757      return false;  // End stack walk.
1758    } else {
1759      if (UNLIKELY(method_tracing_active_ &&
1760                   GetInstrumentationExitPc() == GetReturnPc())) {
1761        // Keep count of the number of unwinds during instrumentation.
1762        instrumentation_frames_to_pop_++;
1763      }
1764      if (method->IsRuntimeMethod()) {
1765        // Ignore callee save method.
1766        DCHECK(method->IsCalleeSaveMethod());
1767        return true;
1768      } else if (is_deoptimization_) {
1769        return HandleDeoptimization(method);
1770      } else {
1771        return HandleTryItems(method);
1772      }
1773    }
1774  }
1775
1776  bool HandleTryItems(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1777    uint32_t dex_pc = DexFile::kDexNoIndex;
1778    if (method->IsNative()) {
1779      native_method_count_++;
1780    } else {
1781      dex_pc = GetDexPc();
1782    }
1783    if (dex_pc != DexFile::kDexNoIndex) {
1784      uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc);
1785      if (found_dex_pc != DexFile::kDexNoIndex) {
1786        handler_dex_pc_ = found_dex_pc;
1787        handler_quick_frame_pc_ = method->ToNativePc(found_dex_pc);
1788        handler_quick_frame_ = GetCurrentQuickFrame();
1789        return false;  // End stack walk.
1790      }
1791    }
1792    return true;  // Continue stack walk.
1793  }
1794
1795  bool HandleDeoptimization(mirror::AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1796    MethodHelper mh(m);
1797    const DexFile::CodeItem* code_item = mh.GetCodeItem();
1798    CHECK(code_item != NULL);
1799    uint16_t num_regs = code_item->registers_size_;
1800    uint32_t dex_pc = GetDexPc();
1801    const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
1802    uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
1803    ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, m, new_dex_pc);
1804    verifier::MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(),
1805                                      mh.GetClassDefIndex(), code_item,
1806                                      m->GetDexMethodIndex(), m, m->GetAccessFlags(), false, true);
1807    verifier.Verify();
1808    std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
1809    for (uint16_t reg = 0; reg < num_regs; reg++) {
1810      VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
1811      switch (kind) {
1812        case kUndefined:
1813          new_frame->SetVReg(reg, 0xEBADDE09);
1814          break;
1815        case kConstant:
1816          new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
1817          break;
1818        case kReferenceVReg:
1819          new_frame->SetVRegReference(reg,
1820                                      reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kind)));
1821          break;
1822        default:
1823          new_frame->SetVReg(reg, GetVReg(m, reg, kind));
1824          break;
1825      }
1826    }
1827    if (prev_shadow_frame_ != NULL) {
1828      prev_shadow_frame_->SetLink(new_frame);
1829    } else {
1830      top_shadow_frame_ = new_frame;
1831    }
1832    prev_shadow_frame_ = new_frame;
1833    return true;
1834  }
1835
1836  void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1837    mirror::AbstractMethod* catch_method = *handler_quick_frame_;
1838    if (catch_method == NULL) {
1839      if (kDebugExceptionDelivery) {
1840        LOG(INFO) << "Handler is upcall";
1841      }
1842    } else {
1843      CHECK(!is_deoptimization_);
1844      if (instrumentation_frames_to_pop_ > 0) {
1845        // Don't pop the instrumentation frame of the catch handler.
1846        instrumentation_frames_to_pop_--;
1847      }
1848      if (kDebugExceptionDelivery) {
1849        const DexFile& dex_file = *catch_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1850        int line_number = dex_file.GetLineNumFromPC(catch_method, handler_dex_pc_);
1851        LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
1852      }
1853    }
1854    // Put exception back in root set and clear throw location.
1855    self_->SetException(ThrowLocation(), exception_);
1856    self_->EndAssertNoThreadSuspension(last_no_assert_suspension_cause_);
1857    // Do instrumentation events after allowing thread suspension again.
1858    instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
1859    for (size_t i = 0; i < instrumentation_frames_to_pop_; ++i) {
1860      // We pop the instrumentation stack here so as not to corrupt it during the stack walk.
1861      instrumentation->PopMethodForUnwind(self_, is_deoptimization_);
1862    }
1863    if (!is_deoptimization_) {
1864      instrumentation->ExceptionCaughtEvent(self_, throw_location_, catch_method, handler_dex_pc_,
1865                                            exception_);
1866    } else {
1867      // TODO: proper return value.
1868      self_->SetDeoptimizationShadowFrame(top_shadow_frame_);
1869    }
1870    // Place context back on thread so it will be available when we continue.
1871    self_->ReleaseLongJumpContext(context_);
1872    context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
1873    CHECK_NE(handler_quick_frame_pc_, 0u);
1874    context_->SetPC(handler_quick_frame_pc_);
1875    context_->SmashCallerSaves();
1876    context_->DoLongJump();
1877  }
1878
1879 private:
1880  Thread* const self_;
1881  mirror::Throwable* const exception_;
1882  const bool is_deoptimization_;
1883  // The exception type for which we are looking for a catch block.
1884  mirror::Class* const to_find_;
1885  // Location of the throw.
1886  const ThrowLocation& throw_location_;
1887  // Quick frame with found handler or last frame if no handler found.
1888  mirror::AbstractMethod** handler_quick_frame_;
1889  // PC to branch to for the handler.
1890  uintptr_t handler_quick_frame_pc_;
1891  // Associated dex PC.
1892  uint32_t handler_dex_pc_;
1893  // Number of native methods passed during the stack crawl (equates to the number of SIRTs to pop).
1894  uint32_t native_method_count_;
1895  // Is method tracing active?
1896  const bool method_tracing_active_;
1897  // Support for nesting no thread suspension checks.
1898  const char* last_no_assert_suspension_cause_;
1899  // Number of frames to pop in long jump.
1900  size_t instrumentation_frames_to_pop_;
1901  ShadowFrame* top_shadow_frame_;
1902  ShadowFrame* prev_shadow_frame_;
1903};
1904
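// Delivers the pending exception: finds a catch handler (or prepares deoptimization) by walking
// the stack, then long jumps to it. Does not return.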
1905void Thread::QuickDeliverException() {
1906  // Get exception from thread.
1907  ThrowLocation throw_location;
1908  mirror::Throwable* exception = GetException(&throw_location);
1909  CHECK(exception != NULL);
1910  // Don't leave exception visible while we try to find the handler, which may cause class
1911  // resolution.
1912  ClearException();
1913  bool is_deoptimization = (exception == reinterpret_cast<mirror::Throwable*>(-1));
1914  if (kDebugExceptionDelivery) {
1915    if (!is_deoptimization) {
1916      mirror::String* msg = exception->GetDetailMessage();
1917      std::string str_msg(msg != NULL ? msg->ToModifiedUtf8() : "");
1918      DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
1919                << ": " << str_msg << "\n");
1920    } else {
1921      DumpStack(LOG(INFO) << "Deoptimizing: ");
1922    }
1923  }
1924  CatchBlockStackVisitor catch_finder(this, throw_location, exception, is_deoptimization);
1925  catch_finder.WalkStack(true);
1926  catch_finder.DoLongJump();
1927  LOG(FATAL) << "UNREACHABLE";
1928}
1929
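// Returns the thread's cached long jump Context, or creates a new one if none is cached; callers
// hand it back via ReleaseLongJumpContext.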
1930Context* Thread::GetLongJumpContext() {
1931  Context* result = long_jump_context_;
1932  if (result == NULL) {
1933    result = Context::Create();
1934  } else {
1935    long_jump_context_ = NULL;  // Avoid context being shared.
1936    result->Reset();
1937  }
1938  return result;
1939}
1940
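// Stack visitor that records the first non-runtime method on the stack, its dex pc and, when a
// Context is supplied, its 'this' object.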
1941struct CurrentMethodVisitor : public StackVisitor {
1942  CurrentMethodVisitor(Thread* thread, Context* context)
1943      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1944      : StackVisitor(thread, context), this_object_(NULL), method_(NULL), dex_pc_(0) {}
1945  virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1946    mirror::AbstractMethod* m = GetMethod();
1947    if (m->IsRuntimeMethod()) {
1948      // Continue if this is a runtime method.
1949      return true;
1950    }
1951    if (context_ != NULL) {
1952      this_object_ = GetThisObject();
1953    }
1954    method_ = m;
1955    dex_pc_ = GetDexPc();
1956    return false;
1957  }
1958  mirror::Object* this_object_;
1959  mirror::AbstractMethod* method_;
1960  uint32_t dex_pc_;
1961};
1962
1963mirror::AbstractMethod* Thread::GetCurrentMethod(uint32_t* dex_pc) const {
1964  CurrentMethodVisitor visitor(const_cast<Thread*>(this), NULL);
1965  visitor.WalkStack(false);
1966  if (dex_pc != NULL) {
1967    *dex_pc = visitor.dex_pc_;
1968  }
1969  return visitor.method_;
1970}
1971
1972ThrowLocation Thread::GetCurrentLocationForThrow() {
1973  Context* context = GetLongJumpContext();
1974  CurrentMethodVisitor visitor(this, context);
1975  visitor.WalkStack(false);
1976  ReleaseLongJumpContext(context);
1977  return ThrowLocation(visitor.this_object_, visitor.method_, visitor.dex_pc_);
1978}
1979
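// Returns true if the object's recorded lock owner matches this thread's thin lock id.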
1980bool Thread::HoldsLock(mirror::Object* object) {
1981  if (object == NULL) {
1982    return false;
1983  }
1984  return object->GetThinLockId() == thin_lock_id_;
1985}
1986
1987// RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
1988template <typename RootVisitor>
1989class ReferenceMapVisitor : public StackVisitor {
1990 public:
1991  ReferenceMapVisitor(Thread* thread, Context* context, const RootVisitor& visitor)
1992      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1993      : StackVisitor(thread, context), visitor_(visitor) {}
1994
1995  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1996    if (false) {
1997      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
1998          << StringPrintf("@ PC:%04x", GetDexPc());
1999    }
2000    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
2001    if (shadow_frame != NULL) {
2002      mirror::AbstractMethod* m = shadow_frame->GetMethod();
2003      size_t num_regs = shadow_frame->NumberOfVRegs();
2004      if (m->IsNative() || shadow_frame->HasReferenceArray()) {
2005        // SIRT for JNI or References for interpreter.
2006        for (size_t reg = 0; reg < num_regs; ++reg) {
2007          mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2008          if (ref != NULL) {
2009            visitor_(ref, reg, this);
2010          }
2011        }
2012      } else {
2013        // Java method.
2014        // The portable path uses DexGcMap, stored in Method.native_gc_map_.
2015        const uint8_t* gc_map = m->GetNativeGcMap();
2016        CHECK(gc_map != NULL) << PrettyMethod(m);
2017        uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
2018                                                       (gc_map[1] << 16) |
2019                                                       (gc_map[2] << 8) |
2020                                                       (gc_map[3] << 0));
2021        verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
2022        uint32_t dex_pc = GetDexPc();
2023        const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
2024        DCHECK(reg_bitmap != NULL);
2025        num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
2026        for (size_t reg = 0; reg < num_regs; ++reg) {
2027          if (TestBitmap(reg, reg_bitmap)) {
2028            mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2029            if (ref != NULL) {
2030              visitor_(ref, reg, this);
2031            }
2032          }
2033        }
2034      }
2035    } else {
2036      mirror::AbstractMethod* m = GetMethod();
2037      // Process register map (which native and runtime methods don't have)
2038      if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
2039        const uint8_t* native_gc_map = m->GetNativeGcMap();
2040        CHECK(native_gc_map != NULL) << PrettyMethod(m);
2041        mh_.ChangeMethod(m);
2042        const DexFile::CodeItem* code_item = mh_.GetCodeItem();
2043        DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
2044        NativePcOffsetToReferenceMap map(native_gc_map);
2045        size_t num_regs = std::min(map.RegWidth() * 8,
2046                                   static_cast<size_t>(code_item->registers_size_));
2047        if (num_regs > 0) {
2048          const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
2049          DCHECK(reg_bitmap != NULL);
2050          const VmapTable vmap_table(m->GetVmapTableRaw());
2051          uint32_t core_spills = m->GetCoreSpillMask();
2052          uint32_t fp_spills = m->GetFpSpillMask();
2053          size_t frame_size = m->GetFrameSizeInBytes();
2054          // For all dex registers in the bitmap
2055          mirror::AbstractMethod** cur_quick_frame = GetCurrentQuickFrame();
2056          DCHECK(cur_quick_frame != NULL);
2057          for (size_t reg = 0; reg < num_regs; ++reg) {
2058            // Does this register hold a reference?
2059            if (TestBitmap(reg, reg_bitmap)) {
2060              uint32_t vmap_offset;
2061              mirror::Object* ref;
2062              if (vmap_table.IsInContext(reg, vmap_offset, kReferenceVReg)) {
2063                uintptr_t val = GetGPR(vmap_table.ComputeRegister(core_spills, vmap_offset,
2064                                                                  kReferenceVReg));
2065                ref = reinterpret_cast<mirror::Object*>(val);
2066              } else {
2067                ref = reinterpret_cast<mirror::Object*>(GetVReg(cur_quick_frame, code_item,
2068                                                                core_spills, fp_spills, frame_size,
2069                                                                reg));
2070              }
2071
2072              if (ref != NULL) {
2073                visitor_(ref, reg, this);
2074              }
2075            }
2076          }
2077        }
2078      }
2079    }
2080    return true;
2081  }
2082
2083 private:
2084  static bool TestBitmap(int reg, const uint8_t* reg_vector) {
2085    return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
2086  }
2087
2088  // Visitor for when we visit a root.
2089  const RootVisitor& visitor_;
2090
2091  // A method helper we keep around to avoid dex file/cache re-computations.
2092  MethodHelper mh_;
2093};
2094
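// Adapts a plain RootVisitor callback to the (object, vreg, StackVisitor) interface expected by
// ReferenceMapVisitor, ignoring the stack-related arguments.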
2095class RootCallbackVisitor {
2096 public:
2097  RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {}
2100
2101  void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const {
2102    visitor_(obj, arg_);
2103  }
2104
2105 private:
2106  RootVisitor* visitor_;
2107  void* arg_;
2108};
2109
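// Adapts a VerifyRootVisitor to the same (object, vreg, StackVisitor) interface, forwarding all
// of the arguments.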
2110class VerifyCallbackVisitor {
2111 public:
2112  VerifyCallbackVisitor(VerifyRootVisitor* visitor, void* arg)
2113      : visitor_(visitor),
2114        arg_(arg) {
2115  }
2116
2117  void operator()(const mirror::Object* obj, size_t vreg, const StackVisitor* visitor) const {
2118    visitor_(obj, arg_, vreg, visitor);
2119  }
2120
2121 private:
2122  VerifyRootVisitor* const visitor_;
2123  void* const arg_;
2124};
2125
2126struct VerifyRootWrapperArg {
2127  VerifyRootVisitor* visitor;
2128  void* arg;
2129};
2130
2131static void VerifyRootWrapperCallback(const mirror::Object* root, void* arg) {
2132  VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
2133  wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
2134}
2135
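// Like VisitRoots below, but routes every root through a VerifyRootVisitor instead of a plain
// RootVisitor.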
2136void Thread::VerifyRoots(VerifyRootVisitor* visitor, void* arg) {
2137  // We need to map from a RootVisitor to VerifyRootVisitor, so pass in nulls for arguments we
2138  // don't have.
2139  VerifyRootWrapperArg wrapperArg;
2140  wrapperArg.arg = arg;
2141  wrapperArg.visitor = visitor;
2142
2143  if (opeer_ != NULL) {
2144    VerifyRootWrapperCallback(opeer_, &wrapperArg);
2145  }
2146  if (exception_ != NULL) {
2147    VerifyRootWrapperCallback(exception_, &wrapperArg);
2148  }
2149  throw_location_.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2150  if (class_loader_override_ != NULL) {
2151    VerifyRootWrapperCallback(class_loader_override_, &wrapperArg);
2152  }
2153  jni_env_->locals.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2154  jni_env_->monitors.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2155
2156  SirtVisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2157
2158  // Visit roots on this thread's stack
2159  Context* context = GetLongJumpContext();
2160  VerifyCallbackVisitor visitorToCallback(visitor, arg);
2161  ReferenceMapVisitor<VerifyCallbackVisitor> mapper(this, context, visitorToCallback);
2162  mapper.WalkStack();
2163  ReleaseLongJumpContext(context);
2164
2165  std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack = GetInstrumentationStack();
2166  typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It;
2167  for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end; ++it) {
2168    mirror::Object* this_object = (*it).this_object_;
2169    if (this_object != NULL) {
2170      VerifyRootWrapperCallback(this_object, &wrapperArg);
2171    }
2172    mirror::AbstractMethod* method = (*it).method_;
2173    VerifyRootWrapperCallback(method, &wrapperArg);
2174  }
2175}
2176
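// Visits all of this thread's roots: the peer, pending exception, throw location, class loader
// override, JNI locals and monitors, SIRT entries, stack references and instrumentation frames.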
2177void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
2178  if (opeer_ != NULL) {
2179    visitor(opeer_, arg);
2180  }
2181  if (exception_ != NULL) {
2182    visitor(exception_, arg);
2183  }
2184  throw_location_.VisitRoots(visitor, arg);
2185  if (class_loader_override_ != NULL) {
2186    visitor(class_loader_override_, arg);
2187  }
2188  jni_env_->locals.VisitRoots(visitor, arg);
2189  jni_env_->monitors.VisitRoots(visitor, arg);
2190
2191  SirtVisitRoots(visitor, arg);
2192
2193  // Visit roots on this thread's stack
2194  Context* context = GetLongJumpContext();
2195  RootCallbackVisitor visitorToCallback(visitor, arg);
2196  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context, visitorToCallback);
2197  mapper.WalkStack();
2198  ReleaseLongJumpContext(context);
2199
2200  std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack = GetInstrumentationStack();
2201  typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It;
2202  for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end; ++it) {
2203    mirror::Object* this_object = (*it).this_object_;
2204    if (this_object != NULL) {
2205      visitor(this_object, arg);
2206    }
2207    mirror::AbstractMethod* method = (*it).method_;
2208    visitor(method, arg);
2209  }
2210}
2211
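// Verifies a single root object against the heap; used by VerifyStackImpl below.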
2212static void VerifyObject(const mirror::Object* root, void* arg) {
2213  gc::Heap* heap = reinterpret_cast<gc::Heap*>(arg);
2214  heap->VerifyObject(root);
2215}
2216
2217void Thread::VerifyStackImpl() {
2218  UniquePtr<Context> context(Context::Create());
2219  RootCallbackVisitor visitorToCallback(VerifyObject, Runtime::Current()->GetHeap());
2220  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitorToCallback);
2221  mapper.WalkStack();
2222}
2223
2224// Set the stack end to the value to be used during a stack overflow.
2225void Thread::SetStackEndForStackOverflow() {
2226  // During stack overflow we allow use of the full stack
2227  if (stack_end_ == stack_begin_) {
2228    DumpStack(std::cerr);
2229    LOG(FATAL) << "Need to increase kStackOverflowReservedBytes (currently "
2230               << kStackOverflowReservedBytes << ")";
2231  }
2232
2233  stack_end_ = stack_begin_;
2234}
2235
2236std::ostream& operator<<(std::ostream& os, const Thread& thread) {
2237  thread.ShortDump(os);
2238  return os;
2239}
2240
2241}  // namespace art
2242