thread.cc revision 834b394ee759ed31c5371d8093d7cd8cd90014a8
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_DALVIK

#include "thread.h"

#include <cutils/trace.h>
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
#include <sys/time.h>

#include <algorithm>
#include <bitset>
#include <cerrno>
#include <iostream>
#include <list>

#include "arch/context.h"
#include "base/mutex.h"
#include "class_linker.h"
#include "class_linker-inl.h"
#include "cutils/atomic.h"
#include "cutils/atomic-inline.h"
#include "debugger.h"
#include "dex_file-inl.h"
#include "entrypoints/entrypoint_utils.h"
#include "gc_map.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "gc/space/space.h"
#include "invoke_arg_array_builder.h"
#include "jni_internal.h"
#include "mirror/abstract_method-inl.h"
#include "mirror/class-inl.h"
#include "mirror/class_loader.h"
#include "mirror/field-inl.h"
#include "mirror/object_array-inl.h"
#include "mirror/stack_trace_element.h"
#include "monitor.h"
#include "object_utils.h"
#include "reflection.h"
#include "runtime.h"
#include "scoped_thread_state_change.h"
#include "ScopedLocalRef.h"
#include "ScopedUtfChars.h"
#include "sirt_ref.h"
#include "stack.h"
#include "stack_indirect_reference_table.h"
#include "thread-inl.h"
#include "thread_list.h"
#include "utils.h"
#include "verifier/dex_gc_map.h"
#include "verifier/method_verifier.h"
#include "well_known_classes.h"

namespace art {

bool Thread::is_started_ = false;
pthread_key_t Thread::pthread_key_self_;
ConditionVariable* Thread::resume_cond_ = NULL;

static const char* kThreadNameDuringStartup = "<native thread without managed peer>";

void Thread::InitCardTable() {
  card_table_ = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
}

#if !defined(__APPLE__)
static void UnimplementedEntryPoint() {
  UNIMPLEMENTED(FATAL);
}
#endif

void InitEntryPoints(QuickEntryPoints* qpoints, PortableEntryPoints* ppoints);

void Thread::InitFunctionPointers() {
#if !defined(__APPLE__)  // The Mac GCC is too old to accept this code.
  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&quick_entrypoints_);
  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(quick_entrypoints_));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
  }
  begin = reinterpret_cast<uintptr_t*>(&portable_entrypoints_);
  end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(portable_entrypoints_));
  for (uintptr_t* it = begin; it != end; ++it) {
    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
  }
#endif
  InitEntryPoints(&quick_entrypoints_, &portable_entrypoints_);
}
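
// Effect of the safety net above: any slot that InitEntryPoints() leaves unset keeps the
// UnimplementedEntryPoint placeholder, so a call through an unwired entry point aborts with
// UNIMPLEMENTED(FATAL) instead of jumping to garbage. (On Mac the placeholder loop is compiled out.)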

void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf) {
  deoptimization_shadow_frame_ = sf;
}

void Thread::SetDeoptimizationReturnValue(const JValue& ret_val) {
  deoptimization_return_value_.SetJ(ret_val.GetJ());
}

ShadowFrame* Thread::GetAndClearDeoptimizationShadowFrame(JValue* ret_val) {
  ShadowFrame* sf = deoptimization_shadow_frame_;
  deoptimization_shadow_frame_ = NULL;
  ret_val->SetJ(deoptimization_return_value_.GetJ());
  return sf;
}

void Thread::InitTid() {
  tid_ = ::art::GetTid();
}

void Thread::InitAfterFork() {
  // One thread (us) survived the fork, but we have a new tid so we need to
  // update the value stashed in this Thread*.
  InitTid();
}

void* Thread::CreateCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  Runtime* runtime = Runtime::Current();
  if (runtime == NULL) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
    return NULL;
  }
  {
    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
    //       after self->Init().
    MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
    // Check that if we got here we cannot be shutting down (as shutdown should never have started
    // while threads are being born).
    CHECK(!runtime->IsShuttingDown());
    self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
    Runtime::Current()->EndThreadBirth();
  }
  {
    ScopedObjectAccess soa(self);

    // Copy peer into self, deleting global reference when done.
    CHECK(self->jpeer_ != NULL);
    self->opeer_ = soa.Decode<mirror::Object*>(self->jpeer_);
    self->GetJniEnv()->DeleteGlobalRef(self->jpeer_);
    self->jpeer_ = NULL;

    {
      SirtRef<mirror::String> thread_name(self, self->GetThreadName(soa));
      self->SetThreadName(thread_name->ToModifiedUtf8().c_str());
    }
    Dbg::PostThreadStart(self);

    // Invoke the 'run' method of our java.lang.Thread.
    mirror::Object* receiver = self->opeer_;
    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
    mirror::AbstractMethod* m =
        receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid));
    JValue result;
    ArgArray arg_array(NULL, 0);
    arg_array.Append(reinterpret_cast<uint32_t>(receiver));
    m->Invoke(self, arg_array.GetArray(), arg_array.GetNumBytes(), &result, 'V');
  }
  // Detach and delete self.
  Runtime::Current()->GetThreadList()->Unregister(self);

  return NULL;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa,
                                  mirror::Object* thread_peer) {
  mirror::Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer);
  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetInt(thread_peer)));
  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
  // to stop it from going away.
  if (kIsDebugBuild) {
    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
    if (result != NULL && !result->IsSuspended()) {
      Locks::thread_list_lock_->AssertHeld(soa.Self());
    }
  }
  return result;
}

Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, jobject java_thread) {
  return FromManagedThread(soa, soa.Decode<mirror::Object*>(java_thread));
}

static size_t FixStackSize(size_t stack_size) {
  // A stack size of zero means "use the default".
  if (stack_size == 0) {
    stack_size = Runtime::Current()->GetDefaultStackSize();
  }

  // Dalvik used the bionic pthread default stack size for native threads,
  // so include that here to support apps that expect large native stacks.
  stack_size += 1 * MB;

  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
  if (stack_size < PTHREAD_STACK_MIN) {
    stack_size = PTHREAD_STACK_MIN;
  }

  // It's likely that callers are trying to ensure they have at least a certain amount of
  // stack space, so we should add our reserved space on top of what they requested, rather
  // than implicitly take it away from them.
  stack_size += Thread::kStackOverflowReservedBytes;

  // Some systems require the stack size to be a multiple of the system page size, so round up.
  stack_size = RoundUp(stack_size, kPageSize);

  return stack_size;
}
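
// Worked example for FixStackSize(), assuming 4 KiB pages and a 64 KiB request from Java:
// the thread ends up with roughly 64 KiB + 1 MiB + kStackOverflowReservedBytes, rounded up
// to the next page boundary, before being handed to pthread_attr_setstacksize in
// CreateNativeThread() below.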

void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
  CHECK(java_peer != NULL);
  Thread* self = static_cast<JNIEnvExt*>(env)->self;
  Runtime* runtime = Runtime::Current();

  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
  bool thread_start_during_shutdown = false;
  {
    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDown()) {
      thread_start_during_shutdown = true;
    } else {
      runtime->StartThreadBirth();
    }
  }
  if (thread_start_during_shutdown) {
    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
    return;
  }

  Thread* child_thread = new Thread(is_daemon);
  // Use global JNI ref to hold peer live while child thread starts.
  child_thread->jpeer_ = env->NewGlobalRef(java_peer);
  stack_size = FixStackSize(stack_size);

  // Thread.start is synchronized, so we know that nativePeer is 0, and know that we're not racing to
  // assign it.
  env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer,
                   reinterpret_cast<jint>(child_thread));

  pthread_t new_pthread;
  pthread_attr_t attr;
  CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
  CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED");
  CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
  int pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, child_thread);
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");

  if (pthread_create_result != 0) {
    // pthread_create(3) failed, so clean up.
    {
      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
      runtime->EndThreadBirth();
    }
    // Manually delete the global reference since Thread::Init will not have been run.
    env->DeleteGlobalRef(child_thread->jpeer_);
    child_thread->jpeer_ = NULL;
    delete child_thread;
    child_thread = NULL;
    // TODO: remove from thread group?
    env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_nativePeer, 0);
    {
      std::string msg(StringPrintf("pthread_create (%s stack) failed: %s",
                                   PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
      ScopedObjectAccess soa(env);
      soa.Self()->ThrowOutOfMemoryError(msg.c_str());
    }
  }
}

void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
  // This function does all the initialization that must be run by the native thread it applies to.
  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
  // we can handshake with the corresponding native thread when it's ready.) Check this native
  // thread hasn't been through here already...
  CHECK(Thread::Current() == NULL);
  SetUpAlternateSignalStack();
  InitCpu();
  InitFunctionPointers();
  InitCardTable();
  InitTid();
  // Set pthread_self_ before calling pthread_setspecific so that Thread::Current() works; this
  // avoids pthread_self_ ever being invalid when discovered from Thread::Current().
  pthread_self_ = pthread_self();
  CHECK(is_started_);
  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
  DCHECK_EQ(Thread::Current(), this);

  thin_lock_id_ = thread_list->AllocThreadId(this);
  InitStackHwm();

  jni_env_ = new JNIEnvExt(this, java_vm);
  thread_list->Register(this);
}

Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
                       bool create_peer) {
  Thread* self;
  Runtime* runtime = Runtime::Current();
  if (runtime == NULL) {
    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
    return NULL;
  }
  {
    MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
    if (runtime->IsShuttingDown()) {
      LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
      return NULL;
    } else {
      Runtime::Current()->StartThreadBirth();
      self = new Thread(as_daemon);
      self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
      Runtime::Current()->EndThreadBirth();
    }
  }

  CHECK_NE(self->GetState(), kRunnable);
  self->SetState(kNative);

  // If we're the main thread, ClassLinker won't be created until after we're attached,
  // so that thread needs a two-stage attach. Regular threads don't need this hack.
  // In the compiler, all threads need this hack, because no-one's going to be getting
  // a native peer!
  if (create_peer) {
    self->CreatePeer(thread_name, as_daemon, thread_group);
  } else {
    // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
    if (thread_name != NULL) {
      self->name_->assign(thread_name);
      ::art::SetThreadName(thread_name);
    }
  }

  return self;
}

void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->IsStarted());
  JNIEnv* env = jni_env_;

  if (thread_group == NULL) {
    thread_group = runtime->GetMainThreadGroup();
  }
  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
  jint thread_priority = GetNativePriority();
  jboolean thread_is_daemon = as_daemon;

  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
  if (peer.get() == NULL) {
    CHECK(IsExceptionPending());
    return;
  }
  {
    ScopedObjectAccess soa(this);
    opeer_ = soa.Decode<mirror::Object*>(peer.get());
  }
  env->CallNonvirtualVoidMethod(peer.get(),
                                WellKnownClasses::java_lang_Thread,
                                WellKnownClasses::java_lang_Thread_init,
                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
  AssertNoPendingException();

  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());
  jni_env_->SetIntField(peer.get(), WellKnownClasses::java_lang_Thread_nativePeer,
                        reinterpret_cast<jint>(self));

  ScopedObjectAccess soa(self);
  SirtRef<mirror::String> peer_thread_name(soa.Self(), GetThreadName(soa));
  if (peer_thread_name.get() == NULL) {
    // The Thread constructor should have set the Thread.name to a
    // non-null value. However, because we can run without code
    // available (in the compiler, in tests), we manually assign the
    // fields the constructor should have set.
    soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
        SetBoolean(opeer_, thread_is_daemon);
    soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
        SetObject(opeer_, soa.Decode<mirror::Object*>(thread_group));
    soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
        SetObject(opeer_, soa.Decode<mirror::Object*>(thread_name.get()));
    soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
        SetInt(opeer_, thread_priority);
    peer_thread_name.reset(GetThreadName(soa));
  }
  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
  if (peer_thread_name.get() != NULL) {
    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
  }
}

void Thread::SetThreadName(const char* name) {
  name_->assign(name);
  ::art::SetThreadName(name);
  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
}

void Thread::InitStackHwm() {
  void* stack_base;
  size_t stack_size;
  GetThreadStack(pthread_self_, stack_base, stack_size);

  // TODO: include this in the thread dumps; potentially useful in SIGQUIT output?
  VLOG(threads) << StringPrintf("Native stack is at %p (%s)", stack_base, PrettySize(stack_size).c_str());

  stack_begin_ = reinterpret_cast<byte*>(stack_base);
  stack_size_ = stack_size;

  if (stack_size_ <= kStackOverflowReservedBytes) {
    LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << stack_size_ << " bytes)";
  }

  // TODO: move this into the Linux GetThreadStack implementation.
#if !defined(__APPLE__)
  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
  // will be broken because we'll die long before we get close to 2GB.
  bool is_main_thread = (::art::GetTid() == getpid());
  if (is_main_thread) {
    rlimit stack_limit;
    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
    }
    if (stack_limit.rlim_cur == RLIM_INFINITY) {
      // Find the default stack size for new threads...
      pthread_attr_t default_attributes;
      size_t default_stack_size;
      CHECK_PTHREAD_CALL(pthread_attr_init, (&default_attributes), "default stack size query");
      CHECK_PTHREAD_CALL(pthread_attr_getstacksize, (&default_attributes, &default_stack_size),
                         "default stack size query");
      CHECK_PTHREAD_CALL(pthread_attr_destroy, (&default_attributes), "default stack size query");

      // ...and use that as our limit.
      size_t old_stack_size = stack_size_;
      stack_size_ = default_stack_size;
      stack_begin_ += (old_stack_size - stack_size_);
      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
                    << " to " << PrettySize(stack_size_)
                    << " with base " << reinterpret_cast<void*>(stack_begin_);
    }
  }
#endif

  // Set stack_end_ to the bottom of the stack, saving space for stack overflow handling.
  ResetDefaultStackEnd();

  // Sanity check.
  int stack_variable;
  CHECK_GT(&stack_variable, reinterpret_cast<void*>(stack_end_));
}
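
// After ResetDefaultStackEnd(), stack_end_ marks the lowest address ordinary frames may use;
// the kStackOverflowReservedBytes below it stay free so stack-overflow handling has room to
// run. The CHECK above verifies that the current frame is still above that limit.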

void Thread::ShortDump(std::ostream& os) const {
  os << "Thread[";
  if (GetThinLockId() != 0) {
    // If we're in kStarting, we won't have a thin lock id or tid yet.
    os << GetThinLockId()
             << ",tid=" << GetTid() << ',';
  }
  os << GetState()
           << ",Thread*=" << this
           << ",peer=" << opeer_
           << ",\"" << *name_ << "\""
           << "]";
}

void Thread::Dump(std::ostream& os) const {
  DumpState(os);
  DumpStack(os);
}

mirror::String* Thread::GetThreadName(const ScopedObjectAccessUnchecked& soa) const {
  mirror::Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
  return (opeer_ != NULL) ? reinterpret_cast<mirror::String*>(f->GetObject(opeer_)) : NULL;
}

void Thread::GetThreadName(std::string& name) const {
  name.assign(*name_);
}

void Thread::AtomicSetFlag(ThreadFlag flag) {
  android_atomic_or(flag, &state_and_flags_.as_int);
}

void Thread::AtomicClearFlag(ThreadFlag flag) {
  android_atomic_and(-1 ^ flag, &state_and_flags_.as_int);
}
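
// (-1 ^ flag) is ~flag, so the atomic AND above clears exactly the requested flag bit while
// leaving the thread state and any other flag bits untouched.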

// Attempt to rectify locks so that we dump thread list with required locks before exiting.
static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
  LOG(ERROR) << *thread << " suspend count already zero.";
  Locks::thread_suspend_count_lock_->Unlock(self);
  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
    Locks::mutator_lock_->SharedTryLock(self);
    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
    }
  }
  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
    Locks::thread_list_lock_->TryLock(self);
    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
    }
  }
  std::ostringstream ss;
  Runtime::Current()->GetThreadList()->DumpLocked(ss);
  LOG(FATAL) << ss.str();
}

void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
  DCHECK(delta == -1 || delta == +1 || delta == -debug_suspend_count_)
      << delta << " " << debug_suspend_count_ << " " << this;
  DCHECK_GE(suspend_count_, debug_suspend_count_) << this;
  Locks::thread_suspend_count_lock_->AssertHeld(self);
  if (this != self && !IsSuspended()) {
    Locks::thread_list_lock_->AssertHeld(self);
  }
  if (UNLIKELY(delta < 0 && suspend_count_ <= 0)) {
    UnsafeLogFatalForSuspendCount(self, this);
    return;
  }

  suspend_count_ += delta;
  if (for_debugger) {
    debug_suspend_count_ += delta;
  }

  if (suspend_count_ == 0) {
    AtomicClearFlag(kSuspendRequest);
  } else {
    AtomicSetFlag(kSuspendRequest);
  }
}
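
// While suspend_count_ is positive the kSuspendRequest flag stays set, so the target thread
// will suspend itself the next time it checks its flags on a state transition; dropping the
// count back to zero clears the request and lets it become runnable again.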

void Thread::RunCheckpointFunction() {
  CHECK(checkpoint_function_ != NULL);
  ATRACE_BEGIN("Checkpoint function");
  checkpoint_function_->Run(this);
  ATRACE_END();
}

bool Thread::RequestCheckpoint(Closure* function) {
  CHECK(!ReadFlag(kCheckpointRequest)) << "Already have a pending checkpoint request";
  checkpoint_function_ = function;
  union StateAndFlags old_state_and_flags = state_and_flags_;
  // We must be runnable to request a checkpoint.
  old_state_and_flags.as_struct.state = kRunnable;
  union StateAndFlags new_state_and_flags = old_state_and_flags;
  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
  int succeeded = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
                                         &state_and_flags_.as_int);
  return succeeded == 0;
}
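
// android_atomic_cmpxchg() returns 0 on success, so RequestCheckpoint() reports true only if
// the target was still observed as kRunnable with unchanged flags and the kCheckpointRequest
// bit was installed atomically.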

void Thread::FullSuspendCheck() {
  VLOG(threads) << this << " self-suspending";
  ATRACE_BEGIN("Full suspend check");
  // Make thread appear suspended to other threads, release mutator_lock_.
  TransitionFromRunnableToSuspended(kSuspended);
  // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
  TransitionFromSuspendedToRunnable();
  ATRACE_END();
  VLOG(threads) << this << " self-reviving";
}

Thread* Thread::SuspendForDebugger(jobject peer, bool request_suspension, bool* timed_out) {
  static const useconds_t kTimeoutUs = 30 * 1000000;  // 30s.
  useconds_t total_delay_us = 0;
  useconds_t delay_us = 0;
  bool did_suspend_request = false;
  *timed_out = false;
  while (true) {
    Thread* thread;
    {
      ScopedObjectAccess soa(Thread::Current());
      Thread* self = soa.Self();
      MutexLock mu(self, *Locks::thread_list_lock_);
      thread = Thread::FromManagedThread(soa, peer);
      if (thread == NULL) {
        JNIEnv* env = self->GetJniEnv();
        ScopedLocalRef<jstring> scoped_name_string(env,
                                                   (jstring)env->GetObjectField(peer,
                                                              WellKnownClasses::java_lang_Thread_name));
        ScopedUtfChars scoped_name_chars(env, scoped_name_string.get());
        if (scoped_name_chars.c_str() == NULL) {
            LOG(WARNING) << "No such thread for suspend: " << peer;
            env->ExceptionClear();
        } else {
            LOG(WARNING) << "No such thread for suspend: " << peer << ":" << scoped_name_chars.c_str();
        }

        return NULL;
      }
      {
        MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
        if (request_suspension) {
          thread->ModifySuspendCount(soa.Self(), +1, true /* for_debugger */);
          request_suspension = false;
          did_suspend_request = true;
        }
        // IsSuspended on the current thread will fail as the current thread is changed into
        // Runnable above. As the suspend count is now raised if this is the current thread
        // it will self suspend on transition to Runnable, making it hard to work with. It's simpler
        // to just explicitly handle the current thread in the callers to this code.
        CHECK_NE(thread, soa.Self()) << "Attempt to suspend the current thread for the debugger";
        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
        // count, or else we've waited and it has self suspended) or is the current thread, we're
        // done.
        if (thread->IsSuspended()) {
          return thread;
        }
        if (total_delay_us >= kTimeoutUs) {
          LOG(ERROR) << "Thread suspension timed out: " << peer;
          if (did_suspend_request) {
            thread->ModifySuspendCount(soa.Self(), -1, true /* for_debugger */);
          }
          *timed_out = true;
          return NULL;
        }
      }
      // Release locks and come out of runnable state.
    }
    for (int i = kLockLevelCount - 1; i >= 0; --i) {
      BaseMutex* held_mutex = Thread::Current()->GetHeldMutex(static_cast<LockLevel>(i));
      if (held_mutex != NULL) {
        LOG(FATAL) << "Holding " << held_mutex->GetName()
            << " while sleeping for thread suspension";
      }
    }
    {
      useconds_t new_delay_us = delay_us * 2;
      CHECK_GE(new_delay_us, delay_us);
      if (new_delay_us < 500000) {  // Don't allow sleeping to be more than 0.5s.
        delay_us = new_delay_us;
      }
    }
    if (delay_us == 0) {
      sched_yield();
      // Default to 0.5 ms, which is doubled to 1 ms before the first actual sleep.
      delay_us = 500;
    } else {
      usleep(delay_us);
      total_delay_us += delay_us;
    }
  }
}
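
// Back-off behaviour of the loop above: the first pass just sched_yield()s, after which the
// sleep starts at 1 ms (500 us doubled before the first usleep) and keeps doubling, capped
// below 0.5 s per sleep, until the thread is observed suspended or the 30 s timeout expires.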

void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
  std::string group_name;
  int priority;
  bool is_daemon = false;
  Thread* self = Thread::Current();

  if (self != NULL && thread != NULL && thread->opeer_ != NULL) {
    ScopedObjectAccessUnchecked soa(self);
    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(thread->opeer_);
    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(thread->opeer_);

    mirror::Object* thread_group =
        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->opeer_);

    if (thread_group != NULL) {
      mirror::Field* group_name_field =
          soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
      mirror::String* group_name_string =
          reinterpret_cast<mirror::String*>(group_name_field->GetObject(thread_group));
      group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : "<null>";
    }
  } else {
    priority = GetNativePriority();
  }

  std::string scheduler_group_name(GetSchedulerGroupName(tid));
  if (scheduler_group_name.empty()) {
    scheduler_group_name = "default";
  }

  if (thread != NULL) {
    os << '"' << *thread->name_ << '"';
    if (is_daemon) {
      os << " daemon";
    }
    os << " prio=" << priority
       << " tid=" << thread->GetThinLockId()
       << " " << thread->GetState();
    if (thread->IsStillStarting()) {
      os << " (still starting up)";
    }
    os << "\n";
  } else {
    os << '"' << ::art::GetThreadName(tid) << '"'
       << " prio=" << priority
       << " (not attached)\n";
  }

  if (thread != NULL) {
    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
    os << "  | group=\"" << group_name << "\""
       << " sCount=" << thread->suspend_count_
       << " dsCount=" << thread->debug_suspend_count_
       << " obj=" << reinterpret_cast<void*>(thread->opeer_)
       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
  }

  os << "  | sysTid=" << tid
     << " nice=" << getpriority(PRIO_PROCESS, tid)
     << " cgrp=" << scheduler_group_name;
  if (thread != NULL) {
    int policy;
    sched_param sp;
    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->pthread_self_, &policy, &sp), __FUNCTION__);
    os << " sched=" << policy << "/" << sp.sched_priority
       << " handle=" << reinterpret_cast<void*>(thread->pthread_self_);
  }
  os << "\n";

  // Grab the scheduler stats for this thread.
  std::string scheduler_stats;
  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
  } else {
    scheduler_stats = "0 0 0";
  }

  char native_thread_state = '?';
  int utime = 0;
  int stime = 0;
  int task_cpu = 0;
  GetTaskStats(tid, native_thread_state, utime, stime, task_cpu);

  os << "  | state=" << native_thread_state
     << " schedstat=( " << scheduler_stats << " )"
     << " utm=" << utime
     << " stm=" << stime
     << " core=" << task_cpu
     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
  if (thread != NULL) {
    os << "  | stack=" << reinterpret_cast<void*>(thread->stack_begin_) << "-" << reinterpret_cast<void*>(thread->stack_end_)
       << " stackSize=" << PrettySize(thread->stack_size_) << "\n";
  }
}

void Thread::DumpState(std::ostream& os) const {
  Thread::DumpState(os, this, GetTid());
}

struct StackDumpVisitor : public StackVisitor {
  StackDumpVisitor(std::ostream& os, Thread* thread, Context* context, bool can_allocate)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, context), os(os), thread(thread), can_allocate(can_allocate),
        last_method(NULL), last_line_number(0), repetition_count(0), frame_count(0) {
  }

  virtual ~StackDumpVisitor() {
    if (frame_count == 0) {
      os << "  (no managed stack frames)\n";
    }
  }

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    mirror::AbstractMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;
    }
    const int kMaxRepetition = 3;
    mirror::Class* c = m->GetDeclaringClass();
    const mirror::DexCache* dex_cache = c->GetDexCache();
    int line_number = -1;
    if (dex_cache != NULL) {  // be tolerant of bad input
      const DexFile& dex_file = *dex_cache->GetDexFile();
      line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
    }
    if (line_number == last_line_number && last_method == m) {
      repetition_count++;
    } else {
      if (repetition_count >= kMaxRepetition) {
        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
      }
      repetition_count = 0;
      last_line_number = line_number;
      last_method = m;
    }
    if (repetition_count < kMaxRepetition) {
      os << "  at " << PrettyMethod(m, false);
      if (m->IsNative()) {
        os << "(Native method)";
      } else {
        mh.ChangeMethod(m);
        const char* source_file(mh.GetDeclaringClassSourceFile());
        os << "(" << (source_file != NULL ? source_file : "unavailable")
           << ":" << line_number << ")";
      }
      os << "\n";
      if (frame_count == 0) {
        Monitor::DescribeWait(os, thread);
      }
      if (can_allocate) {
        Monitor::VisitLocks(this, DumpLockedObject, &os);
      }
    }

    ++frame_count;
    return true;
  }

  static void DumpLockedObject(mirror::Object* o, void* context)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    std::ostream& os = *reinterpret_cast<std::ostream*>(context);
    os << "  - locked <" << o << "> (a " << PrettyTypeOf(o) << ")\n";
  }

  std::ostream& os;
  const Thread* thread;
  const bool can_allocate;
  MethodHelper mh;
  mirror::AbstractMethod* last_method;
  int last_line_number;
  int repetition_count;
  int frame_count;
};

static bool ShouldShowNativeStack(const Thread* thread)
    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
  ThreadState state = thread->GetState();

  // In native code somewhere in the VM (one of the kWaitingFor* states)? That's interesting.
  if (state > kWaiting && state < kStarting) {
    return true;
  }

  // In an Object.wait variant or Thread.sleep? That's not interesting.
  if (state == kTimedWaiting || state == kSleeping || state == kWaiting) {
    return false;
  }

  // In some other native method? That's interesting.
  // We don't just check kNative because native methods will be in state kSuspended if they're
  // calling back into the VM, or kBlocked if they're blocked on a monitor, or one of the
  // thread-startup states if it's early enough in their life cycle (http://b/7432159).
  mirror::AbstractMethod* current_method = thread->GetCurrentMethod(NULL);
  return current_method != NULL && current_method->IsNative();
}

void Thread::DumpStack(std::ostream& os) const {
  // TODO: we call this code when dying but may not have suspended the thread ourself. The
  //       IsSuspended check is therefore racy with the use for dumping (normally we inhibit
  //       the race with the thread_suspend_count_lock_).
  bool dump_for_abort = (gAborting > 0);
  if (this == Thread::Current() || IsSuspended() || dump_for_abort) {
    // If we're currently in native code, dump that stack before dumping the managed stack.
    if (dump_for_abort || ShouldShowNativeStack(this)) {
      DumpKernelStack(os, GetTid(), "  kernel: ", false);
      DumpNativeStack(os, GetTid(), "  native: ", false);
    }
    UniquePtr<Context> context(Context::Create());
    StackDumpVisitor dumper(os, const_cast<Thread*>(this), context.get(), !throwing_OutOfMemoryError_);
    dumper.WalkStack();
  } else {
    os << "Not able to dump stack of thread that isn't suspended";
  }
}

void Thread::ThreadExitCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  if (self->thread_exit_check_count_ == 0) {
    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's going to use a pthread_key_create destructor?): " << *self;
    CHECK(is_started_);
    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
    self->thread_exit_check_count_ = 1;
  } else {
    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
  }
}

void Thread::Startup() {
  CHECK(!is_started_);
  is_started_ = true;
  {
    // MutexLock to keep annotalysis happy.
    //
    // Note we use NULL for the thread because Thread::Current can
    // return garbage since (is_started_ == true) and
    // Thread::pthread_key_self_ is not yet initialized.
    // This was seen on glibc.
    MutexLock mu(NULL, *Locks::thread_suspend_count_lock_);
    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
                                         *Locks::thread_suspend_count_lock_);
  }

  // Allocate a TLS slot.
  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");

  // Double-check the TLS slot allocation.
  if (pthread_getspecific(pthread_key_self_) != NULL) {
    LOG(FATAL) << "Newly-created pthread TLS slot is not NULL";
  }
}

void Thread::FinishStartup() {
  Runtime* runtime = Runtime::Current();
  CHECK(runtime->IsStarted());

  // Finish attaching the main thread.
  ScopedObjectAccess soa(Thread::Current());
  Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());

  Runtime::Current()->GetClassLinker()->RunRootClinits();
}

void Thread::Shutdown() {
  CHECK(is_started_);
  is_started_ = false;
  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
  MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);
  if (resume_cond_ != NULL) {
    delete resume_cond_;
    resume_cond_ = NULL;
  }
}

Thread::Thread(bool daemon)
    : suspend_count_(0),
      card_table_(NULL),
      exception_(NULL),
      stack_end_(NULL),
      managed_stack_(),
      jni_env_(NULL),
      self_(NULL),
      opeer_(NULL),
      jpeer_(NULL),
      stack_begin_(NULL),
      stack_size_(0),
      thin_lock_id_(0),
      tid_(0),
      wait_mutex_(new Mutex("a thread wait mutex")),
      wait_cond_(new ConditionVariable("a thread wait condition variable", *wait_mutex_)),
      wait_monitor_(NULL),
      interrupted_(false),
      wait_next_(NULL),
      monitor_enter_object_(NULL),
      top_sirt_(NULL),
      runtime_(NULL),
      class_loader_override_(NULL),
      long_jump_context_(NULL),
      throwing_OutOfMemoryError_(false),
      debug_suspend_count_(0),
      debug_invoke_req_(new DebugInvokeReq),
      deoptimization_shadow_frame_(NULL),
      instrumentation_stack_(new std::deque<instrumentation::InstrumentationStackFrame>),
      name_(new std::string(kThreadNameDuringStartup)),
      daemon_(daemon),
      pthread_self_(0),
      no_thread_suspension_(0),
      last_no_thread_suspension_cause_(NULL),
      checkpoint_function_(0),
      thread_exit_check_count_(0) {
  CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
  state_and_flags_.as_struct.flags = 0;
  state_and_flags_.as_struct.state = kNative;
  memset(&held_mutexes_[0], 0, sizeof(held_mutexes_));
}

bool Thread::IsStillStarting() const {
  // You might think you can check whether the state is kStarting, but for much of thread startup,
  // the thread is in kNative; it might also be in kVmWait.
  // You might think you can check whether the peer is NULL, but the peer is actually created and
  // assigned fairly early on, and needs to be.
  // It turns out that the last thing to change is the thread name; that's a good proxy for "has
  // this thread _ever_ entered kRunnable".
  return (jpeer_ == NULL && opeer_ == NULL) || (*name_ == kThreadNameDuringStartup);
}

void Thread::AssertNoPendingException() const {
  if (UNLIKELY(IsExceptionPending())) {
    ScopedObjectAccess soa(Thread::Current());
    mirror::Throwable* exception = GetException(NULL);
    LOG(FATAL) << "No pending exception expected: " << exception->Dump();
  }
}

static void MonitorExitVisitor(const mirror::Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS {
  Thread* self = reinterpret_cast<Thread*>(arg);
  mirror::Object* entered_monitor = const_cast<mirror::Object*>(object);
  if (self->HoldsLock(entered_monitor)) {
    LOG(WARNING) << "Calling MonitorExit on object "
                 << object << " (" << PrettyTypeOf(object) << ")"
                 << " left locked by native thread "
                 << *Thread::Current() << " which is detaching";
    entered_monitor->MonitorExit(self);
  }
}

void Thread::Destroy() {
  Thread* self = this;
  DCHECK_EQ(self, Thread::Current());

  if (opeer_ != NULL) {
    ScopedObjectAccess soa(self);
    // We may need to call user-supplied managed code, do this before final clean-up.
    HandleUncaughtExceptions(soa);
    RemoveFromThreadGroup(soa);

    // this.nativePeer = 0;
    soa.DecodeField(WellKnownClasses::java_lang_Thread_nativePeer)->SetInt(opeer_, 0);
    Dbg::PostThreadDeath(self);

    // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
    // who is waiting.
    mirror::Object* lock =
        soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(opeer_);
    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
    if (lock != NULL) {
      ObjectLock locker(self, lock);
      locker.Notify();
    }
  }

  // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
  if (jni_env_ != NULL) {
    jni_env_->monitors.VisitRoots(MonitorExitVisitor, self);
  }
}

Thread::~Thread() {
  if (jni_env_ != NULL && jpeer_ != NULL) {
    // If pthread_create fails we don't have a jni env here.
    jni_env_->DeleteGlobalRef(jpeer_);
    jpeer_ = NULL;
  }
  opeer_ = NULL;

  delete jni_env_;
  jni_env_ = NULL;

  CHECK_NE(GetState(), kRunnable);
  // We may be deleting a stillborn thread.
  SetStateUnsafe(kTerminated);

  delete wait_cond_;
  delete wait_mutex_;

  if (long_jump_context_ != NULL) {
    delete long_jump_context_;
  }

  delete debug_invoke_req_;
  delete instrumentation_stack_;
  delete name_;

  TearDownAlternateSignalStack();
}

void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
  if (!IsExceptionPending()) {
    return;
  }
  ScopedLocalRef<jobject> peer(jni_env_, soa.AddLocalReference<jobject>(opeer_));
  ScopedThreadStateChange tsc(this, kNative);

  // Get and clear the exception.
  ScopedLocalRef<jthrowable> exception(jni_env_, jni_env_->ExceptionOccurred());
  jni_env_->ExceptionClear();

  // If the thread has its own handler, use that.
  ScopedLocalRef<jobject> handler(jni_env_,
                                  jni_env_->GetObjectField(peer.get(),
                                                           WellKnownClasses::java_lang_Thread_uncaughtHandler));
  if (handler.get() == NULL) {
    // Otherwise use the thread group's default handler.
    handler.reset(jni_env_->GetObjectField(peer.get(), WellKnownClasses::java_lang_Thread_group));
  }

  // Call the handler.
  jni_env_->CallVoidMethod(handler.get(),
                           WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException,
                           peer.get(), exception.get());

  // If the handler threw, clear that exception too.
  jni_env_->ExceptionClear();
}

void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
  // this.group.removeThread(this);
  // group can be null if we're in the compiler or a test.
  mirror::Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(opeer_);
  if (ogroup != NULL) {
    ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
    ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(opeer_));
    ScopedThreadStateChange tsc(soa.Self(), kNative);
    jni_env_->CallVoidMethod(group.get(), WellKnownClasses::java_lang_ThreadGroup_removeThread,
                             peer.get());
  }
}

size_t Thread::NumSirtReferences() {
  size_t count = 0;
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
    count += cur->NumberOfReferences();
  }
  return count;
}

bool Thread::SirtContains(jobject obj) const {
  mirror::Object** sirt_entry = reinterpret_cast<mirror::Object**>(obj);
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
    if (cur->Contains(sirt_entry)) {
      return true;
    }
  }
  // JNI code invoked from portable code uses shadow frames rather than the SIRT.
  return managed_stack_.ShadowFramesContain(sirt_entry);
}

void Thread::SirtVisitRoots(RootVisitor* visitor, void* arg) {
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
    size_t num_refs = cur->NumberOfReferences();
    for (size_t j = 0; j < num_refs; j++) {
      mirror::Object* object = cur->GetReference(j);
      if (object != NULL) {
        visitor(object, arg);
      }
    }
  }
}

mirror::Object* Thread::DecodeJObject(jobject obj) const {
  Locks::mutator_lock_->AssertSharedHeld(this);
  if (obj == NULL) {
    return NULL;
  }
  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
  IndirectRefKind kind = GetIndirectRefKind(ref);
  mirror::Object* result;
  // The "kinds" below are sorted by the frequency we expect to encounter them.
  if (kind == kLocal) {
    IndirectReferenceTable& locals = jni_env_->locals;
    result = const_cast<mirror::Object*>(locals.Get(ref));
  } else if (kind == kSirtOrInvalid) {
    // TODO: make stack indirect reference table lookup more efficient
    // Check if this is a local reference in the SIRT
    if (LIKELY(SirtContains(obj))) {
      result = *reinterpret_cast<mirror::Object**>(obj);  // Read from SIRT
    } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) {
      // Assume an invalid local reference is actually a direct pointer.
      result = reinterpret_cast<mirror::Object*>(obj);
    } else {
      result = kInvalidIndirectRefObject;
    }
  } else if (kind == kGlobal) {
    JavaVMExt* vm = Runtime::Current()->GetJavaVM();
    IndirectReferenceTable& globals = vm->globals;
    MutexLock mu(const_cast<Thread*>(this), vm->globals_lock);
    result = const_cast<mirror::Object*>(globals.Get(ref));
  } else {
    DCHECK_EQ(kind, kWeakGlobal);
    JavaVMExt* vm = Runtime::Current()->GetJavaVM();
    IndirectReferenceTable& weak_globals = vm->weak_globals;
    MutexLock mu(const_cast<Thread*>(this), vm->weak_globals_lock);
    result = const_cast<mirror::Object*>(weak_globals.Get(ref));
    if (result == kClearedJniWeakGlobal) {
      // This is a special case where it's okay to return NULL.
      return NULL;
    }
  }

  if (UNLIKELY(result == NULL)) {
    JniAbortF(NULL, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
  } else {
    if (kIsDebugBuild && (result != kInvalidIndirectRefObject)) {
      Runtime::Current()->GetHeap()->VerifyObject(result);
    }
  }
  return result;
}

// Implements java.lang.Thread.interrupted.
bool Thread::Interrupted() {
  MutexLock mu(Thread::Current(), *wait_mutex_);
  bool interrupted = interrupted_;
  interrupted_ = false;
  return interrupted;
}

// Implements java.lang.Thread.isInterrupted.
bool Thread::IsInterrupted() {
  MutexLock mu(Thread::Current(), *wait_mutex_);
  return interrupted_;
}

void Thread::Interrupt() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *wait_mutex_);
  if (interrupted_) {
    return;
  }
  interrupted_ = true;
  NotifyLocked(self);
}

void Thread::Notify() {
  Thread* self = Thread::Current();
  MutexLock mu(self, *wait_mutex_);
  NotifyLocked(self);
}

void Thread::NotifyLocked(Thread* self) {
  if (wait_monitor_ != NULL) {
    wait_cond_->Signal(self);
  }
}

class CountStackDepthVisitor : public StackVisitor {
 public:
  explicit CountStackDepthVisitor(Thread* thread)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      : StackVisitor(thread, NULL),
        depth_(0), skip_depth_(0), skipping_(true) {}

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // We want to skip frames up to and including the exception's constructor.
    // Note we also skip the frame if it doesn't have a method (namely the callee
    // save frame)
    mirror::AbstractMethod* m = GetMethod();
    if (skipping_ && !m->IsRuntimeMethod() &&
        !mirror::Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
      skipping_ = false;
    }
    if (!skipping_) {
      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
        ++depth_;
      }
    } else {
      ++skip_depth_;
    }
    return true;
  }

  int GetDepth() const {
    return depth_;
  }

  int GetSkipDepth() const {
    return skip_depth_;
  }

 private:
  uint32_t depth_;
  uint32_t skip_depth_;
  bool skipping_;
};

class BuildInternalStackTraceVisitor : public StackVisitor {
 public:
  explicit BuildInternalStackTraceVisitor(Thread* self, Thread* thread, int skip_depth)
      : StackVisitor(thread, NULL), self_(self),
        skip_depth_(skip_depth), count_(0), dex_pc_trace_(NULL), method_trace_(NULL) {}

  bool Init(int depth)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Allocate method trace with an extra slot that will hold the PC trace
    SirtRef<mirror::ObjectArray<mirror::Object> >
        method_trace(self_,
                     Runtime::Current()->GetClassLinker()->AllocObjectArray<mirror::Object>(self_,
                                                                                            depth + 1));
    if (method_trace.get() == NULL) {
      return false;
    }
    mirror::IntArray* dex_pc_trace = mirror::IntArray::Alloc(self_, depth);
    if (dex_pc_trace == NULL) {
      return false;
    }
    // Save PC trace in last element of method trace, also places it into the
    // object graph.
    method_trace->Set(depth, dex_pc_trace);
    // Set the Object*s and assert that no thread suspension is now possible.
    const char* last_no_suspend_cause =
        self_->StartAssertNoThreadSuspension("Building internal stack trace");
    CHECK(last_no_suspend_cause == NULL) << last_no_suspend_cause;
    method_trace_ = method_trace.get();
    dex_pc_trace_ = dex_pc_trace;
    return true;
  }

  virtual ~BuildInternalStackTraceVisitor() {
    if (method_trace_ != NULL) {
      self_->EndAssertNoThreadSuspension(NULL);
    }
  }

  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (method_trace_ == NULL || dex_pc_trace_ == NULL) {
      return true;  // We're probably trying to fillInStackTrace for an OutOfMemoryError.
    }
    if (skip_depth_ > 0) {
      skip_depth_--;
      return true;
    }
    mirror::AbstractMethod* m = GetMethod();
    if (m->IsRuntimeMethod()) {
      return true;  // Ignore runtime frames (in particular callee save).
    }
    method_trace_->Set(count_, m);
    dex_pc_trace_->Set(count_, GetDexPc());
    ++count_;
    return true;
  }

  mirror::ObjectArray<mirror::Object>* GetInternalStackTrace() const {
    return method_trace_;
  }

 private:
  Thread* const self_;
  // How many more frames to skip.
  int32_t skip_depth_;
  // Current position down stack trace.
  uint32_t count_;
  // Array of dex PC values.
  mirror::IntArray* dex_pc_trace_;
  // An array of the methods on the stack, the last entry is a reference to the PC trace.
  mirror::ObjectArray<mirror::Object>* method_trace_;
};

jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const {
  // Compute depth of stack
  CountStackDepthVisitor count_visitor(const_cast<Thread*>(this));
  count_visitor.WalkStack();
  int32_t depth = count_visitor.GetDepth();
  int32_t skip_depth = count_visitor.GetSkipDepth();

  // Build internal stack trace.
  BuildInternalStackTraceVisitor build_trace_visitor(soa.Self(), const_cast<Thread*>(this),
                                                     skip_depth);
  if (!build_trace_visitor.Init(depth)) {
    return NULL;  // Allocation failed.
  }
  build_trace_visitor.WalkStack();
  mirror::ObjectArray<mirror::Object>* trace = build_trace_visitor.GetInternalStackTrace();
  if (kIsDebugBuild) {
    for (int32_t i = 0; i < trace->GetLength(); ++i) {
      CHECK(trace->Get(i) != NULL);
    }
  }
  return soa.AddLocalReference<jobjectArray>(trace);
}

jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
    jobjectArray output_array, int* stack_depth) {
  // Transition into runnable state to work on Object*/Array*
  ScopedObjectAccess soa(env);
  // Decode the internal stack trace into the depth, method trace and PC trace
  mirror::ObjectArray<mirror::Object>* method_trace =
      soa.Decode<mirror::ObjectArray<mirror::Object>*>(internal);
  int32_t depth = method_trace->GetLength() - 1;
  mirror::IntArray* pc_trace = down_cast<mirror::IntArray*>(method_trace->Get(depth));

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();

  jobjectArray result;
  mirror::ObjectArray<mirror::StackTraceElement>* java_traces;
  if (output_array != NULL) {
    // Reuse the array we were given.
    result = output_array;
    java_traces = soa.Decode<mirror::ObjectArray<mirror::StackTraceElement>*>(output_array);
    // ...adjusting the number of frames we'll write to not exceed the array length.
    depth = std::min(depth, java_traces->GetLength());
  } else {
    // Create java_trace array and place in local reference table
    java_traces = class_linker->AllocStackTraceElementArray(soa.Self(), depth);
    if (java_traces == NULL) {
      return NULL;
    }
    result = soa.AddLocalReference<jobjectArray>(java_traces);
  }

  if (stack_depth != NULL) {
    *stack_depth = depth;
  }

  MethodHelper mh;
  for (int32_t i = 0; i < depth; ++i) {
    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
    mirror::AbstractMethod* method = down_cast<mirror::AbstractMethod*>(method_trace->Get(i));
    mh.ChangeMethod(method);
    uint32_t dex_pc = pc_trace->Get(i);
    int32_t line_number = mh.GetLineNumFromDexPC(dex_pc);
    // Allocate element, potentially triggering GC
    // TODO: reuse class_name_object via Class::name_?
    const char* descriptor = mh.GetDeclaringClassDescriptor();
    CHECK(descriptor != NULL);
    std::string class_name(PrettyDescriptor(descriptor));
    SirtRef<mirror::String> class_name_object(soa.Self(),
                                              mirror::String::AllocFromModifiedUtf8(soa.Self(),
                                                                                    class_name.c_str()));
    if (class_name_object.get() == NULL) {
      return NULL;
    }
    const char* method_name = mh.GetName();
    CHECK(method_name != NULL);
    SirtRef<mirror::String> method_name_object(soa.Self(),
                                               mirror::String::AllocFromModifiedUtf8(soa.Self(),
                                                                                     method_name));
    if (method_name_object.get() == NULL) {
      return NULL;
    }
    const char* source_file = mh.GetDeclaringClassSourceFile();
    SirtRef<mirror::String> source_name_object(soa.Self(), mirror::String::AllocFromModifiedUtf8(soa.Self(),
                                                                                                 source_file));
    mirror::StackTraceElement* obj = mirror::StackTraceElement::Alloc(soa.Self(),
                                                                      class_name_object.get(),
                                                                      method_name_object.get(),
                                                                      source_name_object.get(),
                                                                      line_number);
    if (obj == NULL) {
      return NULL;
    }
#ifdef MOVING_GARBAGE_COLLECTOR
    // Re-read after potential GC
    java_traces = Decode<ObjectArray<Object>*>(soa.Env(), result);
    method_trace = down_cast<ObjectArray<Object>*>(Decode<Object*>(soa.Env(), internal));
    pc_trace = down_cast<IntArray*>(method_trace->Get(depth));
#endif
    java_traces->Set(i, obj);
  }
  return result;
}
1442
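// Convenience wrapper that formats a printf-style detail message before throwing. An
// illustrative (hypothetical) call site might look like:
//   ThrowNewExceptionF(throw_location, "Ljava/lang/ArrayIndexOutOfBoundsException;",
//                      "length=%d; index=%d", length, index);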
1443void Thread::ThrowNewExceptionF(const ThrowLocation& throw_location,
1444                                const char* exception_class_descriptor, const char* fmt, ...) {
1445  va_list args;
1446  va_start(args, fmt);
1447  ThrowNewExceptionV(throw_location, exception_class_descriptor,
1448                     fmt, args);
1449  va_end(args);
1450}
1451
1452void Thread::ThrowNewExceptionV(const ThrowLocation& throw_location,
1453                                const char* exception_class_descriptor,
1454                                const char* fmt, va_list ap) {
1455  std::string msg;
1456  StringAppendV(&msg, fmt, ap);
1457  ThrowNewException(throw_location, exception_class_descriptor, msg.c_str());
1458}
1459
1460void Thread::ThrowNewException(const ThrowLocation& throw_location, const char* exception_class_descriptor,
1461                               const char* msg) {
1462  AssertNoPendingException();  // Callers should either clear or call ThrowNewWrappedException.
1463  ThrowNewWrappedException(throw_location, exception_class_descriptor, msg);
1464}
1465
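// Core throw path: resolve and initialize the exception class, allocate the Throwable, chain
// any already-pending exception as the cause, and invoke the matching <init> (message and/or
// cause variants). If the runtime hasn't started (the common case in the compiler), the detail
// message and cause fields are set directly instead of invoking the constructor.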
1466void Thread::ThrowNewWrappedException(const ThrowLocation& throw_location,
1467                                      const char* exception_class_descriptor,
1468                                      const char* msg) {
1469  DCHECK_EQ(this, Thread::Current());
1470  // Ensure we don't forget arguments over object allocation.
1471  SirtRef<mirror::Object> saved_throw_this(this, throw_location.GetThis());
1472  SirtRef<mirror::AbstractMethod> saved_throw_method(this, throw_location.GetMethod());
1473  // Ignore the cause throw location. TODO: should we report this as a re-throw?
1474  SirtRef<mirror::Throwable> cause(this, GetException(NULL));
1475  ClearException();
1476  Runtime* runtime = Runtime::Current();
1477
1478  mirror::ClassLoader* cl = NULL;
1479  if (throw_location.GetMethod() != NULL) {
1480    cl = throw_location.GetMethod()->GetDeclaringClass()->GetClassLoader();
1481  }
1482  SirtRef<mirror::Class>
1483      exception_class(this, runtime->GetClassLinker()->FindClass(exception_class_descriptor, cl));
1484  if (UNLIKELY(exception_class.get() == NULL)) {
1485    CHECK(IsExceptionPending());
1486    LOG(ERROR) << "No exception class " << PrettyDescriptor(exception_class_descriptor);
1487    return;
1488  }
1489
1490  if (UNLIKELY(!runtime->GetClassLinker()->EnsureInitialized(exception_class.get(), true, true))) {
1491    DCHECK(IsExceptionPending());
1492    return;
1493  }
1494  DCHECK(!runtime->IsStarted() || exception_class->IsThrowableClass());
1495  SirtRef<mirror::Throwable> exception(this,
1496                                down_cast<mirror::Throwable*>(exception_class->AllocObject(this)));
1497
1498  // Choose an appropriate constructor and set up the arguments.
1499  const char* signature;
1500  SirtRef<mirror::String> msg_string(this, NULL);
1501  if (msg != NULL) {
1502    // Ensure we remember this and the method over the String allocation.
1503    msg_string.reset(mirror::String::AllocFromModifiedUtf8(this, msg));
1504    if (UNLIKELY(msg_string.get() == NULL)) {
1505      CHECK(IsExceptionPending());  // OOME.
1506      return;
1507    }
1508    if (cause.get() == NULL) {
1509      signature = "(Ljava/lang/String;)V";
1510    } else {
1511      signature = "(Ljava/lang/String;Ljava/lang/Throwable;)V";
1512    }
1513  } else {
1514    if (cause.get() == NULL) {
1515      signature = "()V";
1516    } else {
1517      signature = "(Ljava/lang/Throwable;)V";
1518    }
1519  }
1520  mirror::AbstractMethod* exception_init_method =
1521      exception_class->FindDeclaredDirectMethod("<init>", signature);
1522
1523  CHECK(exception_init_method != NULL) << "No <init>" << signature << " in "
1524      << PrettyDescriptor(exception_class_descriptor);
1525
1526  if (UNLIKELY(!runtime->IsStarted())) {
1527    // Something is trying to throw an exception without a started runtime, which is the common
1528    // case in the compiler. We won't be able to invoke the constructor of the exception, so set
1529    // the exception fields directly.
1530    if (msg != NULL) {
1531      exception->SetDetailMessage(msg_string.get());
1532    }
1533    if (cause.get() != NULL) {
1534      exception->SetCause(cause.get());
1535    }
1536    ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
1537                                         throw_location.GetDexPc());
1538    SetException(gc_safe_throw_location, exception.get());
1539  } else {
1540    ArgArray args("VLL", 3);
1541    args.Append(reinterpret_cast<uint32_t>(exception.get()));
1542    if (msg != NULL) {
1543      args.Append(reinterpret_cast<uint32_t>(msg_string.get()));
1544    }
1545    if (cause.get() != NULL) {
1546      args.Append(reinterpret_cast<uint32_t>(cause.get()));
1547    }
1548    JValue result;
1549    exception_init_method->Invoke(this, args.GetArray(), args.GetNumBytes(), &result, 'V');
1550    if (LIKELY(!IsExceptionPending())) {
1551      ThrowLocation gc_safe_throw_location(saved_throw_this.get(), saved_throw_method.get(),
1552                                           throw_location.GetDexPc());
1553      SetException(gc_safe_throw_location, exception.get());
1554    }
1555  }
1556}
1557
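// Throws OutOfMemoryError while guarding against recursion: if we are already in the middle of
// throwing one, fall back to the runtime's pre-allocated OutOfMemoryError rather than
// attempting another allocation.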
1558void Thread::ThrowOutOfMemoryError(const char* msg) {
1559  LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
1560      msg, (throwing_OutOfMemoryError_ ? " (recursive case)" : ""));
1561  ThrowLocation throw_location = GetCurrentLocationForThrow();
1562  if (!throwing_OutOfMemoryError_) {
1563    throwing_OutOfMemoryError_ = true;
1564    ThrowNewException(throw_location, "Ljava/lang/OutOfMemoryError;", msg);
1565    throwing_OutOfMemoryError_ = false;
1566  } else {
1567    Dump(LOG(ERROR));  // The pre-allocated OOME has no stack, so help out and log one.
1568    SetException(throw_location, Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1569  }
1570}
1571
1572Thread* Thread::CurrentFromGdb() {
1573  return Thread::Current();
1574}
1575
1576void Thread::DumpFromGdb() const {
1577  std::ostringstream ss;
1578  Dump(ss);
1579  std::string str(ss.str());
1580  // Log to stderr for debugging command-line processes.
1581  std::cerr << str;
1582#ifdef HAVE_ANDROID_OS
1583  // Log to logcat for debugging framework processes.
1584  LOG(INFO) << str;
1585#endif
1586}
1587
1588struct EntryPointInfo {
1589  uint32_t offset;
1590  const char* name;
1591};
1592#define QUICK_ENTRY_POINT_INFO(x) { QUICK_ENTRYPOINT_OFFSET(x), #x }
1593#define PORTABLE_ENTRY_POINT_INFO(x) { PORTABLE_ENTRYPOINT_OFFSET(x), #x }
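// Offset/name table used by DumpThreadOffset() to print a symbolic name for a Thread field
// offset. Entries must appear in the same order as the fields of QuickEntryPoints and
// PortableEntryPoints; DumpThreadOffset() CHECKs this by walking the expected offsets.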
1594static const EntryPointInfo gThreadEntryPointInfo[] = {
1595  QUICK_ENTRY_POINT_INFO(pAllocArrayFromCode),
1596  QUICK_ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck),
1597  QUICK_ENTRY_POINT_INFO(pAllocObjectFromCode),
1598  QUICK_ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck),
1599  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode),
1600  QUICK_ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck),
1601  QUICK_ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode),
1602  QUICK_ENTRY_POINT_INFO(pCanPutArrayElementFromCode),
1603  QUICK_ENTRY_POINT_INFO(pCheckCastFromCode),
1604  QUICK_ENTRY_POINT_INFO(pInitializeStaticStorage),
1605  QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode),
1606  QUICK_ENTRY_POINT_INFO(pInitializeTypeFromCode),
1607  QUICK_ENTRY_POINT_INFO(pResolveStringFromCode),
1608  QUICK_ENTRY_POINT_INFO(pSet32Instance),
1609  QUICK_ENTRY_POINT_INFO(pSet32Static),
1610  QUICK_ENTRY_POINT_INFO(pSet64Instance),
1611  QUICK_ENTRY_POINT_INFO(pSet64Static),
1612  QUICK_ENTRY_POINT_INFO(pSetObjInstance),
1613  QUICK_ENTRY_POINT_INFO(pSetObjStatic),
1614  QUICK_ENTRY_POINT_INFO(pGet32Instance),
1615  QUICK_ENTRY_POINT_INFO(pGet32Static),
1616  QUICK_ENTRY_POINT_INFO(pGet64Instance),
1617  QUICK_ENTRY_POINT_INFO(pGet64Static),
1618  QUICK_ENTRY_POINT_INFO(pGetObjInstance),
1619  QUICK_ENTRY_POINT_INFO(pGetObjStatic),
1620  QUICK_ENTRY_POINT_INFO(pHandleFillArrayDataFromCode),
1621  QUICK_ENTRY_POINT_INFO(pJniMethodStart),
1622  QUICK_ENTRY_POINT_INFO(pJniMethodStartSynchronized),
1623  QUICK_ENTRY_POINT_INFO(pJniMethodEnd),
1624  QUICK_ENTRY_POINT_INFO(pJniMethodEndSynchronized),
1625  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReference),
1626  QUICK_ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
1627  QUICK_ENTRY_POINT_INFO(pLockObjectFromCode),
1628  QUICK_ENTRY_POINT_INFO(pUnlockObjectFromCode),
1629  QUICK_ENTRY_POINT_INFO(pCmpgDouble),
1630  QUICK_ENTRY_POINT_INFO(pCmpgFloat),
1631  QUICK_ENTRY_POINT_INFO(pCmplDouble),
1632  QUICK_ENTRY_POINT_INFO(pCmplFloat),
1633  QUICK_ENTRY_POINT_INFO(pFmod),
1634  QUICK_ENTRY_POINT_INFO(pSqrt),
1635  QUICK_ENTRY_POINT_INFO(pL2d),
1636  QUICK_ENTRY_POINT_INFO(pFmodf),
1637  QUICK_ENTRY_POINT_INFO(pL2f),
1638  QUICK_ENTRY_POINT_INFO(pD2iz),
1639  QUICK_ENTRY_POINT_INFO(pF2iz),
1640  QUICK_ENTRY_POINT_INFO(pIdivmod),
1641  QUICK_ENTRY_POINT_INFO(pD2l),
1642  QUICK_ENTRY_POINT_INFO(pF2l),
1643  QUICK_ENTRY_POINT_INFO(pLdiv),
1644  QUICK_ENTRY_POINT_INFO(pLdivmod),
1645  QUICK_ENTRY_POINT_INFO(pLmul),
1646  QUICK_ENTRY_POINT_INFO(pShlLong),
1647  QUICK_ENTRY_POINT_INFO(pShrLong),
1648  QUICK_ENTRY_POINT_INFO(pUshrLong),
1649  QUICK_ENTRY_POINT_INFO(pInterpreterToInterpreterEntry),
1650  QUICK_ENTRY_POINT_INFO(pInterpreterToQuickEntry),
1651  QUICK_ENTRY_POINT_INFO(pIndexOf),
1652  QUICK_ENTRY_POINT_INFO(pMemcmp16),
1653  QUICK_ENTRY_POINT_INFO(pStringCompareTo),
1654  QUICK_ENTRY_POINT_INFO(pMemcpy),
1655  QUICK_ENTRY_POINT_INFO(pQuickResolutionTrampolineFromCode),
1656  QUICK_ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
1657  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
1658  QUICK_ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
1659  QUICK_ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
1660  QUICK_ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
1661  QUICK_ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
1662  QUICK_ENTRY_POINT_INFO(pCheckSuspendFromCode),
1663  QUICK_ENTRY_POINT_INFO(pTestSuspendFromCode),
1664  QUICK_ENTRY_POINT_INFO(pDeliverException),
1665  QUICK_ENTRY_POINT_INFO(pThrowArrayBoundsFromCode),
1666  QUICK_ENTRY_POINT_INFO(pThrowDivZeroFromCode),
1667  QUICK_ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode),
1668  QUICK_ENTRY_POINT_INFO(pThrowNullPointerFromCode),
1669  QUICK_ENTRY_POINT_INFO(pThrowStackOverflowFromCode),
1670  PORTABLE_ENTRY_POINT_INFO(pPortableResolutionTrampolineFromCode),
1671};
1672#undef QUICK_ENTRY_POINT_INFO
#undef PORTABLE_ENTRY_POINT_INFO
1673
1674void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) {
1675  CHECK_EQ(size_of_pointers, 4U);  // TODO: support 64-bit targets.
1676
1677#define DO_THREAD_OFFSET(x) \
1678    if (offset == static_cast<uint32_t>(OFFSETOF_VOLATILE_MEMBER(Thread, x))) { \
1679      os << # x; \
1680      return; \
1681    }
1682  DO_THREAD_OFFSET(state_and_flags_);
1683  DO_THREAD_OFFSET(card_table_);
1684  DO_THREAD_OFFSET(exception_);
1685  DO_THREAD_OFFSET(opeer_);
1686  DO_THREAD_OFFSET(jni_env_);
1687  DO_THREAD_OFFSET(self_);
1688  DO_THREAD_OFFSET(stack_end_);
1689  DO_THREAD_OFFSET(suspend_count_);
1690  DO_THREAD_OFFSET(thin_lock_id_);
1691  // DO_THREAD_OFFSET(top_of_managed_stack_);
1692  // DO_THREAD_OFFSET(top_of_managed_stack_pc_);
1693  DO_THREAD_OFFSET(top_sirt_);
1694#undef DO_THREAD_OFFSET
1695
1696  size_t entry_point_count = arraysize(gThreadEntryPointInfo);
1697  CHECK_EQ(entry_point_count * size_of_pointers,
1698           sizeof(QuickEntryPoints) + sizeof(PortableEntryPoints));
1699  uint32_t expected_offset = OFFSETOF_MEMBER(Thread, quick_entrypoints_);
1700  for (size_t i = 0; i < entry_point_count; ++i) {
1701    CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name;
1702    expected_offset += size_of_pointers;
1703    if (gThreadEntryPointInfo[i].offset == offset) {
1704      os << gThreadEntryPointInfo[i].name;
1705      return;
1706    }
1707  }
1708  os << offset;
1709}
1710
1711static const bool kDebugExceptionDelivery = false;
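// Walks the stack from the throw site looking for a matching catch handler or, when
// deoptimizing, building a chain of ShadowFrames for the interpreter. DoLongJump() then
// transfers control to the handler (or upcall) frame; the destructor is unreachable because
// the long jump never returns.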
1712class CatchBlockStackVisitor : public StackVisitor {
1713 public:
1714  CatchBlockStackVisitor(Thread* self, const ThrowLocation& throw_location,
1715                         mirror::Throwable* exception, bool is_deoptimization)
1716      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1717      : StackVisitor(self, self->GetLongJumpContext()),
1718        self_(self), exception_(exception), is_deoptimization_(is_deoptimization),
1719        to_find_(is_deoptimization ? NULL : exception->GetClass()), throw_location_(throw_location),
1720        handler_quick_frame_(NULL), handler_quick_frame_pc_(0), handler_dex_pc_(0),
1721        native_method_count_(0), clear_exception_(false),
1722        method_tracing_active_(is_deoptimization ||
1723                               Runtime::Current()->GetInstrumentation()->AreExitStubsInstalled()),
1724        instrumentation_frames_to_pop_(0), top_shadow_frame_(NULL), prev_shadow_frame_(NULL) {
1725    // The exception is not in the root set, so we can't allow GC while finding the catch block.
1726    last_no_assert_suspension_cause_ = self->StartAssertNoThreadSuspension("Finding catch block");
1727  }
1728
1729  ~CatchBlockStackVisitor() {
1730    LOG(FATAL) << "UNREACHABLE";  // Expected to take long jump.
1731  }
1732
1733  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1734    mirror::AbstractMethod* method = GetMethod();
1735    if (method == NULL) {
1736      // This is the upcall; remember the frame and last pc so that we can long jump to them.
1737      handler_quick_frame_pc_ = GetCurrentQuickFramePc();
1738      handler_quick_frame_ = GetCurrentQuickFrame();
1739      return false;  // End stack walk.
1740    } else {
1741      if (UNLIKELY(method_tracing_active_ &&
1742                   GetInstrumentationExitPc() == GetReturnPc())) {
1743        // Keep count of the number of unwinds during instrumentation.
1744        instrumentation_frames_to_pop_++;
1745      }
1746      if (method->IsRuntimeMethod()) {
1747        // Ignore callee save method.
1748        DCHECK(method->IsCalleeSaveMethod());
1749        return true;
1750      } else if (is_deoptimization_) {
1751        return HandleDeoptimization(method);
1752      } else {
1753        return HandleTryItems(method);
1754      }
1755    }
1756  }
1757
1758  bool HandleTryItems(mirror::AbstractMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1759    uint32_t dex_pc = DexFile::kDexNoIndex;
1760    if (method->IsNative()) {
1761      native_method_count_++;
1762    } else {
1763      dex_pc = GetDexPc();
1764    }
1765    if (dex_pc != DexFile::kDexNoIndex) {
1766      uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc, &clear_exception_);
1767      if (found_dex_pc != DexFile::kDexNoIndex) {
1768        handler_dex_pc_ = found_dex_pc;
1769        handler_quick_frame_pc_ = method->ToNativePc(found_dex_pc);
1770        handler_quick_frame_ = GetCurrentQuickFrame();
1771        return false;  // End stack walk.
1772      }
1773    }
1774    return true;  // Continue stack walk.
1775  }
1776
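// Deoptimization: re-run the verifier to learn the kind of each dex register at this dex pc,
// then copy the values out of the quick frame into a new ShadowFrame from which the
// interpreter can resume at the following instruction.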
1777  bool HandleDeoptimization(mirror::AbstractMethod* m) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1778    MethodHelper mh(m);
1779    const DexFile::CodeItem* code_item = mh.GetCodeItem();
1780    CHECK(code_item != NULL);
1781    uint16_t num_regs = code_item->registers_size_;
1782    uint32_t dex_pc = GetDexPc();
1783    const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
1784    uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
1785    ShadowFrame* new_frame = ShadowFrame::Create(num_regs, NULL, m, new_dex_pc);
1786    verifier::MethodVerifier verifier(&mh.GetDexFile(), mh.GetDexCache(), mh.GetClassLoader(),
1787                                      mh.GetClassDefIndex(), code_item,
1788                                      m->GetDexMethodIndex(), m, m->GetAccessFlags(), false, true);
1789    verifier.Verify();
1790    std::vector<int32_t> kinds = verifier.DescribeVRegs(dex_pc);
1791    for (uint16_t reg = 0; reg < num_regs; reg++) {
1792      VRegKind kind = static_cast<VRegKind>(kinds.at(reg * 2));
1793      switch (kind) {
1794        case kUndefined:
1795          new_frame->SetVReg(reg, 0xEBADDE09);
1796          break;
1797        case kConstant:
1798          new_frame->SetVReg(reg, kinds.at((reg * 2) + 1));
1799          break;
1800        case kReferenceVReg:
1801          new_frame->SetVRegReference(reg,
1802                                      reinterpret_cast<mirror::Object*>(GetVReg(m, reg, kind)));
1803          break;
1804        default:
1805          new_frame->SetVReg(reg, GetVReg(m, reg, kind));
1806          break;
1807      }
1808    }
1809    if (prev_shadow_frame_ != NULL) {
1810      prev_shadow_frame_->SetLink(new_frame);
1811    } else {
1812      top_shadow_frame_ = new_frame;
1813    }
1814    prev_shadow_frame_ = new_frame;
1815    return true;
1816  }
1817
1818  void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1819    mirror::AbstractMethod* catch_method = *handler_quick_frame_;
1820    if (catch_method == NULL) {
1821      if (kDebugExceptionDelivery) {
1822        LOG(INFO) << "Handler is upcall";
1823      }
1824    } else {
1825      CHECK(!is_deoptimization_);
1826      if (kDebugExceptionDelivery) {
1827        const DexFile& dex_file = *catch_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1828        int line_number = dex_file.GetLineNumFromPC(catch_method, handler_dex_pc_);
1829        LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
1830      }
1831    }
1832    if (clear_exception_) {
1833      // Exception was cleared as part of delivery.
1834      DCHECK(!self_->IsExceptionPending());
1835    } else {
1836      // Put the exception back in the root set, with a cleared throw location.
1837      self_->SetException(ThrowLocation(), exception_);
1838    }
1839    self_->EndAssertNoThreadSuspension(last_no_assert_suspension_cause_);
1840    // Do instrumentation events after allowing thread suspension again.
1841    instrumentation::Instrumentation* instrumentation = Runtime::Current()->GetInstrumentation();
1842    for (size_t i = 0; i < instrumentation_frames_to_pop_; ++i) {
1843      // We pop the instrumentation stack here so as not to corrupt it during the stack walk.
1844      if (i != instrumentation_frames_to_pop_ - 1 || self_->GetInstrumentationStack()->front().method_ != catch_method) {
1845        // Don't pop the instrumentation frame of the catch handler.
1846        instrumentation->PopMethodForUnwind(self_, is_deoptimization_);
1847      }
1848    }
1849    if (!is_deoptimization_) {
1850      instrumentation->ExceptionCaughtEvent(self_, throw_location_, catch_method, handler_dex_pc_,
1851                                            exception_);
1852    } else {
1853      // TODO: proper return value.
1854      self_->SetDeoptimizationShadowFrame(top_shadow_frame_);
1855    }
1856    // Place context back on thread so it will be available when we continue.
1857    self_->ReleaseLongJumpContext(context_);
1858    context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
1859    CHECK_NE(handler_quick_frame_pc_, 0u);
1860    context_->SetPC(handler_quick_frame_pc_);
1861    context_->SmashCallerSaves();
1862    context_->DoLongJump();
1863  }
1864
1865 private:
1866  Thread* const self_;
1867  mirror::Throwable* const exception_;
1868  const bool is_deoptimization_;
1869  // The exception type for which we are searching for a catch block.
1870  mirror::Class* const to_find_;
1871  // Location of the throw.
1872  const ThrowLocation& throw_location_;
1873  // Quick frame with found handler or last frame if no handler found.
1874  mirror::AbstractMethod** handler_quick_frame_;
1875  // PC to branch to for the handler.
1876  uintptr_t handler_quick_frame_pc_;
1877  // Associated dex PC.
1878  uint32_t handler_dex_pc_;
1879  // Number of native methods passed in crawl (equates to number of SIRTs to pop)
1880  uint32_t native_method_count_;
1881  // Should the exception be cleared because the catch block has no move-exception instruction?
1882  bool clear_exception_;
1883  // Is method tracing active?
1884  const bool method_tracing_active_;
1885  // Support for nesting no thread suspension checks.
1886  const char* last_no_assert_suspension_cause_;
1887  // Number of frames to pop in long jump.
1888  size_t instrumentation_frames_to_pop_;
1889  ShadowFrame* top_shadow_frame_;
1890  ShadowFrame* prev_shadow_frame_;
1891};
1892
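// Delivers the pending exception (or performs deoptimization) from compiled code. A sentinel
// Throwable* of -1 requests deoptimization rather than a real exception. This never returns:
// it long jumps to the catch handler or to the upcall frame.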
1893void Thread::QuickDeliverException() {
1894  // Get exception from thread.
1895  ThrowLocation throw_location;
1896  mirror::Throwable* exception = GetException(&throw_location);
1897  CHECK(exception != NULL);
1898  // Don't leave exception visible while we try to find the handler, which may cause class
1899  // resolution.
1900  ClearException();
1901  bool is_deoptimization = (exception == reinterpret_cast<mirror::Throwable*>(-1));
1902  if (kDebugExceptionDelivery) {
1903    if (!is_deoptimization) {
1904      mirror::String* msg = exception->GetDetailMessage();
1905      std::string str_msg(msg != NULL ? msg->ToModifiedUtf8() : "");
1906      DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
1907                << ": " << str_msg << "\n");
1908    } else {
1909      DumpStack(LOG(INFO) << "Deoptimizing: ");
1910    }
1911  }
1912  CatchBlockStackVisitor catch_finder(this, throw_location, exception, is_deoptimization);
1913  catch_finder.WalkStack(true);
1914  catch_finder.DoLongJump();
1915  LOG(FATAL) << "UNREACHABLE";
1916}
1917
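// Returns a Context for use in a long jump, reusing the one cached on this thread when
// possible so that exception delivery need not allocate; ReleaseLongJumpContext() is the
// counterpart that hands a context back.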
1918Context* Thread::GetLongJumpContext() {
1919  Context* result = long_jump_context_;
1920  if (result == NULL) {
1921    result = Context::Create();
1922  } else {
1923    long_jump_context_ = NULL;  // Avoid context being shared.
1924    result->Reset();
1925  }
1926  return result;
1927}
1928
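// Finds the first non-runtime method on the stack, recording the method, its dex pc and (only
// when a context was supplied) the frame's 'this' object.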
1929struct CurrentMethodVisitor : public StackVisitor {
1930  CurrentMethodVisitor(Thread* thread, Context* context)
1931      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1932      : StackVisitor(thread, context), this_object_(NULL), method_(NULL), dex_pc_(0) {}
1933  virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1934    mirror::AbstractMethod* m = GetMethod();
1935    if (m->IsRuntimeMethod()) {
1936      // Continue if this is a runtime method.
1937      return true;
1938    }
1939    if (context_ != NULL) {
1940      this_object_ = GetThisObject();
1941    }
1942    method_ = m;
1943    dex_pc_ = GetDexPc();
1944    return false;
1945  }
1946  mirror::Object* this_object_;
1947  mirror::AbstractMethod* method_;
1948  uint32_t dex_pc_;
1949};
1950
1951mirror::AbstractMethod* Thread::GetCurrentMethod(uint32_t* dex_pc) const {
1952  CurrentMethodVisitor visitor(const_cast<Thread*>(this), NULL);
1953  visitor.WalkStack(false);
1954  if (dex_pc != NULL) {
1955    *dex_pc = visitor.dex_pc_;
1956  }
1957  return visitor.method_;
1958}
1959
1960ThrowLocation Thread::GetCurrentLocationForThrow() {
1961  Context* context = GetLongJumpContext();
1962  CurrentMethodVisitor visitor(this, context);
1963  visitor.WalkStack(false);
1964  ReleaseLongJumpContext(context);
1965  return ThrowLocation(visitor.this_object_, visitor.method_, visitor.dex_pc_);
1966}
1967
1968bool Thread::HoldsLock(mirror::Object* object) {
1969  if (object == NULL) {
1970    return false;
1971  }
1972  return object->GetThinLockId() == thin_lock_id_;
1973}
1974
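// Visits every object reference held in this thread's stack frames. Shadow frames are scanned
// either through their reference arrays (JNI SIRTs and interpreter frames) or through the
// method's dex GC map; quick frames are decoded using the native GC map, vmap table and spill
// masks to locate references in registers or on the stack.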
1975// RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
1976template <typename RootVisitor>
1977class ReferenceMapVisitor : public StackVisitor {
1978 public:
1979  ReferenceMapVisitor(Thread* thread, Context* context, const RootVisitor& visitor)
1980      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1981      : StackVisitor(thread, context), visitor_(visitor) {}
1982
1983  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1984    if (false) {  // Set to true to log every frame whose roots we visit.
1985      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
1986          << StringPrintf("@ PC:%04x", GetDexPc());
1987    }
1988    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
1989    if (shadow_frame != NULL) {
1990      mirror::AbstractMethod* m = shadow_frame->GetMethod();
1991      size_t num_regs = shadow_frame->NumberOfVRegs();
1992      if (m->IsNative() || shadow_frame->HasReferenceArray()) {
1993        // SIRT for JNI or References for interpreter.
1994        for (size_t reg = 0; reg < num_regs; ++reg) {
1995          mirror::Object* ref = shadow_frame->GetVRegReference(reg);
1996          if (ref != NULL) {
1997            visitor_(ref, reg, this);
1998          }
1999        }
2000      } else {
2001        // Java method.
2002        // Portable path use DexGcMap and store in Method.native_gc_map_.
2003        const uint8_t* gc_map = m->GetNativeGcMap();
2004        CHECK(gc_map != NULL) << PrettyMethod(m);
2005        uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
2006                                                       (gc_map[1] << 16) |
2007                                                       (gc_map[2] << 8) |
2008                                                       (gc_map[3] << 0));
2009        verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
2010        uint32_t dex_pc = GetDexPc();
2011        const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
2012        DCHECK(reg_bitmap != NULL);
2013        num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
2014        for (size_t reg = 0; reg < num_regs; ++reg) {
2015          if (TestBitmap(reg, reg_bitmap)) {
2016            mirror::Object* ref = shadow_frame->GetVRegReference(reg);
2017            if (ref != NULL) {
2018              visitor_(ref, reg, this);
2019            }
2020          }
2021        }
2022      }
2023    } else {
2024      mirror::AbstractMethod* m = GetMethod();
2025      // Process register map (which native and runtime methods don't have)
2026      if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
2027        const uint8_t* native_gc_map = m->GetNativeGcMap();
2028        CHECK(native_gc_map != NULL) << PrettyMethod(m);
2029        mh_.ChangeMethod(m);
2030        const DexFile::CodeItem* code_item = mh_.GetCodeItem();
2031        DCHECK(code_item != NULL) << PrettyMethod(m);  // Can't be NULL or how would we compile its instructions?
2032        NativePcOffsetToReferenceMap map(native_gc_map);
2033        size_t num_regs = std::min(map.RegWidth() * 8,
2034                                   static_cast<size_t>(code_item->registers_size_));
2035        if (num_regs > 0) {
2036          const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
2037          DCHECK(reg_bitmap != NULL);
2038          const VmapTable vmap_table(m->GetVmapTableRaw());
2039          uint32_t core_spills = m->GetCoreSpillMask();
2040          uint32_t fp_spills = m->GetFpSpillMask();
2041          size_t frame_size = m->GetFrameSizeInBytes();
2042          // For all dex registers in the bitmap
2043          mirror::AbstractMethod** cur_quick_frame = GetCurrentQuickFrame();
2044          DCHECK(cur_quick_frame != NULL);
2045          for (size_t reg = 0; reg < num_regs; ++reg) {
2046            // Does this register hold a reference?
2047            if (TestBitmap(reg, reg_bitmap)) {
2048              uint32_t vmap_offset;
2049              mirror::Object* ref;
2050              if (vmap_table.IsInContext(reg, vmap_offset, kReferenceVReg)) {
2051                uintptr_t val = GetGPR(vmap_table.ComputeRegister(core_spills, vmap_offset,
2052                                                                  kReferenceVReg));
2053                ref = reinterpret_cast<mirror::Object*>(val);
2054              } else {
2055                ref = reinterpret_cast<mirror::Object*>(GetVReg(cur_quick_frame, code_item,
2056                                                                core_spills, fp_spills, frame_size,
2057                                                                reg));
2058              }
2059
2060              if (ref != NULL) {
2061                visitor_(ref, reg, this);
2062              }
2063            }
2064          }
2065        }
2066      }
2067    }
2068    return true;
2069  }
2070
2071 private:
2072  static bool TestBitmap(int reg, const uint8_t* reg_vector) {
2073    return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
2074  }
2075
2076  // Visitor for when we visit a root.
2077  const RootVisitor& visitor_;
2078
2079  // A method helper we keep around to avoid dex file/cache re-computations.
2080  MethodHelper mh_;
2081};
2082
2083class RootCallbackVisitor {
2084 public:
2085  RootCallbackVisitor(RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {}
2086
2087  void operator()(const mirror::Object* obj, size_t, const StackVisitor*) const {
2088    visitor_(obj, arg_);
2089  }
2090
2091 private:
2092  RootVisitor* visitor_;
2093  void* arg_;
2094};
2095
2096class VerifyCallbackVisitor {
2097 public:
2098  VerifyCallbackVisitor(VerifyRootVisitor* visitor, void* arg)
2099      : visitor_(visitor),
2100        arg_(arg) {
2101  }
2102
2103  void operator()(const mirror::Object* obj, size_t vreg, const StackVisitor* visitor) const {
2104    visitor_(obj, arg_, vreg, visitor);
2105  }
2106
2107 private:
2108  VerifyRootVisitor* const visitor_;
2109  void* const arg_;
2110};
2111
2112struct VerifyRootWrapperArg {
2113  VerifyRootVisitor* visitor;
2114  void* arg;
2115};
2116
2117static void VerifyRootWrapperCallback(const mirror::Object* root, void* arg) {
2118  VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
2119  wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
2120}
2121
2122void Thread::VerifyRoots(VerifyRootVisitor* visitor, void* arg) {
2123  // We need to map from a RootVisitor to VerifyRootVisitor, so pass in nulls for arguments we
2124  // don't have.
2125  VerifyRootWrapperArg wrapperArg;
2126  wrapperArg.arg = arg;
2127  wrapperArg.visitor = visitor;
2128
2129  if (opeer_ != NULL) {
2130    VerifyRootWrapperCallback(opeer_, &wrapperArg);
2131  }
2132  if (exception_ != NULL) {
2133    VerifyRootWrapperCallback(exception_, &wrapperArg);
2134  }
2135  throw_location_.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2136  if (class_loader_override_ != NULL) {
2137    VerifyRootWrapperCallback(class_loader_override_, &wrapperArg);
2138  }
2139  jni_env_->locals.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2140  jni_env_->monitors.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2141
2142  SirtVisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2143
2144  // Visit roots on this thread's stack
2145  Context* context = GetLongJumpContext();
2146  VerifyCallbackVisitor visitorToCallback(visitor, arg);
2147  ReferenceMapVisitor<VerifyCallbackVisitor> mapper(this, context, visitorToCallback);
2148  mapper.WalkStack();
2149  ReleaseLongJumpContext(context);
2150
2151  std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack = GetInstrumentationStack();
2152  typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It;
2153  for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end; ++it) {
2154    mirror::Object* this_object = (*it).this_object_;
2155    if (this_object != NULL) {
2156      VerifyRootWrapperCallback(this_object, &wrapperArg);
2157    }
2158    mirror::AbstractMethod* method = (*it).method_;
2159    VerifyRootWrapperCallback(method, &wrapperArg);
2160  }
2161}
2162
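// Reports all of this thread's GC roots: the peer, any pending exception, the throw location,
// the class loader override, JNI locals and monitors, SIRT entries, references found in stack
// frames, and the objects and methods recorded on the instrumentation stack.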
2163void Thread::VisitRoots(RootVisitor* visitor, void* arg) {
2164  if (opeer_ != NULL) {
2165    visitor(opeer_, arg);
2166  }
2167  if (exception_ != NULL) {
2168    visitor(exception_, arg);
2169  }
2170  throw_location_.VisitRoots(visitor, arg);
2171  if (class_loader_override_ != NULL) {
2172    visitor(class_loader_override_, arg);
2173  }
2174  jni_env_->locals.VisitRoots(visitor, arg);
2175  jni_env_->monitors.VisitRoots(visitor, arg);
2176
2177  SirtVisitRoots(visitor, arg);
2178
2179  // Visit roots on this thread's stack
2180  Context* context = GetLongJumpContext();
2181  RootCallbackVisitor visitorToCallback(visitor, arg);
2182  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context, visitorToCallback);
2183  mapper.WalkStack();
2184  ReleaseLongJumpContext(context);
2185
2186  std::deque<instrumentation::InstrumentationStackFrame>* instrumentation_stack = GetInstrumentationStack();
2187  typedef std::deque<instrumentation::InstrumentationStackFrame>::const_iterator It;
2188  for (It it = instrumentation_stack->begin(), end = instrumentation_stack->end(); it != end; ++it) {
2189    mirror::Object* this_object = (*it).this_object_;
2190    if (this_object != NULL) {
2191      visitor(this_object, arg);
2192    }
2193    mirror::AbstractMethod* method = (*it).method_;
2194    visitor(method, arg);
2195  }
2196}
2197
2198static void VerifyObject(const mirror::Object* root, void* arg) {
2199  gc::Heap* heap = reinterpret_cast<gc::Heap*>(arg);
2200  heap->VerifyObject(root);
2201}
2202
2203void Thread::VerifyStackImpl() {
2204  UniquePtr<Context> context(Context::Create());
2205  RootCallbackVisitor visitorToCallback(VerifyObject, Runtime::Current()->GetHeap());
2206  ReferenceMapVisitor<RootCallbackVisitor> mapper(this, context.get(), visitorToCallback);
2207  mapper.WalkStack();
2208}
2209
2210// Set the stack end to the value to be used while handling a stack overflow.
2211void Thread::SetStackEndForStackOverflow() {
2212  // During stack overflow we allow use of the full stack
2213  if (stack_end_ == stack_begin_) {
2214    DumpStack(std::cerr);
2215    LOG(FATAL) << "Need to increase kStackOverflowReservedBytes (currently "
2216               << kStackOverflowReservedBytes << ")";
2217  }
2218
2219  stack_end_ = stack_begin_;
2220}
2221
2222std::ostream& operator<<(std::ostream& os, const Thread& thread) {
2223  thread.ShortDump(os);
2224  return os;
2225}
2226
2227}  // namespace art
2228