thread.cc revision 80537bb742dff4ccdf6d04b1c0bb7d2179acc8cb
1/*
2 * Copyright (C) 2011 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "thread.h"
18
19#include <dynamic_annotations.h>
20#include <pthread.h>
21#include <signal.h>
22#include <sys/resource.h>
23#include <sys/time.h>
24
25#include <algorithm>
26#include <bitset>
27#include <cerrno>
28#include <iostream>
29#include <list>
30
31#include "base/mutex.h"
32#include "class_linker.h"
33#include "class_loader.h"
34#include "cutils/atomic.h"
35#include "cutils/atomic-inline.h"
36#include "debugger.h"
37#include "gc_map.h"
38#include "heap.h"
39#include "jni_internal.h"
40#include "monitor.h"
41#include "oat/runtime/context.h"
42#include "object.h"
43#include "object_utils.h"
44#include "reflection.h"
45#include "runtime.h"
46#include "runtime_support.h"
47#include "scoped_thread_state_change.h"
48#include "ScopedLocalRef.h"
49#include "sirt_ref.h"
50#include "gc/space.h"
51#include "stack.h"
52#include "stack_indirect_reference_table.h"
53#include "thread_list.h"
54#include "utils.h"
55#include "verifier/dex_gc_map.h"
56#include "well_known_classes.h"
57
58namespace art {
59
60pthread_key_t Thread::pthread_key_self_;
61ConditionVariable* Thread::resume_cond_;
62
63static const char* kThreadNameDuringStartup = "<native thread without managed peer>";
64
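// Caches the biased begin of the heap's card table in this Thread. Keeping the pointer in
// thread-local state presumably lets write-barrier code mark cards without reloading it from the heap.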
65void Thread::InitCardTable() {
66  card_table_ = Runtime::Current()->GetHeap()->GetCardTable()->GetBiasedBegin();
67}
68
69#if !defined(__APPLE__)
70static void UnimplementedEntryPoint() {
71  UNIMPLEMENTED(FATAL);
72}
73#endif
74
75void Thread::InitFunctionPointers() {
76#if !defined(__APPLE__) // The Mac GCC is too old to accept this code.
77  // Insert a placeholder so we can easily tell if we call an unimplemented entry point.
78  uintptr_t* begin = reinterpret_cast<uintptr_t*>(&entrypoints_);
79  uintptr_t* end = reinterpret_cast<uintptr_t*>(reinterpret_cast<uint8_t*>(begin) + sizeof(entrypoints_));
80  for (uintptr_t* it = begin; it != end; ++it) {
81    *it = reinterpret_cast<uintptr_t>(UnimplementedEntryPoint);
82  }
83#endif
84  InitEntryPoints(&entrypoints_);
85}
86
87void Thread::SetDebuggerUpdatesEnabled(bool enabled) {
88  LOG(INFO) << "Turning debugger updates " << (enabled ? "on" : "off") << " for " << *this;
89#if !defined(ART_USE_LLVM_COMPILER)
90  ChangeDebuggerEntryPoint(&entrypoints_, enabled);
91#else
92  UNIMPLEMENTED(FATAL);
93#endif
94}
95
96
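// Deoptimization support: a shadow frame plus the pending return value are stashed on the Thread
// here and later retrieved (presumably by the interpreter) via GetAndClearDeoptimizationShadowFrame.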
97void Thread::SetDeoptimizationShadowFrame(ShadowFrame* sf, const JValue& ret_val) {
98  CHECK(sf != NULL);
99  deoptimization_shadow_frame_ = sf;
100  deoptimization_return_value_.SetJ(ret_val.GetJ());
101}
102
103ShadowFrame* Thread::GetAndClearDeoptimizationShadowFrame(JValue* ret_val) {
104  ShadowFrame* sf = deoptimization_shadow_frame_;
105  DCHECK(sf != NULL);
106  deoptimization_shadow_frame_ = NULL;
107  ret_val->SetJ(deoptimization_return_value_.GetJ());
108  return sf;
109}
110
111void Thread::InitTid() {
112  tid_ = ::art::GetTid();
113}
114
115void Thread::InitAfterFork() {
116  // One thread (us) survived the fork, but we have a new tid so we need to
117  // update the value stashed in this Thread*.
118  InitTid();
119}
120
121void* Thread::CreateCallback(void* arg) {
122  Thread* self = reinterpret_cast<Thread*>(arg);
123  Runtime* runtime = Runtime::Current();
124  if (runtime == NULL) {
125    LOG(ERROR) << "Thread attaching to non-existent runtime: " << *self;
126    return NULL;
127  }
128  {
129    // TODO: pass self to MutexLock - requires self to equal Thread::Current(), which is only true
130    //       after self->Init().
131    MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
132    // Check that if we got here we cannot be shutting down (as shutdown should never have started
133    // while threads are being born).
134    CHECK(!runtime->IsShuttingDown());
135    self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
136    Runtime::Current()->EndThreadBirth();
137  }
138  {
139    ScopedObjectAccess soa(self);
140
141    // Copy peer into self, deleting global reference when done.
142    CHECK(self->jpeer_ != NULL);
143    self->opeer_ = soa.Decode<Object*>(self->jpeer_);
144    self->GetJniEnv()->DeleteGlobalRef(self->jpeer_);
145    self->jpeer_ = NULL;
146
147    {
148      SirtRef<String> thread_name(self, self->GetThreadName(soa));
149      self->SetThreadName(thread_name->ToModifiedUtf8().c_str());
150    }
151    Dbg::PostThreadStart(self);
152
153    // Invoke the 'run' method of our java.lang.Thread.
154    Object* receiver = self->opeer_;
155    jmethodID mid = WellKnownClasses::java_lang_Thread_run;
156    AbstractMethod* m =
157        receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(soa.DecodeMethod(mid));
158    m->Invoke(self, receiver, NULL, NULL);
159  }
160  // Detach and delete self.
161  Runtime::Current()->GetThreadList()->Unregister(self);
162
163  return NULL;
164}
165
166Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, Object* thread_peer) {
167  Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_vmData);
168  Thread* result = reinterpret_cast<Thread*>(static_cast<uintptr_t>(f->GetInt(thread_peer)));
169  // Sanity check that if we have a result it is either suspended or we hold the thread_list_lock_
170  // to stop it from going away.
171  if (kIsDebugBuild) {
172    MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
173    if (result != NULL && !result->IsSuspended()) {
174      Locks::thread_list_lock_->AssertHeld(soa.Self());
175    }
176  }
177  return result;
178}
179
180Thread* Thread::FromManagedThread(const ScopedObjectAccessUnchecked& soa, jobject java_thread) {
181  return FromManagedThread(soa, soa.Decode<Object*>(java_thread));
182}
183
184static size_t FixStackSize(size_t stack_size) {
185  // A stack size of zero means "use the default".
186  if (stack_size == 0) {
187    stack_size = Runtime::Current()->GetDefaultStackSize();
188  }
189
190  // Dalvik used the bionic pthread default stack size for native threads,
191  // so include that here to support apps that expect large native stacks.
192  stack_size += 1 * MB;
193
194  // It's not possible to request a stack smaller than the system-defined PTHREAD_STACK_MIN.
195  if (stack_size < PTHREAD_STACK_MIN) {
196    stack_size = PTHREAD_STACK_MIN;
197  }
198
199  // It's likely that callers are trying to ensure they have at least a certain amount of
200  // stack space, so we should add our reserved space on top of what they requested, rather
201  // than implicitly take it away from them.
202  stack_size += Thread::kStackOverflowReservedBytes;
203
204  // Some systems require the stack size to be a multiple of the system page size, so round up.
205  stack_size = RoundUp(stack_size, kPageSize);
206
207  return stack_size;
208}
209
210static void SigAltStack(stack_t* new_stack, stack_t* old_stack) {
211  if (sigaltstack(new_stack, old_stack) == -1) {
212    PLOG(FATAL) << "sigaltstack failed";
213  }
214}
215
216static void SetUpAlternateSignalStack() {
217  // Create and set an alternate signal stack.
218  stack_t ss;
219  ss.ss_sp = new uint8_t[SIGSTKSZ];
220  ss.ss_size = SIGSTKSZ;
221  ss.ss_flags = 0;
222  CHECK(ss.ss_sp != NULL);
223  SigAltStack(&ss, NULL);
224
225  // Double-check that it worked.
226  ss.ss_sp = NULL;
227  SigAltStack(NULL, &ss);
228  VLOG(threads) << "Alternate signal stack is " << PrettySize(ss.ss_size) << " at " << ss.ss_sp;
229}
230
231static void TearDownAlternateSignalStack() {
232  // Get the pointer so we can free the memory.
233  stack_t ss;
234  SigAltStack(NULL, &ss);
235  uint8_t* allocated_signal_stack = reinterpret_cast<uint8_t*>(ss.ss_sp);
236
237  // Tell the kernel to stop using it.
238  ss.ss_sp = NULL;
239  ss.ss_flags = SS_DISABLE;
240  ss.ss_size = SIGSTKSZ; // Avoid ENOMEM failure with Mac OS' buggy libc.
241  SigAltStack(&ss, NULL);
242
243  // Free it.
244  delete[] allocated_signal_stack;
245}
246
247void Thread::CreateNativeThread(JNIEnv* env, jobject java_peer, size_t stack_size, bool is_daemon) {
248  CHECK(java_peer != NULL);
249  Thread* self = static_cast<JNIEnvExt*>(env)->self;
250  Runtime* runtime = Runtime::Current();
251
252  // Atomically start the birth of the thread ensuring the runtime isn't shutting down.
253  bool thread_start_during_shutdown = false;
254  {
255    MutexLock mu(self, *Locks::runtime_shutdown_lock_);
256    if (runtime->IsShuttingDown()) {
257      thread_start_during_shutdown = true;
258    } else {
259      runtime->StartThreadBirth();
260    }
261  }
262  if (thread_start_during_shutdown) {
263    ScopedLocalRef<jclass> error_class(env, env->FindClass("java/lang/InternalError"));
264    env->ThrowNew(error_class.get(), "Thread starting during runtime shutdown");
265    return;
266  }
267
268  Thread* child_thread = new Thread(is_daemon);
269  // Use global JNI ref to hold peer live while child thread starts.
270  child_thread->jpeer_ = env->NewGlobalRef(java_peer);
271  stack_size = FixStackSize(stack_size);
272
273  // Thread.start is synchronized, so we know that vmData is 0, and know that we're not racing to
274  // assign it.
275  env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_vmData,
276                   reinterpret_cast<jint>(child_thread));
277
278  pthread_t new_pthread;
279  pthread_attr_t attr;
280  CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
281  CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED");
282  CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
283  int pthread_create_result = pthread_create(&new_pthread, &attr, Thread::CreateCallback, child_thread);
284  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");
285
286  if (pthread_create_result != 0) {
287    // pthread_create(3) failed, so clean up.
288    {
289      MutexLock mu(self, *Locks::runtime_shutdown_lock_);
290      runtime->EndThreadBirth();
291    }
292    // Manually delete the global reference since Thread::Init will not have been run.
293    env->DeleteGlobalRef(child_thread->jpeer_);
294    child_thread->jpeer_ = NULL;
295    delete child_thread;
296    child_thread = NULL;
297    // TODO: remove from thread group?
298    env->SetIntField(java_peer, WellKnownClasses::java_lang_Thread_vmData, 0);
299    {
300      std::string msg(StringPrintf("pthread_create (%s stack) failed: %s",
301                                   PrettySize(stack_size).c_str(), strerror(pthread_create_result)));
302      ScopedObjectAccess soa(env);
303      soa.Self()->ThrowOutOfMemoryError(msg.c_str());
304    }
305  }
306}
307
308void Thread::Init(ThreadList* thread_list, JavaVMExt* java_vm) {
309  // This function does all the initialization that must be run by the native thread it applies to.
310  // (When we create a new thread from managed code, we allocate the Thread* in Thread::Create so
311  // we can handshake with the corresponding native thread when it's ready.) Check this native
312  // thread hasn't been through here already...
313  CHECK(Thread::Current() == NULL);
314  SetUpAlternateSignalStack();
315  InitCpu();
316  InitFunctionPointers();
317  InitCardTable();
318  InitTid();
319  if (Runtime::Current()->InterpreterOnly()) {
320    AtomicSetFlag(kEnterInterpreter);
321  }
322  // Set pthread_self_ before calling pthread_setspecific, which is what makes Thread::Current()
323  // work; this way pthread_self_ is never seen in an invalid state via Thread::Current().
324  pthread_self_ = pthread_self();
325  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach self");
326  DCHECK_EQ(Thread::Current(), this);
327
328  thin_lock_id_ = thread_list->AllocThreadId(this);
329  InitStackHwm();
330
331  jni_env_ = new JNIEnvExt(this, java_vm);
332  thread_list->Register(this);
333}
334
335Thread* Thread::Attach(const char* thread_name, bool as_daemon, jobject thread_group,
336                       bool create_peer) {
337  Thread* self;
338  Runtime* runtime = Runtime::Current();
339  if (runtime == NULL) {
340    LOG(ERROR) << "Thread attaching to non-existent runtime: " << thread_name;
341    return NULL;
342  }
343  {
344    MutexLock mu(NULL, *Locks::runtime_shutdown_lock_);
345    if (runtime->IsShuttingDown()) {
346      LOG(ERROR) << "Thread attaching while runtime is shutting down: " << thread_name;
347      return NULL;
348    } else {
349      Runtime::Current()->StartThreadBirth();
350      self = new Thread(as_daemon);
351      self->Init(runtime->GetThreadList(), runtime->GetJavaVM());
352      Runtime::Current()->EndThreadBirth();
353    }
354  }
355
356  CHECK_NE(self->GetState(), kRunnable);
357  self->SetState(kNative);
358
359  // If we're the main thread, ClassLinker won't be created until after we're attached,
360  // so that thread needs a two-stage attach. Regular threads don't need this hack.
361  // In the compiler, all threads need this hack, because no-one's going to be getting
362  // a native peer!
363  if (create_peer) {
364    self->CreatePeer(thread_name, as_daemon, thread_group);
365  } else {
366    // These aren't necessary, but they improve diagnostics for unit tests & command-line tools.
367    if (thread_name != NULL) {
368      self->name_->assign(thread_name);
369      ::art::SetThreadName(thread_name);
370    }
371  }
372
373  return self;
374}
375
376void Thread::CreatePeer(const char* name, bool as_daemon, jobject thread_group) {
377  Runtime* runtime = Runtime::Current();
378  CHECK(runtime->IsStarted());
379  JNIEnv* env = jni_env_;
380
381  if (thread_group == NULL) {
382    thread_group = runtime->GetMainThreadGroup();
383  }
384  ScopedLocalRef<jobject> thread_name(env, env->NewStringUTF(name));
385  jint thread_priority = GetNativePriority();
386  jboolean thread_is_daemon = as_daemon;
387
388  ScopedLocalRef<jobject> peer(env, env->AllocObject(WellKnownClasses::java_lang_Thread));
389  if (peer.get() == NULL) {
390    CHECK(IsExceptionPending());
391    return;
392  }
393  {
394    ScopedObjectAccess soa(this);
395    opeer_ = soa.Decode<Object*>(peer.get());
396  }
397  env->CallNonvirtualVoidMethod(peer.get(),
398                                WellKnownClasses::java_lang_Thread,
399                                WellKnownClasses::java_lang_Thread_init,
400                                thread_group, thread_name.get(), thread_priority, thread_is_daemon);
401  AssertNoPendingException();
402
403  Thread* self = this;
404  DCHECK_EQ(self, Thread::Current());
405  jni_env_->SetIntField(peer.get(), WellKnownClasses::java_lang_Thread_vmData,
406                        reinterpret_cast<jint>(self));
407
408  ScopedObjectAccess soa(self);
409  SirtRef<String> peer_thread_name(soa.Self(), GetThreadName(soa));
410  if (peer_thread_name.get() == NULL) {
411    // The Thread constructor should have set the Thread.name to a
412    // non-null value. However, because we can run without code
413    // available (in the compiler, in tests), we manually assign the
414    // fields the constructor should have set.
415    soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->
416        SetBoolean(opeer_, thread_is_daemon);
417    soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->
418        SetObject(opeer_, soa.Decode<Object*>(thread_group));
419    soa.DecodeField(WellKnownClasses::java_lang_Thread_name)->
420        SetObject(opeer_, soa.Decode<Object*>(thread_name.get()));
421    soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->
422        SetInt(opeer_, thread_priority);
423    peer_thread_name.reset(GetThreadName(soa));
424  }
425  // 'thread_name' may have been null, so don't trust 'peer_thread_name' to be non-null.
426  if (peer_thread_name.get() != NULL) {
427    SetThreadName(peer_thread_name->ToModifiedUtf8().c_str());
428  }
429}
430
431void Thread::SetThreadName(const char* name) {
432  name_->assign(name);
433  ::art::SetThreadName(name);
434  Dbg::DdmSendThreadNotification(this, CHUNK_TYPE("THNM"));
435}
436
437void Thread::InitStackHwm() {
438  void* stack_base;
439  size_t stack_size;
440  GetThreadStack(pthread_self_, stack_base, stack_size);
441
442  // TODO: include this in the thread dumps; potentially useful in SIGQUIT output?
443  VLOG(threads) << StringPrintf("Native stack is at %p (%s)", stack_base, PrettySize(stack_size).c_str());
444
445  stack_begin_ = reinterpret_cast<byte*>(stack_base);
446  stack_size_ = stack_size;
447
448  if (stack_size_ <= kStackOverflowReservedBytes) {
449    LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << stack_size_ << " bytes)";
450  }
451
452  // TODO: move this into the Linux GetThreadStack implementation.
453#if !defined(__APPLE__)
454  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
455  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
456  // will be broken because we'll die long before we get close to 2GB.
457  bool is_main_thread = (::art::GetTid() == getpid());
458  if (is_main_thread) {
459    rlimit stack_limit;
460    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
461      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
462    }
463    if (stack_limit.rlim_cur == RLIM_INFINITY) {
464      // Find the default stack size for new threads...
465      pthread_attr_t default_attributes;
466      size_t default_stack_size;
467      CHECK_PTHREAD_CALL(pthread_attr_init, (&default_attributes), "default stack size query");
468      CHECK_PTHREAD_CALL(pthread_attr_getstacksize, (&default_attributes, &default_stack_size),
469                         "default stack size query");
470      CHECK_PTHREAD_CALL(pthread_attr_destroy, (&default_attributes), "default stack size query");
471
472      // ...and use that as our limit.
473      size_t old_stack_size = stack_size_;
474      stack_size_ = default_stack_size;
475      stack_begin_ += (old_stack_size - stack_size_);
476      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
477                    << " to " << PrettySize(stack_size_)
478                    << " with base " << reinterpret_cast<void*>(stack_begin_);
479    }
480  }
481#endif
482
483  // Set stack_end_ to the bottom of the stack, reserving space for stack-overflow handling.
484  ResetDefaultStackEnd();
485
486  // Sanity check.
487  int stack_variable;
488  CHECK_GT(&stack_variable, reinterpret_cast<void*>(stack_end_));
489}
490
491void Thread::ShortDump(std::ostream& os) const {
492  os << "Thread[";
493  if (GetThinLockId() != 0) {
494    // If we're in kStarting, we won't have a thin lock id or tid yet.
495    os << GetThinLockId()
496             << ",tid=" << GetTid() << ',';
497  }
498  os << GetState()
499           << ",Thread*=" << this
500           << ",peer=" << opeer_
501           << ",\"" << *name_ << "\""
502           << "]";
503}
504
505void Thread::Dump(std::ostream& os) const {
506  DumpState(os);
507  DumpStack(os);
508}
509
510String* Thread::GetThreadName(const ScopedObjectAccessUnchecked& soa) const {
511  Field* f = soa.DecodeField(WellKnownClasses::java_lang_Thread_name);
512  return (opeer_ != NULL) ? reinterpret_cast<String*>(f->GetObject(opeer_)) : NULL;
513}
514
515void Thread::GetThreadName(std::string& name) const {
516  name.assign(*name_);
517}
518
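// state_and_flags_ packs the thread state and the flag bits into one 32-bit word, so flags can be
// set and cleared with a single atomic OR/AND while state transitions elsewhere compare-and-swap
// the same word.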
519void Thread::AtomicSetFlag(ThreadFlag flag) {
520  android_atomic_or(flag, &state_and_flags_.as_int);
521}
522
523void Thread::AtomicClearFlag(ThreadFlag flag) {
524  android_atomic_and(-1 ^ flag, &state_and_flags_.as_int);
525}
526
527ThreadState Thread::SetState(ThreadState new_state) {
528  // Cannot use this code to change into Runnable as changing to Runnable should fail if
529  // old_state_and_flags.suspend_request is true.
530  DCHECK_NE(new_state, kRunnable);
531  DCHECK_EQ(this, Thread::Current());
532  union StateAndFlags old_state_and_flags = state_and_flags_;
533  state_and_flags_.as_struct.state = new_state;
534  return static_cast<ThreadState>(old_state_and_flags.as_struct.state);
535}
536
537// Attempt to rectify locks so that we dump thread list with required locks before exiting.
538static void UnsafeLogFatalForSuspendCount(Thread* self, Thread* thread) NO_THREAD_SAFETY_ANALYSIS {
539  LOG(ERROR) << *thread << " suspend count already zero.";
540  Locks::thread_suspend_count_lock_->Unlock(self);
541  if (!Locks::mutator_lock_->IsSharedHeld(self)) {
542    Locks::mutator_lock_->SharedTryLock(self);
543    if (!Locks::mutator_lock_->IsSharedHeld(self)) {
544      LOG(WARNING) << "Dumping thread list without holding mutator_lock_";
545    }
546  }
547  if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
548    Locks::thread_list_lock_->TryLock(self);
549    if (!Locks::thread_list_lock_->IsExclusiveHeld(self)) {
550      LOG(WARNING) << "Dumping thread list without holding thread_list_lock_";
551    }
552  }
553  std::ostringstream ss;
554  Runtime::Current()->GetThreadList()->DumpLocked(ss);
555  LOG(FATAL) << ss.str();
556}
557
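// Adjusts suspend_count_ (and debug_suspend_count_ when the change is on behalf of the debugger).
// delta is normally +1 or -1; -debug_suspend_count_ is also allowed, presumably so that all
// debugger suspensions can be undone in one step. The kSuspendRequest flag is kept in sync with
// whether the resulting count is non-zero.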
558void Thread::ModifySuspendCount(Thread* self, int delta, bool for_debugger) {
559  DCHECK(delta == -1 || delta == +1 || delta == -debug_suspend_count_)
560      << delta << " " << debug_suspend_count_ << " " << this;
561  DCHECK_GE(suspend_count_, debug_suspend_count_) << this;
562  Locks::thread_suspend_count_lock_->AssertHeld(self);
563  if (this != self && !IsSuspended()) {
564    Locks::thread_list_lock_->AssertHeld(self);
565  }
566  if (UNLIKELY(delta < 0 && suspend_count_ <= 0)) {
567    UnsafeLogFatalForSuspendCount(self, this);
568    return;
569  }
570
571  suspend_count_ += delta;
572  if (for_debugger) {
573    debug_suspend_count_ += delta;
574  }
575
576  if (suspend_count_ == 0) {
577    AtomicClearFlag(kSuspendRequest);
578  } else {
579    AtomicSetFlag(kSuspendRequest);
580  }
581}
582
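// Attempts to install a checkpoint request with a single compare-and-swap that only succeeds if the
// target thread is Runnable at that instant; returns true on success and false otherwise.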
583bool Thread::RequestCheckpoint(Closure* function) {
584  CHECK(!ReadFlag(kCheckpointRequest)) << "Already have a pending checkpoint request";
585  checkpoint_function_ = function;
586  union StateAndFlags old_state_and_flags = state_and_flags_;
587  // We must be runnable to request a checkpoint.
588  old_state_and_flags.as_struct.state = kRunnable;
589  union StateAndFlags new_state_and_flags = old_state_and_flags;
590  new_state_and_flags.as_struct.flags |= kCheckpointRequest;
591  int succeeded = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
592                                         &state_and_flags_.as_int);
593  return succeeded == 0;
594}
595
596void Thread::FullSuspendCheck() {
597  VLOG(threads) << this << " self-suspending";
598  // Make thread appear suspended to other threads, release mutator_lock_.
599  TransitionFromRunnableToSuspended(kSuspended);
600  // Transition back to runnable noting requests to suspend, re-acquire share on mutator_lock_.
601  TransitionFromSuspendedToRunnable();
602  VLOG(threads) << this << " self-reviving";
603}
604
605void Thread::TransitionFromRunnableToSuspended(ThreadState new_state) {
606  AssertThreadSuspensionIsAllowable();
607  DCHECK_NE(new_state, kRunnable);
608  DCHECK_EQ(this, Thread::Current());
609  // Change to non-runnable state, thereby appearing suspended to the system.
610  DCHECK_EQ(GetState(), kRunnable);
611  union StateAndFlags old_state_and_flags;
612  union StateAndFlags new_state_and_flags;
613  do {
614    old_state_and_flags = state_and_flags_;
615    // Copy over flags and try to clear the checkpoint bit if it is set.
616    new_state_and_flags.as_struct.flags = old_state_and_flags.as_struct.flags & ~kCheckpointRequest;
617    new_state_and_flags.as_struct.state = new_state;
618  } while (android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
619                                  &state_and_flags_.as_int) != 0);
620  // If we toggled the checkpoint flag we must have cleared it.
621  uint16_t flag_change = new_state_and_flags.as_struct.flags ^ old_state_and_flags.as_struct.flags;
622  if ((flag_change & kCheckpointRequest) != 0) {
623    RunCheckpointFunction();
624  }
625  // Release share on mutator_lock_.
626  Locks::mutator_lock_->SharedUnlock(this);
627}
628
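// Loops until this thread is Runnable again: wait out any pending suspend request on resume_cond_,
// take shared ownership of mutator_lock_, then compare-and-swap the state to kRunnable. If a new
// suspend request arrives in between, the lock is released and the whole sequence is retried.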
629ThreadState Thread::TransitionFromSuspendedToRunnable() {
630  bool done = false;
631  union StateAndFlags old_state_and_flags = state_and_flags_;
632  int16_t old_state = old_state_and_flags.as_struct.state;
633  DCHECK_NE(static_cast<ThreadState>(old_state), kRunnable);
634  do {
635    Locks::mutator_lock_->AssertNotHeld(this);  // Otherwise we starve GC.
636    old_state_and_flags = state_and_flags_;
637    DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
638    if ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
639      // Wait while our suspend count is non-zero.
640      MutexLock mu(this, *Locks::thread_suspend_count_lock_);
641      old_state_and_flags = state_and_flags_;
642      DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
643      while ((old_state_and_flags.as_struct.flags & kSuspendRequest) != 0) {
644        // Re-check when Thread::resume_cond_ is notified.
645        Thread::resume_cond_->Wait(this);
646        old_state_and_flags = state_and_flags_;
647        DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
648      }
649      DCHECK_EQ(GetSuspendCount(), 0);
650    }
651    // Re-acquire shared mutator_lock_ access.
652    Locks::mutator_lock_->SharedLock(this);
653    // Atomically change from suspended to runnable if no suspend request pending.
654    old_state_and_flags = state_and_flags_;
655    DCHECK_EQ(old_state_and_flags.as_struct.state, old_state);
656    if ((old_state_and_flags.as_struct.flags & kSuspendRequest) == 0) {
657      union StateAndFlags new_state_and_flags = old_state_and_flags;
658      new_state_and_flags.as_struct.state = kRunnable;
659      done = android_atomic_cmpxchg(old_state_and_flags.as_int, new_state_and_flags.as_int,
660                                    &state_and_flags_.as_int)
661                                        == 0;
662    }
663    if (!done) {
664      // Failed to transition to Runnable. Release shared mutator_lock_ access and try again.
665      Locks::mutator_lock_->SharedUnlock(this);
666    }
667  } while (!done);
668  return static_cast<ThreadState>(old_state);
669}
670
671Thread* Thread::SuspendForDebugger(jobject peer, bool request_suspension, bool* timeout) {
672  static const useconds_t kTimeoutUs = 30 * 1000000; // 30s.
673  useconds_t total_delay_us = 0;
674  useconds_t delay_us = 0;
675  bool did_suspend_request = false;
676  *timeout = false;
677  while (true) {
678    Thread* thread;
679    {
680      ScopedObjectAccess soa(Thread::Current());
681      MutexLock mu(soa.Self(), *Locks::thread_list_lock_);
682      thread = Thread::FromManagedThread(soa, peer);
683      if (thread == NULL) {
684        LOG(WARNING) << "No such thread for suspend: " << peer;
685        return NULL;
686      }
687      {
688        MutexLock mu(soa.Self(), *Locks::thread_suspend_count_lock_);
689        if (request_suspension) {
690          thread->ModifySuspendCount(soa.Self(), +1, true /* for_debugger */);
691          request_suspension = false;
692          did_suspend_request = true;
693        }
694        // IsSuspended would fail for the current thread, because it was changed to Runnable
695        // above. Moreover, now that its suspend count has been raised, the current thread would
696        // self-suspend on its next transition to Runnable, which makes it awkward to work with.
697        // It's simpler for callers of this code to handle the current thread explicitly.
698        CHECK_NE(thread, soa.Self()) << "Attempt to suspend for debugger the current thread";
699        // If thread is suspended (perhaps it was already not Runnable but didn't have a suspend
700        // count, or else we've waited and it has self suspended) or is the current thread, we're
701        // done.
702        if (thread->IsSuspended()) {
703          return thread;
704        }
705        if (total_delay_us >= kTimeoutUs) {
706          LOG(ERROR) << "Thread suspension timed out: " << peer;
707          if (did_suspend_request) {
708            thread->ModifySuspendCount(soa.Self(), -1, true /* for_debugger */);
709          }
710          *timeout = true;
711          return NULL;
712        }
713      }
714      // Release locks and come out of runnable state.
715    }
716    for (int i = kMaxMutexLevel; i >= 0; --i) {
717      BaseMutex* held_mutex = Thread::Current()->GetHeldMutex(static_cast<LockLevel>(i));
718      if (held_mutex != NULL) {
719        LOG(FATAL) << "Holding " << held_mutex->GetName()
720            << " while sleeping for thread suspension";
721      }
722    }
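    // Exponential back-off between checks: yield first, then sleep 1ms, 2ms, 4ms, ... with each
    // individual sleep capped below 0.5s, until the thread is seen suspended or the 30s timeout hits.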
723    {
724      useconds_t new_delay_us = delay_us * 2;
725      CHECK_GE(new_delay_us, delay_us);
726      if (new_delay_us < 500000) {  // Don't allow sleeping to be more than 0.5s.
727        delay_us = new_delay_us;
728      }
729    }
730    if (delay_us == 0) {
731      sched_yield();
732      // Default to a first sleep of 1 millisecond: the 500us set here is doubled before it is used.
733      delay_us = 500;
734    } else {
735      usleep(delay_us);
736      total_delay_us += delay_us;
737    }
738  }
739}
740
741void Thread::DumpState(std::ostream& os, const Thread* thread, pid_t tid) {
742  std::string group_name;
743  int priority;
744  bool is_daemon = false;
745  Thread* self = Thread::Current();
746
747  if (thread != NULL && thread->opeer_ != NULL) {
748    ScopedObjectAccessUnchecked soa(self);
749    priority = soa.DecodeField(WellKnownClasses::java_lang_Thread_priority)->GetInt(thread->opeer_);
750    is_daemon = soa.DecodeField(WellKnownClasses::java_lang_Thread_daemon)->GetBoolean(thread->opeer_);
751
752    Object* thread_group =
753        soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(thread->opeer_);
754
755    if (thread_group != NULL) {
756      Field* group_name_field = soa.DecodeField(WellKnownClasses::java_lang_ThreadGroup_name);
757      String* group_name_string = reinterpret_cast<String*>(group_name_field->GetObject(thread_group));
758      group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : "<null>";
759    }
760  } else {
761    priority = GetNativePriority();
762  }
763
764  std::string scheduler_group_name(GetSchedulerGroupName(tid));
765  if (scheduler_group_name.empty()) {
766    scheduler_group_name = "default";
767  }
768
769  if (thread != NULL) {
770    os << '"' << *thread->name_ << '"';
771    if (is_daemon) {
772      os << " daemon";
773    }
774    os << " prio=" << priority
775       << " tid=" << thread->GetThinLockId()
776       << " " << thread->GetState();
777    if (thread->IsStillStarting()) {
778      os << " (still starting up)";
779    }
780    os << "\n";
781  } else {
782    os << '"' << ::art::GetThreadName(tid) << '"'
783       << " prio=" << priority
784       << " (not attached)\n";
785  }
786
787  if (thread != NULL) {
788    MutexLock mu(self, *Locks::thread_suspend_count_lock_);
789    os << "  | group=\"" << group_name << "\""
790       << " sCount=" << thread->suspend_count_
791       << " dsCount=" << thread->debug_suspend_count_
792       << " obj=" << reinterpret_cast<void*>(thread->opeer_)
793       << " self=" << reinterpret_cast<const void*>(thread) << "\n";
794  }
795
796  os << "  | sysTid=" << tid
797     << " nice=" << getpriority(PRIO_PROCESS, tid)
798     << " cgrp=" << scheduler_group_name;
799  if (thread != NULL) {
800    int policy;
801    sched_param sp;
802    CHECK_PTHREAD_CALL(pthread_getschedparam, (thread->pthread_self_, &policy, &sp), __FUNCTION__);
803    os << " sched=" << policy << "/" << sp.sched_priority
804       << " handle=" << reinterpret_cast<void*>(thread->pthread_self_);
805  }
806  os << "\n";
807
808  // Grab the scheduler stats for this thread.
809  std::string scheduler_stats;
810  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", tid), &scheduler_stats)) {
811    scheduler_stats.resize(scheduler_stats.size() - 1); // Lose the trailing '\n'.
812  } else {
813    scheduler_stats = "0 0 0";
814  }
815
816  char native_thread_state = '?';
817  int utime = 0;
818  int stime = 0;
819  int task_cpu = 0;
820  GetTaskStats(tid, native_thread_state, utime, stime, task_cpu);
821
822  os << "  | state=" << native_thread_state
823     << " schedstat=( " << scheduler_stats << " )"
824     << " utm=" << utime
825     << " stm=" << stime
826     << " core=" << task_cpu
827     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
828  if (thread != NULL) {
829    os << "  | stack=" << reinterpret_cast<void*>(thread->stack_begin_) << "-" << reinterpret_cast<void*>(thread->stack_end_)
830       << " stackSize=" << PrettySize(thread->stack_size_) << "\n";
831  }
832}
833
834void Thread::DumpState(std::ostream& os) const {
835  Thread::DumpState(os, this, GetTid());
836}
837
838struct StackDumpVisitor : public StackVisitor {
839  StackDumpVisitor(std::ostream& os, const Thread* thread, Context* context, bool can_allocate)
840      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
841      : StackVisitor(thread->GetManagedStack(), thread->GetInstrumentationStack(), context),
842        os(os), thread(thread), can_allocate(can_allocate),
843        last_method(NULL), last_line_number(0), repetition_count(0), frame_count(0) {
844  }
845
846  virtual ~StackDumpVisitor() {
847    if (frame_count == 0) {
848      os << "  (no managed stack frames)\n";
849    }
850  }
851
852  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
853    AbstractMethod* m = GetMethod();
854    if (m->IsRuntimeMethod()) {
855      return true;
856    }
857    const int kMaxRepetition = 3;
858    Class* c = m->GetDeclaringClass();
859    const DexCache* dex_cache = c->GetDexCache();
860    int line_number = -1;
861    if (dex_cache != NULL) {  // be tolerant of bad input
862      const DexFile& dex_file = *dex_cache->GetDexFile();
863      line_number = dex_file.GetLineNumFromPC(m, GetDexPc());
864    }
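    // Collapse long runs of the same method and line: after kMaxRepetition identical frames, stop
    // printing them and emit a single "... repeated N times" line once the run ends.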
865    if (line_number == last_line_number && last_method == m) {
866      repetition_count++;
867    } else {
868      if (repetition_count >= kMaxRepetition) {
869        os << "  ... repeated " << (repetition_count - kMaxRepetition) << " times\n";
870      }
871      repetition_count = 0;
872      last_line_number = line_number;
873      last_method = m;
874    }
875    if (repetition_count < kMaxRepetition) {
876      os << "  at " << PrettyMethod(m, false);
877      if (m->IsNative()) {
878        os << "(Native method)";
879      } else {
880        mh.ChangeMethod(m);
881        const char* source_file(mh.GetDeclaringClassSourceFile());
882        os << "(" << (source_file != NULL ? source_file : "unavailable")
883           << ":" << line_number << ")";
884      }
885      os << "\n";
886      if (frame_count == 0) {
887        Monitor::DescribeWait(os, thread);
888      }
889      if (can_allocate) {
890        Monitor::DescribeLocks(os, this);
891      }
892    }
893
894    ++frame_count;
895    return true;
896  }
897  std::ostream& os;
898  const Thread* thread;
899  bool can_allocate;
900  MethodHelper mh;
901  AbstractMethod* last_method;
902  int last_line_number;
903  int repetition_count;
904  int frame_count;
905};
906
907void Thread::DumpStack(std::ostream& os) const {
908  // If we're currently in native code, dump that stack before dumping the managed stack.
909  if (GetState() == kNative) {
910    DumpKernelStack(os, GetTid(), "  kernel: ", false);
911    DumpNativeStack(os, GetTid(), "  native: ", false);
912  }
913  UniquePtr<Context> context(Context::Create());
914  StackDumpVisitor dumper(os, this, context.get(), !throwing_OutOfMemoryError_);
915  dumper.WalkStack();
916}
917
918void Thread::ThreadExitCallback(void* arg) {
919  Thread* self = reinterpret_cast<Thread*>(arg);
920  if (self->thread_exit_check_count_ == 0) {
921    LOG(WARNING) << "Native thread exiting without having called DetachCurrentThread (maybe it's going to use a pthread_key_create destructor?): " << *self;
922    CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, self), "reattach self");
923    self->thread_exit_check_count_ = 1;
924  } else {
925    LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
926  }
927}
928
929void Thread::Startup() {
930  {
931    MutexLock mu(Thread::Current(), *Locks::thread_suspend_count_lock_);  // Keep GCC happy.
932    resume_cond_ = new ConditionVariable("Thread resumption condition variable",
933                                         *Locks::thread_suspend_count_lock_);
934  }
935
936  // Allocate a TLS slot.
937  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");
938
939  // Double-check the TLS slot allocation.
940  if (pthread_getspecific(pthread_key_self_) != NULL) {
941    LOG(FATAL) << "Newly-created pthread TLS slot is not NULL";
942  }
943}
944
945void Thread::FinishStartup() {
946  Runtime* runtime = Runtime::Current();
947  CHECK(runtime->IsStarted());
948
949  // Finish attaching the main thread.
950  ScopedObjectAccess soa(Thread::Current());
951  Thread::Current()->CreatePeer("main", false, runtime->GetMainThreadGroup());
952
953  Runtime::Current()->GetClassLinker()->RunRootClinits();
954}
955
956void Thread::Shutdown() {
957  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
958}
959
960Thread::Thread(bool daemon)
961    : suspend_count_(0),
962      card_table_(NULL),
963      exception_(NULL),
964      stack_end_(NULL),
965      managed_stack_(),
966      jni_env_(NULL),
967      self_(NULL),
968      opeer_(NULL),
969      jpeer_(NULL),
970      stack_begin_(NULL),
971      stack_size_(0),
972      thin_lock_id_(0),
973      tid_(0),
974      wait_mutex_(new Mutex("a thread wait mutex")),
975      wait_cond_(new ConditionVariable("a thread wait condition variable", *wait_mutex_)),
976      wait_monitor_(NULL),
977      interrupted_(false),
978      wait_next_(NULL),
979      monitor_enter_object_(NULL),
980      top_sirt_(NULL),
981      runtime_(NULL),
982      class_loader_override_(NULL),
983      long_jump_context_(NULL),
984      throwing_OutOfMemoryError_(false),
985      debug_suspend_count_(0),
986      debug_invoke_req_(new DebugInvokeReq),
987      instrumentation_stack_(new std::deque<InstrumentationStackFrame>),
988      name_(new std::string(kThreadNameDuringStartup)),
989      daemon_(daemon),
990      pthread_self_(0),
991      no_thread_suspension_(0),
992      last_no_thread_suspension_cause_(NULL),
993      checkpoint_function_(0),
994      thread_exit_check_count_(0) {
995  CHECK_EQ((sizeof(Thread) % 4), 0U) << sizeof(Thread);
996  state_and_flags_.as_struct.flags = 0;
997  state_and_flags_.as_struct.state = kNative;
998  memset(&held_mutexes_[0], 0, sizeof(held_mutexes_));
999}
1000
1001bool Thread::IsStillStarting() const {
1002  // You might think you can check whether the state is kStarting, but for much of thread startup,
1003  // the thread is in kNative; it might also be in kVmWait.
1004  // You might think you can check whether the peer is NULL, but the peer is actually created and
1005  // assigned fairly early on, and needs to be.
1006  // It turns out that the last thing to change is the thread name; that's a good proxy for "has
1007  // this thread _ever_ entered kRunnable".
1008  return (jpeer_ == NULL && opeer_ == NULL) || (*name_ == kThreadNameDuringStartup);
1009}
1010
1011void Thread::AssertNoPendingException() const {
1012  if (UNLIKELY(IsExceptionPending())) {
1013    ScopedObjectAccess soa(Thread::Current());
1014    Throwable* exception = GetException();
1015    LOG(FATAL) << "No pending exception expected: " << exception->Dump();
1016  }
1017}
1018
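// Visitor applied to the thread's JNI monitor table from Thread::Destroy: any monitor still held by
// the detaching thread is exited on its behalf, with a warning.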
1019static void MonitorExitVisitor(const Object* object, void* arg) NO_THREAD_SAFETY_ANALYSIS {
1020  Thread* self = reinterpret_cast<Thread*>(arg);
1021  Object* entered_monitor = const_cast<Object*>(object);
1022  if (self->HoldsLock(entered_monitor)) {
1023    LOG(WARNING) << "Calling MonitorExit on object "
1024                 << object << " (" << PrettyTypeOf(object) << ")"
1025                 << " left locked by native thread "
1026                 << *Thread::Current() << " which is detaching";
1027    entered_monitor->MonitorExit(self);
1028  }
1029}
1030
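// Managed-side teardown, run while the thread can still execute managed code: uncaught-exception
// handling, removal from the ThreadGroup, clearing vmData, and notifying anyone blocked in
// Thread.join(). The native state is freed later, in the destructor.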
1031void Thread::Destroy() {
1032  Thread* self = this;
1033  DCHECK_EQ(self, Thread::Current());
1034
1035  if (opeer_ != NULL) {
1036    ScopedObjectAccess soa(self);
1037    // We may need to call user-supplied managed code, do this before final clean-up.
1038    HandleUncaughtExceptions(soa);
1039    RemoveFromThreadGroup(soa);
1040
1041    // this.vmData = 0;
1042    soa.DecodeField(WellKnownClasses::java_lang_Thread_vmData)->SetInt(opeer_, 0);
1043    Dbg::PostThreadDeath(self);
1044
1045    // Thread.join() is implemented as an Object.wait() on the Thread.lock object. Signal anyone
1046    // who is waiting.
1047    Object* lock = soa.DecodeField(WellKnownClasses::java_lang_Thread_lock)->GetObject(opeer_);
1048    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
1049    if (lock != NULL) {
1050      lock->MonitorEnter(self);
1051      lock->Notify();
1052      lock->MonitorExit(self);
1053    }
1054  }
1055
1056  // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
1057  if (jni_env_ != NULL) {
1058    jni_env_->monitors.VisitRoots(MonitorExitVisitor, self);
1059  }
1060}
1061
1062Thread::~Thread() {
1063  if (jni_env_ != NULL && jpeer_ != NULL) {
1064    // If pthread_create fails we don't have a jni env here.
1065    jni_env_->DeleteGlobalRef(jpeer_);
1066    jpeer_ = NULL;
1067  }
1068  opeer_ = NULL;
1069
1070  delete jni_env_;
1071  jni_env_ = NULL;
1072
1073  CHECK_NE(GetState(), kRunnable);
1074  // We may be deleting a still-born thread.
1075  SetStateUnsafe(kTerminated);
1076
1077  delete wait_cond_;
1078  delete wait_mutex_;
1079
1080#if !defined(ART_USE_LLVM_COMPILER)
1081  delete long_jump_context_;
1082#endif
1083
1084  delete debug_invoke_req_;
1085  delete instrumentation_stack_;
1086  delete name_;
1087
1088  TearDownAlternateSignalStack();
1089}
1090
1091void Thread::HandleUncaughtExceptions(ScopedObjectAccess& soa) {
1092  if (!IsExceptionPending()) {
1093    return;
1094  }
1095  ScopedLocalRef<jobject> peer(jni_env_, soa.AddLocalReference<jobject>(opeer_));
1096  ScopedThreadStateChange tsc(this, kNative);
1097
1098  // Get and clear the exception.
1099  ScopedLocalRef<jthrowable> exception(jni_env_, jni_env_->ExceptionOccurred());
1100  jni_env_->ExceptionClear();
1101
1102  // If the thread has its own handler, use that.
1103  ScopedLocalRef<jobject> handler(jni_env_,
1104                                  jni_env_->GetObjectField(peer.get(),
1105                                                           WellKnownClasses::java_lang_Thread_uncaughtHandler));
1106  if (handler.get() == NULL) {
1107    // Otherwise use the thread group's default handler.
1108    handler.reset(jni_env_->GetObjectField(peer.get(), WellKnownClasses::java_lang_Thread_group));
1109  }
1110
1111  // Call the handler.
1112  jni_env_->CallVoidMethod(handler.get(),
1113                           WellKnownClasses::java_lang_Thread$UncaughtExceptionHandler_uncaughtException,
1114                           peer.get(), exception.get());
1115
1116  // If the handler threw, clear that exception too.
1117  jni_env_->ExceptionClear();
1118}
1119
1120void Thread::RemoveFromThreadGroup(ScopedObjectAccess& soa) {
1121  // this.group.removeThread(this);
1122  // group can be null if we're in the compiler or a test.
1123  Object* ogroup = soa.DecodeField(WellKnownClasses::java_lang_Thread_group)->GetObject(opeer_);
1124  if (ogroup != NULL) {
1125    ScopedLocalRef<jobject> group(soa.Env(), soa.AddLocalReference<jobject>(ogroup));
1126    ScopedLocalRef<jobject> peer(soa.Env(), soa.AddLocalReference<jobject>(opeer_));
1127    ScopedThreadStateChange tsc(soa.Self(), kNative);
1128    jni_env_->CallVoidMethod(group.get(), WellKnownClasses::java_lang_ThreadGroup_removeThread,
1129                             peer.get());
1130  }
1131}
1132
1133size_t Thread::NumSirtReferences() {
1134  size_t count = 0;
1135  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
1136    count += cur->NumberOfReferences();
1137  }
1138  return count;
1139}
1140
1141bool Thread::SirtContains(jobject obj) const {
1142  Object** sirt_entry = reinterpret_cast<Object**>(obj);
1143  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
1144    if (cur->Contains(sirt_entry)) {
1145      return true;
1146    }
1147  }
1148  // JNI code invoked from portable code uses shadow frames rather than the SIRT.
1149  return managed_stack_.ShadowFramesContain(sirt_entry);
1150}
1151
1152void Thread::SirtVisitRoots(Heap::RootVisitor* visitor, void* arg) {
1153  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->GetLink()) {
1154    size_t num_refs = cur->NumberOfReferences();
1155    for (size_t j = 0; j < num_refs; j++) {
1156      Object* object = cur->GetReference(j);
1157      if (object != NULL) {
1158        visitor(object, arg);
1159      }
1160    }
1161  }
1162}
1163
1164Object* Thread::DecodeJObject(jobject obj) const {
1165  Locks::mutator_lock_->AssertSharedHeld(this);
1166  if (obj == NULL) {
1167    return NULL;
1168  }
1169  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
1170  IndirectRefKind kind = GetIndirectRefKind(ref);
1171  Object* result;
1172  switch (kind) {
1173  case kLocal:
1174    {
1175      IndirectReferenceTable& locals = jni_env_->locals;
1176      result = const_cast<Object*>(locals.Get(ref));
1177      break;
1178    }
1179  case kGlobal:
1180    {
1181      JavaVMExt* vm = Runtime::Current()->GetJavaVM();
1182      IndirectReferenceTable& globals = vm->globals;
1183      MutexLock mu(const_cast<Thread*>(this), vm->globals_lock);
1184      result = const_cast<Object*>(globals.Get(ref));
1185      break;
1186    }
1187  case kWeakGlobal:
1188    {
1189      JavaVMExt* vm = Runtime::Current()->GetJavaVM();
1190      IndirectReferenceTable& weak_globals = vm->weak_globals;
1191      MutexLock mu(const_cast<Thread*>(this), vm->weak_globals_lock);
1192      result = const_cast<Object*>(weak_globals.Get(ref));
1193      if (result == kClearedJniWeakGlobal) {
1194        // This is a special case where it's okay to return NULL.
1195        return NULL;
1196      }
1197      break;
1198    }
1199  case kSirtOrInvalid:
1200  default:
1201    // TODO: make stack indirect reference table lookup more efficient
1202    // Check if this is a local reference in the SIRT
1203    if (SirtContains(obj)) {
1204      result = *reinterpret_cast<Object**>(obj);  // Read from SIRT
1205    } else if (Runtime::Current()->GetJavaVM()->work_around_app_jni_bugs) {
1206      // Assume an invalid local reference is actually a direct pointer.
1207      result = reinterpret_cast<Object*>(obj);
1208    } else {
1209      result = kInvalidIndirectRefObject;
1210    }
1211  }
1212
1213  if (result == NULL) {
1214    JniAbortF(NULL, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
1215  } else {
1216    if (result != kInvalidIndirectRefObject) {
1217      Runtime::Current()->GetHeap()->VerifyObject(result);
1218    }
1219  }
1220  return result;
1221}
1222
1223// Implements java.lang.Thread.interrupted.
1224bool Thread::Interrupted() {
1225  MutexLock mu(Thread::Current(), *wait_mutex_);
1226  bool interrupted = interrupted_;
1227  interrupted_ = false;
1228  return interrupted;
1229}
1230
1231// Implements java.lang.Thread.isInterrupted.
1232bool Thread::IsInterrupted() {
1233  MutexLock mu(Thread::Current(), *wait_mutex_);
1234  return interrupted_;
1235}
1236
1237void Thread::Interrupt() {
1238  Thread* self = Thread::Current();
1239  MutexLock mu(self, *wait_mutex_);
1240  if (interrupted_) {
1241    return;
1242  }
1243  interrupted_ = true;
1244  NotifyLocked(self);
1245}
1246
1247void Thread::Notify() {
1248  Thread* self = Thread::Current();
1249  MutexLock mu(self, *wait_mutex_);
1250  NotifyLocked(self);
1251}
1252
1253void Thread::NotifyLocked(Thread* self) {
1254  if (wait_monitor_ != NULL) {
1255    wait_cond_->Signal(self);
1256  }
1257}
1258
1259class CountStackDepthVisitor : public StackVisitor {
1260 public:
1261  CountStackDepthVisitor(const ManagedStack* stack,
1262                         const std::deque<InstrumentationStackFrame>* instrumentation_stack)
1263      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1264      : StackVisitor(stack, instrumentation_stack, NULL),
1265        depth_(0), skip_depth_(0), skipping_(true) {}
1266
1267  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1268    // We want to skip frames up to and including the exception's constructor.
1269    // Note that we also skip any frame that has no method (namely the callee-save
1270    // frame).
1271    AbstractMethod* m = GetMethod();
1272    if (skipping_ && !m->IsRuntimeMethod() &&
1273        !Throwable::GetJavaLangThrowable()->IsAssignableFrom(m->GetDeclaringClass())) {
1274      skipping_ = false;
1275    }
1276    if (!skipping_) {
1277      if (!m->IsRuntimeMethod()) {  // Ignore runtime frames (in particular callee save).
1278        ++depth_;
1279      }
1280    } else {
1281      ++skip_depth_;
1282    }
1283    return true;
1284  }
1285
1286  int GetDepth() const {
1287    return depth_;
1288  }
1289
1290  int GetSkipDepth() const {
1291    return skip_depth_;
1292  }
1293
1294 private:
1295  uint32_t depth_;
1296  uint32_t skip_depth_;
1297  bool skipping_;
1298};
1299
1300class BuildInternalStackTraceVisitor : public StackVisitor {
1301 public:
1302  explicit BuildInternalStackTraceVisitor(Thread* self, const ManagedStack* stack,
1303                                          const std::deque<InstrumentationStackFrame>* instrumentation_stack,
1304                                          int skip_depth)
1305      : StackVisitor(stack, instrumentation_stack, NULL), self_(self),
1306        skip_depth_(skip_depth), count_(0), dex_pc_trace_(NULL), method_trace_(NULL) {}
1307
1308  bool Init(int depth)
1309      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1310    // Allocate method trace with an extra slot that will hold the PC trace
1311    SirtRef<ObjectArray<Object> >
1312        method_trace(self_,
1313                     Runtime::Current()->GetClassLinker()->AllocObjectArray<Object>(self_,
1314                                                                                    depth + 1));
1315    if (method_trace.get() == NULL) {
1316      return false;
1317    }
1318    IntArray* dex_pc_trace = IntArray::Alloc(self_, depth);
1319    if (dex_pc_trace == NULL) {
1320      return false;
1321    }
1322    // Save PC trace in last element of method trace, also places it into the
1323    // object graph.
1324    method_trace->Set(depth, dex_pc_trace);
1325    // Set the Object*s and assert that no thread suspension is now possible.
1326    const char* last_no_suspend_cause =
1327        self_->StartAssertNoThreadSuspension("Building internal stack trace");
1328    CHECK(last_no_suspend_cause == NULL) << last_no_suspend_cause;
1329    method_trace_ = method_trace.get();
1330    dex_pc_trace_ = dex_pc_trace;
1331    return true;
1332  }
1333
1334  virtual ~BuildInternalStackTraceVisitor() {
1335    if (method_trace_ != NULL) {
1336      self_->EndAssertNoThreadSuspension(NULL);
1337    }
1338  }
1339
1340  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1341    if (method_trace_ == NULL || dex_pc_trace_ == NULL) {
1342      return true; // We're probably trying to fillInStackTrace for an OutOfMemoryError.
1343    }
1344    if (skip_depth_ > 0) {
1345      skip_depth_--;
1346      return true;
1347    }
1348    AbstractMethod* m = GetMethod();
1349    if (m->IsRuntimeMethod()) {
1350      return true;  // Ignore runtime frames (in particular callee save).
1351    }
1352    method_trace_->Set(count_, m);
1353    dex_pc_trace_->Set(count_, GetDexPc());
1354    ++count_;
1355    return true;
1356  }
1357
1358  ObjectArray<Object>* GetInternalStackTrace() const {
1359    return method_trace_;
1360  }
1361
1362 private:
1363  Thread* const self_;
1364  // How many more frames to skip.
1365  int32_t skip_depth_;
1366  // Current position down stack trace.
1367  uint32_t count_;
1368  // Array of dex PC values.
1369  IntArray* dex_pc_trace_;
1370  // An array of the methods on the stack, the last entry is a reference to the PC trace.
1371  ObjectArray<Object>* method_trace_;
1372};
1373
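// Builds the compact internal form of the current stack: an object array holding each frame's
// AbstractMethod*, with an IntArray of the corresponding dex PCs stored in the final slot. Two
// passes are made: CountStackDepthVisitor sizes the arrays, BuildInternalStackTraceVisitor fills them.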
1374jobject Thread::CreateInternalStackTrace(const ScopedObjectAccessUnchecked& soa) const {
1375  // Compute depth of stack
1376  CountStackDepthVisitor count_visitor(GetManagedStack(), GetInstrumentationStack());
1377  count_visitor.WalkStack();
1378  int32_t depth = count_visitor.GetDepth();
1379  int32_t skip_depth = count_visitor.GetSkipDepth();
1380
1381  // Build internal stack trace.
1382  BuildInternalStackTraceVisitor build_trace_visitor(soa.Self(), GetManagedStack(),
1383                                                     GetInstrumentationStack(), skip_depth);
1384  if (!build_trace_visitor.Init(depth)) {
1385    return NULL;  // Allocation failed.
1386  }
1387  build_trace_visitor.WalkStack();
1388  return soa.AddLocalReference<jobjectArray>(build_trace_visitor.GetInternalStackTrace());
1389}
1390
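// Expands the compact method/PC arrays produced by CreateInternalStackTrace into
// java.lang.StackTraceElement objects, either filling the caller-supplied output_array or
// allocating a new array of exactly the computed depth.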
1391jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
1392    jobjectArray output_array, int* stack_depth) {
1393  // Transition into runnable state to work on Object*/Array*
1394  ScopedObjectAccess soa(env);
1395  // Decode the internal stack trace into the depth, method trace and PC trace
1396  ObjectArray<Object>* method_trace = soa.Decode<ObjectArray<Object>*>(internal);
1397  int32_t depth = method_trace->GetLength() - 1;
1398  IntArray* pc_trace = down_cast<IntArray*>(method_trace->Get(depth));
1399
1400  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
1401
1402  jobjectArray result;
1403  ObjectArray<StackTraceElement>* java_traces;
1404  if (output_array != NULL) {
1405    // Reuse the array we were given.
1406    result = output_array;
1407    java_traces = soa.Decode<ObjectArray<StackTraceElement>*>(output_array);
1408    // ...adjusting the number of frames we'll write to not exceed the array length.
1409    depth = std::min(depth, java_traces->GetLength());
1410  } else {
1411    // Create java_trace array and place in local reference table
1412    java_traces = class_linker->AllocStackTraceElementArray(soa.Self(), depth);
1413    if (java_traces == NULL) {
1414      return NULL;
1415    }
1416    result = soa.AddLocalReference<jobjectArray>(java_traces);
1417  }
1418
1419  if (stack_depth != NULL) {
1420    *stack_depth = depth;
1421  }
1422
1423  MethodHelper mh;
1424  for (int32_t i = 0; i < depth; ++i) {
1425    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
1426    AbstractMethod* method = down_cast<AbstractMethod*>(method_trace->Get(i));
1427    mh.ChangeMethod(method);
1428    uint32_t dex_pc = pc_trace->Get(i);
1429    int32_t line_number = mh.GetLineNumFromDexPC(dex_pc);
1430    // Allocate element, potentially triggering GC
1431    // TODO: reuse class_name_object via Class::name_?
1432    const char* descriptor = mh.GetDeclaringClassDescriptor();
1433    CHECK(descriptor != NULL);
1434    std::string class_name(PrettyDescriptor(descriptor));
1435    SirtRef<String> class_name_object(soa.Self(),
1436                                      String::AllocFromModifiedUtf8(soa.Self(),
1437                                                                    class_name.c_str()));
1438    if (class_name_object.get() == NULL) {
1439      return NULL;
1440    }
1441    const char* method_name = mh.GetName();
1442    CHECK(method_name != NULL);
1443    SirtRef<String> method_name_object(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(),
1444                                                                                 method_name));
1445    if (method_name_object.get() == NULL) {
1446      return NULL;
1447    }
1448    const char* source_file = mh.GetDeclaringClassSourceFile();
1449    SirtRef<String> source_name_object(soa.Self(), String::AllocFromModifiedUtf8(soa.Self(),
1450                                                                                 source_file));
1451    StackTraceElement* obj = StackTraceElement::Alloc(soa.Self(),
1452                                                      class_name_object.get(),
1453                                                      method_name_object.get(),
1454                                                      source_name_object.get(),
1455                                                      line_number);
1456    if (obj == NULL) {
1457      return NULL;
1458    }
1459#ifdef MOVING_GARBAGE_COLLECTOR
1460    // Re-read after potential GC
1461    java_traces = Decode<ObjectArray<Object>*>(soa.Env(), result);
1462    method_trace = down_cast<ObjectArray<Object>*>(Decode<Object*>(soa.Env(), internal));
1463    pc_trace = down_cast<IntArray*>(method_trace->Get(depth));
1464#endif
1465    java_traces->Set(i, obj);
1466  }
1467  return result;
1468}
1469
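// Formats a detail message printf-style and throws an exception of the given descriptor, e.g.
// (illustrative call only):
//   self->ThrowNewExceptionF("Ljava/lang/RuntimeException;", "bad value: %d", value);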
1470void Thread::ThrowNewExceptionF(const char* exception_class_descriptor, const char* fmt, ...) {
1471  va_list args;
1472  va_start(args, fmt);
1473  ThrowNewExceptionV(exception_class_descriptor, fmt, args);
1474  va_end(args);
1475}
1476
1477void Thread::ThrowNewExceptionV(const char* exception_class_descriptor, const char* fmt, va_list ap) {
1478  std::string msg;
1479  StringAppendV(&msg, fmt, ap);
1480  ThrowNewException(exception_class_descriptor, msg.c_str());
1481}
1482
1483void Thread::ThrowNewException(const char* exception_class_descriptor, const char* msg) {
1484  AssertNoPendingException(); // Callers should either clear or call ThrowNewWrappedException.
1485  ThrowNewWrappedException(exception_class_descriptor, msg);
1486}
1487
1488void Thread::ThrowNewWrappedException(const char* exception_class_descriptor, const char* msg) {
1489  // Convert "Ljava/lang/Exception;" into JNI-style "java/lang/Exception".
1490  CHECK_EQ('L', exception_class_descriptor[0]);
1491  std::string descriptor(exception_class_descriptor + 1);
1492  CHECK_EQ(';', descriptor[descriptor.length() - 1]);
1493  descriptor.erase(descriptor.length() - 1);
1494
1495  JNIEnv* env = GetJniEnv();
1496  jobject cause = env->ExceptionOccurred();
1497  env->ExceptionClear();
1498
1499  ScopedLocalRef<jclass> exception_class(env, env->FindClass(descriptor.c_str()));
1500  if (exception_class.get() == NULL) {
1501    LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI FindClass failed: "
1502               << PrettyTypeOf(GetException());
1503    CHECK(IsExceptionPending());
1504    return;
1505  }
1506  if (!Runtime::Current()->IsStarted()) {
1507    // Something is trying to throw an exception without a started
1508    // runtime, which is the common case in the compiler. We won't be
1509    // able to invoke the constructor of the exception, so use
1510    // AllocObject which will not invoke a constructor.
1511    ScopedLocalRef<jthrowable> exception(
1512        env, reinterpret_cast<jthrowable>(env->AllocObject(exception_class.get())));
1513    if (exception.get() != NULL) {
1514      ScopedObjectAccessUnchecked soa(env);
1515      Throwable* t = reinterpret_cast<Throwable*>(soa.Self()->DecodeJObject(exception.get()));
1516      t->SetDetailMessage(String::AllocFromModifiedUtf8(soa.Self(), msg));
1517      if (cause != NULL) {
1518        t->SetCause(soa.Decode<Throwable*>(cause));
1519      }
1520      soa.Self()->SetException(t);
1521    } else {
1522      LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI AllocObject failed: "
1523                 << PrettyTypeOf(GetException());
1524      CHECK(IsExceptionPending());
1525    }
1526    return;
1527  }
1528  int rc = ::art::ThrowNewException(env, exception_class.get(), msg, cause);
1529  if (rc != JNI_OK) {
1530    LOG(ERROR) << "Couldn't throw new " << descriptor << " because JNI ThrowNew failed: "
1531               << PrettyTypeOf(GetException());
1532    CHECK(IsExceptionPending());
1533  }
1534}
1535
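// Throws OutOfMemoryError with the given message. If we are already in the middle of throwing an
// OOME (typically because allocating the message or the exception itself failed), fall back to the
// runtime's pre-allocated OutOfMemoryError instead of recursing.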
1536void Thread::ThrowOutOfMemoryError(const char* msg) {
1537  LOG(ERROR) << StringPrintf("Throwing OutOfMemoryError \"%s\"%s",
1538      msg, (throwing_OutOfMemoryError_ ? " (recursive case)" : ""));
1539  if (!throwing_OutOfMemoryError_) {
1540    throwing_OutOfMemoryError_ = true;
1541    ThrowNewException("Ljava/lang/OutOfMemoryError;", msg);
1542  } else {
1543    Dump(LOG(ERROR)); // The pre-allocated OOME has no stack, so help out and log one.
1544    SetException(Runtime::Current()->GetPreAllocatedOutOfMemoryError());
1545  }
1546  throwing_OutOfMemoryError_ = false;
1547}
1548
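// Helpers intended to be invoked by hand from a debugger such as gdb, not from runtime code.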
1549Thread* Thread::CurrentFromGdb() {
1550  return Thread::Current();
1551}
1552
1553void Thread::DumpFromGdb() const {
1554  std::ostringstream ss;
1555  Dump(ss);
1556  std::string str(ss.str());
1557  // log to stderr for debugging command line processes
1558  std::cerr << str;
1559#ifdef HAVE_ANDROID_OS
1560  // log to logcat for debugging frameworks processes
1561  LOG(INFO) << str;
1562#endif
1563}
1564
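// Table pairing each entry point's offset within Thread::entrypoints_ with its name. The order
// must match the EntryPoints struct; DumpThreadOffset below verifies this with CHECKs.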
1565struct EntryPointInfo {
1566  uint32_t offset;
1567  const char* name;
1568};
1569#define ENTRY_POINT_INFO(x) { ENTRYPOINT_OFFSET(x), #x }
1570static const EntryPointInfo gThreadEntryPointInfo[] = {
1571  ENTRY_POINT_INFO(pAllocArrayFromCode),
1572  ENTRY_POINT_INFO(pAllocArrayFromCodeWithAccessCheck),
1573  ENTRY_POINT_INFO(pAllocObjectFromCode),
1574  ENTRY_POINT_INFO(pAllocObjectFromCodeWithAccessCheck),
1575  ENTRY_POINT_INFO(pCheckAndAllocArrayFromCode),
1576  ENTRY_POINT_INFO(pCheckAndAllocArrayFromCodeWithAccessCheck),
1577  ENTRY_POINT_INFO(pInstanceofNonTrivialFromCode),
1578  ENTRY_POINT_INFO(pCanPutArrayElementFromCode),
1579  ENTRY_POINT_INFO(pCheckCastFromCode),
1580  ENTRY_POINT_INFO(pDebugMe),
1581  ENTRY_POINT_INFO(pUpdateDebuggerFromCode),
1582  ENTRY_POINT_INFO(pInitializeStaticStorage),
1583  ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccessFromCode),
1584  ENTRY_POINT_INFO(pInitializeTypeFromCode),
1585  ENTRY_POINT_INFO(pResolveStringFromCode),
1586  ENTRY_POINT_INFO(pGetAndClearException),
1587  ENTRY_POINT_INFO(pSet32Instance),
1588  ENTRY_POINT_INFO(pSet32Static),
1589  ENTRY_POINT_INFO(pSet64Instance),
1590  ENTRY_POINT_INFO(pSet64Static),
1591  ENTRY_POINT_INFO(pSetObjInstance),
1592  ENTRY_POINT_INFO(pSetObjStatic),
1593  ENTRY_POINT_INFO(pGet32Instance),
1594  ENTRY_POINT_INFO(pGet32Static),
1595  ENTRY_POINT_INFO(pGet64Instance),
1596  ENTRY_POINT_INFO(pGet64Static),
1597  ENTRY_POINT_INFO(pGetObjInstance),
1598  ENTRY_POINT_INFO(pGetObjStatic),
1599  ENTRY_POINT_INFO(pHandleFillArrayDataFromCode),
1600  ENTRY_POINT_INFO(pFindNativeMethod),
1601  ENTRY_POINT_INFO(pJniMethodStart),
1602  ENTRY_POINT_INFO(pJniMethodStartSynchronized),
1603  ENTRY_POINT_INFO(pJniMethodEnd),
1604  ENTRY_POINT_INFO(pJniMethodEndSynchronized),
1605  ENTRY_POINT_INFO(pJniMethodEndWithReference),
1606  ENTRY_POINT_INFO(pJniMethodEndWithReferenceSynchronized),
1607  ENTRY_POINT_INFO(pLockObjectFromCode),
1608  ENTRY_POINT_INFO(pUnlockObjectFromCode),
1609  ENTRY_POINT_INFO(pCmpgDouble),
1610  ENTRY_POINT_INFO(pCmpgFloat),
1611  ENTRY_POINT_INFO(pCmplDouble),
1612  ENTRY_POINT_INFO(pCmplFloat),
1613  ENTRY_POINT_INFO(pFmod),
1614  ENTRY_POINT_INFO(pSqrt),
1615  ENTRY_POINT_INFO(pL2d),
1616  ENTRY_POINT_INFO(pFmodf),
1617  ENTRY_POINT_INFO(pL2f),
1618  ENTRY_POINT_INFO(pD2iz),
1619  ENTRY_POINT_INFO(pF2iz),
1620  ENTRY_POINT_INFO(pIdivmod),
1621  ENTRY_POINT_INFO(pD2l),
1622  ENTRY_POINT_INFO(pF2l),
1623  ENTRY_POINT_INFO(pLdiv),
1624  ENTRY_POINT_INFO(pLdivmod),
1625  ENTRY_POINT_INFO(pLmul),
1626  ENTRY_POINT_INFO(pShlLong),
1627  ENTRY_POINT_INFO(pShrLong),
1628  ENTRY_POINT_INFO(pUshrLong),
1629  ENTRY_POINT_INFO(pIndexOf),
1630  ENTRY_POINT_INFO(pMemcmp16),
1631  ENTRY_POINT_INFO(pStringCompareTo),
1632  ENTRY_POINT_INFO(pMemcpy),
1633  ENTRY_POINT_INFO(pUnresolvedDirectMethodTrampolineFromCode),
1634  ENTRY_POINT_INFO(pInvokeDirectTrampolineWithAccessCheck),
1635  ENTRY_POINT_INFO(pInvokeInterfaceTrampoline),
1636  ENTRY_POINT_INFO(pInvokeInterfaceTrampolineWithAccessCheck),
1637  ENTRY_POINT_INFO(pInvokeStaticTrampolineWithAccessCheck),
1638  ENTRY_POINT_INFO(pInvokeSuperTrampolineWithAccessCheck),
1639  ENTRY_POINT_INFO(pInvokeVirtualTrampolineWithAccessCheck),
1640  ENTRY_POINT_INFO(pCheckSuspendFromCode),
1641  ENTRY_POINT_INFO(pTestSuspendFromCode),
1642  ENTRY_POINT_INFO(pDeliverException),
1643  ENTRY_POINT_INFO(pThrowAbstractMethodErrorFromCode),
1644  ENTRY_POINT_INFO(pThrowArrayBoundsFromCode),
1645  ENTRY_POINT_INFO(pThrowDivZeroFromCode),
1646  ENTRY_POINT_INFO(pThrowNoSuchMethodFromCode),
1647  ENTRY_POINT_INFO(pThrowNullPointerFromCode),
1648  ENTRY_POINT_INFO(pThrowStackOverflowFromCode),
1649};
1650#undef ENTRY_POINT_INFO
1651
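// Writes to 'os' the name of the Thread field or entry point located at 'offset', or the raw
// offset if it is not recognized.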
1652void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset, size_t size_of_pointers) {
1653  CHECK_EQ(size_of_pointers, 4U); // TODO: support 64-bit targets.
1654
1655#define DO_THREAD_OFFSET(x) if (offset == static_cast<uint32_t>(OFFSETOF_VOLATILE_MEMBER(Thread, x))) { os << # x; return; }
1656  DO_THREAD_OFFSET(state_and_flags_);
1657  DO_THREAD_OFFSET(card_table_);
1658  DO_THREAD_OFFSET(exception_);
1659  DO_THREAD_OFFSET(opeer_);
1660  DO_THREAD_OFFSET(jni_env_);
1661  DO_THREAD_OFFSET(self_);
1662  DO_THREAD_OFFSET(stack_end_);
1663  DO_THREAD_OFFSET(suspend_count_);
1664  DO_THREAD_OFFSET(thin_lock_id_);
1665  //DO_THREAD_OFFSET(top_of_managed_stack_);
1666  //DO_THREAD_OFFSET(top_of_managed_stack_pc_);
1667  DO_THREAD_OFFSET(top_sirt_);
1668#undef DO_THREAD_OFFSET
1669
1670  size_t entry_point_count = arraysize(gThreadEntryPointInfo);
1671  CHECK_EQ(entry_point_count * size_of_pointers, sizeof(EntryPoints));
1672  uint32_t expected_offset = OFFSETOF_MEMBER(Thread, entrypoints_);
1673  for (size_t i = 0; i < entry_point_count; ++i) {
1674    CHECK_EQ(gThreadEntryPointInfo[i].offset, expected_offset) << gThreadEntryPointInfo[i].name;
1675    expected_offset += size_of_pointers;
1676    if (gThreadEntryPointInfo[i].offset == offset) {
1677      os << gThreadEntryPointInfo[i].name;
1678      return;
1679    }
1680  }
1681  os << offset;
1682}
1683
1684static const bool kDebugExceptionDelivery = false;
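// Walks the thread's stack looking for a catch block that handles the pending exception's type,
// recording the quick frame and pc to long jump to (or the upcall frame if no handler is found).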
1685class CatchBlockStackVisitor : public StackVisitor {
1686 public:
1687  CatchBlockStackVisitor(Thread* self, Throwable* exception)
1688      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1689      : StackVisitor(self->GetManagedStack(), self->GetInstrumentationStack(), self->GetLongJumpContext()),
1690        self_(self), exception_(exception), to_find_(exception->GetClass()), throw_method_(NULL),
1691        throw_frame_id_(0), throw_dex_pc_(0), handler_quick_frame_(NULL),
1692        handler_quick_frame_pc_(0), handler_dex_pc_(0), native_method_count_(0),
1693        method_tracing_active_(Runtime::Current()->IsMethodTracingActive()) {
1694    // The exception is not in any root set while we walk, so we can't allow GC.
1695    last_no_assert_suspension_cause_ = self->StartAssertNoThreadSuspension("Finding catch block");
1696  }
1697
1698  ~CatchBlockStackVisitor() {
1699    LOG(FATAL) << "UNREACHABLE";  // Expected to take long jump.
1700  }
1701
1702  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1704    AbstractMethod* method = GetMethod();
1705    if (method == NULL) {
1706      // This is the upcall; remember the frame and last pc so that we may long jump to them.
1707      handler_quick_frame_pc_ = GetCurrentQuickFramePc();
1708      handler_quick_frame_ = GetCurrentQuickFrame();
1709      return false;  // End stack walk.
1710    }
1711    uint32_t dex_pc = DexFile::kDexNoIndex;
1712    if (method->IsRuntimeMethod()) {
1713      // Ignore callee save methods.
1714      DCHECK(method->IsCalleeSaveMethod());
1715    } else {
1716      if (throw_method_ == NULL) {
1717        throw_method_ = method;
1718        throw_frame_id_ = GetFrameId();
1719        throw_dex_pc_ = GetDexPc();
1720      }
1721      if (method->IsNative()) {
1722        native_method_count_++;
1723      } else {
1724        // Unwind stack when an exception occurs during instrumentation
1725        if (UNLIKELY(method_tracing_active_ &&
1726                     GetInstrumentationExitPc() == GetCurrentQuickFramePc())) {
1727          uintptr_t pc = InstrumentationMethodUnwindFromCode(Thread::Current());
1728          dex_pc = method->ToDexPc(pc);
1729        } else {
1730          dex_pc = GetDexPc();
1731        }
1732      }
1733    }
1734    if (dex_pc != DexFile::kDexNoIndex) {
1735      uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc);
1736      if (found_dex_pc != DexFile::kDexNoIndex) {
1737        handler_dex_pc_ = found_dex_pc;
1738        handler_quick_frame_pc_ = method->ToNativePc(found_dex_pc);
1739        handler_quick_frame_ = GetCurrentQuickFrame();
1740        return false;  // End stack walk.
1741      }
1742    }
1743    return true;  // Continue stack walk.
1744  }
1745
1746  void DoLongJump() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1747    AbstractMethod* catch_method = *handler_quick_frame_;
1748    if (kDebugExceptionDelivery) {
1749      if (catch_method == NULL) {
1750        LOG(INFO) << "Handler is upcall";
1751      } else {
1752        const DexFile& dex_file = *catch_method->GetDeclaringClass()->GetDexCache()->GetDexFile();
1753        int line_number = dex_file.GetLineNumFromPC(catch_method, handler_dex_pc_);
1754        LOG(INFO) << "Handler: " << PrettyMethod(catch_method) << " (line: " << line_number << ")";
1755      }
1756    }
1757    self_->SetException(exception_);  // Exception back in root set.
1758    self_->EndAssertNoThreadSuspension(last_no_assert_suspension_cause_);
1759    // Do debugger PostException after allowing thread suspension again.
1760    Dbg::PostException(self_, throw_frame_id_, throw_method_, throw_dex_pc_,
1761                       catch_method, handler_dex_pc_, exception_);
1762    // Place context back on thread so it will be available when we continue.
1763    self_->ReleaseLongJumpContext(context_);
1764    context_->SetSP(reinterpret_cast<uintptr_t>(handler_quick_frame_));
1765    CHECK_NE(handler_quick_frame_pc_, 0u);
1766    context_->SetPC(handler_quick_frame_pc_);
1767    context_->SmashCallerSaves();
1768    context_->DoLongJump();
1769  }
1770
1771 private:
1772  Thread* self_;
1773  Throwable* exception_;
1774  // The type of the exception catch block to find.
1775  Class* to_find_;
1776  AbstractMethod* throw_method_;
1777  JDWP::FrameId throw_frame_id_;
1778  uint32_t throw_dex_pc_;
1779  // Quick frame with found handler or last frame if no handler found.
1780  AbstractMethod** handler_quick_frame_;
1781  // PC to branch to for the handler.
1782  uintptr_t handler_quick_frame_pc_;
1783  // Associated dex PC.
1784  uint32_t handler_dex_pc_;
1785  // Number of native methods passed in crawl (equates to number of SIRTs to pop)
1786  uint32_t native_method_count_;
1787  // Is method tracing active?
1788  const bool method_tracing_active_;
1789  // Support for nesting no thread suspension checks.
1790  const char* last_no_assert_suspension_cause_;
1791};
1792
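// Delivers the thread's pending exception: the exception is cleared (so class resolution during
// the handler search can't observe it), the stack is walked to find a catch block, and then we
// long jump to either the handler or the upcall frame. This function does not return.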
1793void Thread::QuickDeliverException() {
1794  Throwable* exception = GetException();  // Get exception from thread
1795  CHECK(exception != NULL);
1796  // Don't leave exception visible while we try to find the handler, which may cause class
1797  // resolution.
1798  ClearException();
1799  if (kDebugExceptionDelivery) {
1800    String* msg = exception->GetDetailMessage();
1801    std::string str_msg(msg != NULL ? msg->ToModifiedUtf8() : "");
1802    DumpStack(LOG(INFO) << "Delivering exception: " << PrettyTypeOf(exception)
1803                        << ": " << str_msg << "\n");
1804  }
1805  CatchBlockStackVisitor catch_finder(this, exception);
1806  catch_finder.WalkStack(true);
1807  catch_finder.DoLongJump();
1808  LOG(FATAL) << "UNREACHABLE";
1809}
1810
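// Returns the thread's cached long jump Context, or creates a fresh one if the cached context is
// already in use. Callers hand the context back via ReleaseLongJumpContext.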
1811Context* Thread::GetLongJumpContext() {
1812  Context* result = long_jump_context_;
1813  if (result == NULL) {
1814    result = Context::Create();
1815  } else {
1816    long_jump_context_ = NULL;  // Avoid context being shared.
1817    result->Reset();
1818  }
1819  return result;
1820}
1821
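// Returns the first non-runtime method on this thread's stack, optionally reporting its dex pc
// and frame id; returns NULL if no such frame is found.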
1822AbstractMethod* Thread::GetCurrentMethod(uint32_t* dex_pc, size_t* frame_id) const {
1823  struct CurrentMethodVisitor : public StackVisitor {
1824    CurrentMethodVisitor(const ManagedStack* stack,
1825                         const std::deque<InstrumentationStackFrame>* instrumentation_stack)
1826        SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1827        : StackVisitor(stack, instrumentation_stack, NULL), method_(NULL), dex_pc_(0), frame_id_(0) {}
1828
1829    virtual bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1830      AbstractMethod* m = GetMethod();
1831      if (m->IsRuntimeMethod()) {
1832        // Continue if this is a runtime method.
1833        return true;
1834      }
1835      method_ = m;
1836      dex_pc_ = GetDexPc();
1837      frame_id_ = GetFrameId();
1838      return false;
1839    }
1840    AbstractMethod* method_;
1841    uint32_t dex_pc_;
1842    size_t frame_id_;
1843  };
1844
1845  CurrentMethodVisitor visitor(GetManagedStack(), GetInstrumentationStack());
1846  visitor.WalkStack(false);
1847  if (dex_pc != NULL) {
1848    *dex_pc = visitor.dex_pc_;
1849  }
1850  if (frame_id != NULL) {
1851    *frame_id = visitor.frame_id_;
1852  }
1853  return visitor.method_;
1854}
1855
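// Returns whether this thread currently holds the lock on 'object', by comparing the object's
// lock owner id with this thread's thin lock id. A NULL object is never held.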
1856bool Thread::HoldsLock(Object* object) {
1857  if (object == NULL) {
1858    return false;
1859  }
1860  return object->GetThinLockId() == thin_lock_id_;
1861}
1862
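// Visits the object references in every frame of a thread's stack. Shadow frames are scanned via
// their reference array (or via the dex GC map on the portable path); quick frames are scanned
// using the method's native GC map, vmap table and spill masks.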
1863// RootVisitor parameters are: (const Object* obj, size_t vreg, const StackVisitor* visitor).
1864template <typename RootVisitor>
1865class ReferenceMapVisitor : public StackVisitor {
1866 public:
1867  ReferenceMapVisitor(const ManagedStack* stack,
1868                      const std::deque<InstrumentationStackFrame>* instrumentation_stack,
1869                      Context* context, const RootVisitor& visitor)
1870      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
1871      : StackVisitor(stack, instrumentation_stack, context), visitor_(visitor) {}
1872
1873  bool VisitFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
1874    if (false) {  // Set to true to log every frame visited and its dex pc.
1875      LOG(INFO) << "Visiting stack roots in " << PrettyMethod(GetMethod())
1876          << StringPrintf("@ PC:%04x", GetDexPc());
1877    }
1878    ShadowFrame* shadow_frame = GetCurrentShadowFrame();
1879    if (shadow_frame != NULL) {
1880      AbstractMethod* m = shadow_frame->GetMethod();
1881      size_t num_regs = shadow_frame->NumberOfVRegs();
1882      if (m->IsNative() || shadow_frame->HasReferenceArray()) {
1883        // SIRT for JNI or References for interpreter.
1884        for (size_t reg = 0; reg < num_regs; ++reg) {
1885          Object* ref = shadow_frame->GetVRegReference(reg);
1886          if (ref != NULL) {
1887            visitor_(ref, reg, this);
1888          }
1889        }
1890      } else {
1891        // Java method.
1892        // The portable path uses the DexGcMap stored in Method::native_gc_map_.
1893        const uint8_t* gc_map = m->GetNativeGcMap();
1894        CHECK(gc_map != NULL) << PrettyMethod(m);
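        // The GC map starts with a big-endian 32-bit length for the DexPcToReferenceMap data
        // that follows it.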
1895        uint32_t gc_map_length = static_cast<uint32_t>((gc_map[0] << 24) |
1896                                                       (gc_map[1] << 16) |
1897                                                       (gc_map[2] << 8) |
1898                                                       (gc_map[3] << 0));
1899        verifier::DexPcToReferenceMap dex_gc_map(gc_map + 4, gc_map_length);
1900        uint32_t dex_pc = GetDexPc();
1901        const uint8_t* reg_bitmap = dex_gc_map.FindBitMap(dex_pc);
1902        DCHECK(reg_bitmap != NULL);
1903        num_regs = std::min(dex_gc_map.RegWidth() * 8, num_regs);
1904        for (size_t reg = 0; reg < num_regs; ++reg) {
1905          if (TestBitmap(reg, reg_bitmap)) {
1906            Object* ref = shadow_frame->GetVRegReference(reg);
1907            if (ref != NULL) {
1908              visitor_(ref, reg, this);
1909            }
1910          }
1911        }
1912      }
1913    } else {
1914      AbstractMethod* m = GetMethod();
1915      // Process register map (which native and runtime methods don't have)
1916      if (!m->IsNative() && !m->IsRuntimeMethod() && !m->IsProxyMethod()) {
1917        const uint8_t* native_gc_map = m->GetNativeGcMap();
1918        CHECK(native_gc_map != NULL) << PrettyMethod(m);
1919        mh_.ChangeMethod(m);
1920        const DexFile::CodeItem* code_item = mh_.GetCodeItem();
1921        DCHECK(code_item != NULL) << PrettyMethod(m); // Can't be NULL or how would we compile its instructions?
1922        NativePcOffsetToReferenceMap map(native_gc_map);
1923        size_t num_regs = std::min(map.RegWidth() * 8,
1924                                   static_cast<size_t>(code_item->registers_size_));
1925        if (num_regs > 0) {
1926          const uint8_t* reg_bitmap = map.FindBitMap(GetNativePcOffset());
1927          DCHECK(reg_bitmap != NULL);
1928          const VmapTable vmap_table(m->GetVmapTableRaw());
1929          uint32_t core_spills = m->GetCoreSpillMask();
1930          uint32_t fp_spills = m->GetFpSpillMask();
1931          size_t frame_size = m->GetFrameSizeInBytes();
1932          // For all dex registers in the bitmap
1933          AbstractMethod** cur_quick_frame = GetCurrentQuickFrame();
1934          DCHECK(cur_quick_frame != NULL);
1935          for (size_t reg = 0; reg < num_regs; ++reg) {
1936            // Does this register hold a reference?
1937            if (TestBitmap(reg, reg_bitmap)) {
1938              uint32_t vmap_offset;
1939              Object* ref;
1940              if (vmap_table.IsInContext(reg, vmap_offset, kReferenceVReg)) {
1941                uintptr_t val = GetGPR(vmap_table.ComputeRegister(core_spills, vmap_offset,
1942                                                                  kReferenceVReg));
1943                ref = reinterpret_cast<Object*>(val);
1944              } else {
1945                ref = reinterpret_cast<Object*>(GetVReg(cur_quick_frame, code_item, core_spills,
1946                                                        fp_spills, frame_size, reg));
1947              }
1948
1949              if (ref != NULL) {
1950                visitor_(ref, reg, this);
1951              }
1952            }
1953          }
1954        }
1955      }
1956    }
1957    return true;
1958  }
1959
1960 private:
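  // Returns whether bit 'reg' is set in 'reg_vector' (bits are packed least-significant first
  // within each byte).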
1961  static bool TestBitmap(int reg, const uint8_t* reg_vector) {
1962    return ((reg_vector[reg / 8] >> (reg % 8)) & 0x01) != 0;
1963  }
1964
1965  // Callback invoked on each root found during the walk.
1966  const RootVisitor& visitor_;
1967
1968  // A method helper we keep around to avoid dex file/cache re-computations.
1969  MethodHelper mh_;
1970};
1971
1972class RootCallbackVisitor {
1973 public:
1974  RootCallbackVisitor(Heap::RootVisitor* visitor, void* arg) : visitor_(visitor), arg_(arg) {}
1977
1978  void operator()(const Object* obj, size_t, const StackVisitor*) const {
1979    visitor_(obj, arg_);
1980  }
1981
1982 private:
1983  Heap::RootVisitor* visitor_;
1984  void* arg_;
1985};
1986
1987class VerifyCallbackVisitor {
1988 public:
1989  VerifyCallbackVisitor(Heap::VerifyRootVisitor* visitor, void* arg)
1990      : visitor_(visitor),
1991        arg_(arg) {
1992  }
1993
1994  void operator()(const Object* obj, size_t vreg, const StackVisitor* visitor) const {
1995    visitor_(obj, arg_, vreg, visitor);
1996  }
1997
1998 private:
1999  Heap::VerifyRootVisitor* const visitor_;
2000  void* const arg_;
2001};
2002
2003struct VerifyRootWrapperArg {
2004  Heap::VerifyRootVisitor* visitor;
2005  void* arg;
2006};
2007
2008static void VerifyRootWrapperCallback(const Object* root, void* arg) {
2009  VerifyRootWrapperArg* wrapperArg = reinterpret_cast<VerifyRootWrapperArg*>(arg);
2010  wrapperArg->visitor(root, wrapperArg->arg, 0, NULL);
2011}
2012
2013void Thread::VerifyRoots(Heap::VerifyRootVisitor* visitor, void* arg) {
2014  // We need to map from a RootVisitor to VerifyRootVisitor, so pass in nulls for arguments we
2015  // don't have.
2016  VerifyRootWrapperArg wrapperArg;
2017  wrapperArg.arg = arg;
2018  wrapperArg.visitor = visitor;
2019
2020  if (opeer_ != NULL) {
2021    VerifyRootWrapperCallback(opeer_, &wrapperArg);
2022  }
2023  if (exception_ != NULL) {
2024    VerifyRootWrapperCallback(exception_, &wrapperArg);
2025  }
2026  if (class_loader_override_ != NULL) {
2027    VerifyRootWrapperCallback(class_loader_override_, &wrapperArg);
2028  }
2029  jni_env_->locals.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2030  jni_env_->monitors.VisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2031
2032  SirtVisitRoots(VerifyRootWrapperCallback, &wrapperArg);
2033
2034  // Visit roots on this thread's stack
2035  Context* context = GetLongJumpContext();
2036  VerifyCallbackVisitor visitorToCallback(visitor, arg);
2037  ReferenceMapVisitor<VerifyCallbackVisitor> mapper(GetManagedStack(), GetInstrumentationStack(),
2038                                                    context, visitorToCallback);
2039  mapper.WalkStack();
2040  ReleaseLongJumpContext(context);
2041}
2042
2043void Thread::VisitRoots(Heap::RootVisitor* visitor, void* arg) {
2044  if (opeer_ != NULL) {
2045    visitor(opeer_, arg);
2046  }
2047  if (exception_ != NULL) {
2048    visitor(exception_, arg);
2049  }
2050  if (class_loader_override_ != NULL) {
2051    visitor(class_loader_override_, arg);
2052  }
2053  jni_env_->locals.VisitRoots(visitor, arg);
2054  jni_env_->monitors.VisitRoots(visitor, arg);
2055
2056  SirtVisitRoots(visitor, arg);
2057
2058  // Visit roots on this thread's stack
2059  Context* context = GetLongJumpContext();
2060  RootCallbackVisitor visitorToCallback(visitor, arg);
2061  ReferenceMapVisitor<RootCallbackVisitor> mapper(GetManagedStack(), GetInstrumentationStack(), context,
2062                                                  visitorToCallback);
2063  mapper.WalkStack();
2064  ReleaseLongJumpContext(context);
2065}
2066
2067#if VERIFY_OBJECT_ENABLED
2068static void VerifyObject(const Object* obj, void* arg) {
2069  Heap* heap = reinterpret_cast<Heap*>(arg);
2070  heap->VerifyObject(obj);
2071}
2072
2073void Thread::VerifyStack() {
2074  UniquePtr<Context> context(Context::Create());
2075  RootCallbackVisitor visitorToCallback(VerifyObject, Runtime::Current()->GetHeap());
2076  ReferenceMapVisitor<RootCallbackVisitor> mapper(GetManagedStack(), GetInstrumentationStack(), context.get(),
2077                                                  visitorToCallback);
2078  mapper.WalkStack();
2079}
2080#endif
2081
2082// Set the stack end to the value to be used while handling a stack overflow.
2083void Thread::SetStackEndForStackOverflow() {
2084  // During stack overflow we allow use of the full stack
2085  if (stack_end_ == stack_begin_) {
2086    DumpStack(std::cerr);
2087    LOG(FATAL) << "Need to increase kStackOverflowReservedBytes (currently "
2088               << kStackOverflowReservedBytes << ")";
2089  }
2090
2091  stack_end_ = stack_begin_;
2092}
2093
2094std::ostream& operator<<(std::ostream& os, const Thread& thread) {
2095  thread.ShortDump(os);
2096  return os;
2097}
2098
2099#ifndef NDEBUG
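// Debug-build check that it is safe for this thread to suspend here: no AssertNoThreadSuspension
// scope is active and, if requested, no locks other than the mutator lock are held.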
2100void Thread::AssertThreadSuspensionIsAllowable(bool check_locks) const {
2101  CHECK_EQ(0u, no_thread_suspension_) << last_no_thread_suspension_cause_;
2102  if (check_locks) {
2103    bool bad_mutexes_held = false;
2104    for (int i = kMaxMutexLevel; i >= 0; --i) {
2105      // We expect no locks except the mutator_lock_.
2106      if (i != kMutatorLock) {
2107        BaseMutex* held_mutex = GetHeldMutex(static_cast<LockLevel>(i));
2108        if (held_mutex != NULL) {
2109          LOG(ERROR) << "holding \"" << held_mutex->GetName()
2110                  << "\" at point where thread suspension is expected";
2111          bad_mutexes_held = true;
2112        }
2113      }
2114    }
2115    CHECK(!bad_mutexes_held);
2116  }
2117}
2118#endif
2119
2120}  // namespace art
2121