thread.cc revision 01158d7a57c8321370667a6045220237d16e0da8
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "thread.h"

#include <dynamic_annotations.h>
#include <pthread.h>
#include <sys/mman.h>

#include <algorithm>
#include <bitset>
#include <cerrno>
#include <iostream>
#include <list>

#include "class_linker.h"
#include "context.h"
#include "heap.h"
#include "jni_internal.h"
#include "object.h"
#include "runtime.h"
#include "runtime_support.h"
#include "scoped_jni_thread_state.h"
#include "thread_list.h"
#include "utils.h"

namespace art {

pthread_key_t Thread::pthread_key_self_;

static Class* gThrowable = NULL;
static Field* gThread_daemon = NULL;
static Field* gThread_group = NULL;
static Field* gThread_lock = NULL;
static Field* gThread_name = NULL;
static Field* gThread_priority = NULL;
static Field* gThread_uncaughtHandler = NULL;
static Field* gThread_vmData = NULL;
static Field* gThreadGroup_name = NULL;
static Method* gThread_run = NULL;
static Method* gThreadGroup_removeThread = NULL;
static Method* gUncaughtExceptionHandler_uncaughtException = NULL;

// Temporary debugging hook for compiler.
void DebugMe(Method* method, uint32_t info) {
  LOG(INFO) << "DebugMe";
  if (method != NULL) {
    LOG(INFO) << PrettyMethod(method);
  }
  LOG(INFO) << "Info: " << info;
}

}  // namespace art

// Called by generated code to throw an exception.
extern "C" void artDeliverExceptionHelper(art::Throwable* exception,
                                          art::Thread* thread,
                                          art::Method** sp) {
  /*
   * exception may be NULL, in which case this routine should
   * throw an NPE.  NOTE: this is a convenience for generated code,
   * which previously did the null check inline and constructed
   * and threw an NPE if NULL.  This routine is responsible for setting
   * exception_ in the thread and delivering the exception.
   */
  // Place a special frame at the TOS that will save all callee saves
  *sp = thread->CalleeSaveMethod();
  thread->SetTopOfStack(sp, 0);
  if (exception == NULL) {
    thread->ThrowNewException("Ljava/lang/NullPointerException;", "throw with null exception");
    exception = thread->GetException();
  }
  thread->DeliverException(exception);
}

// Called by generated code to throw a NullPointerException.
extern "C" void artThrowNullPointerExceptionFromCodeHelper(art::Thread* thread,
                                                           art::Method** sp) {
  // Place a special frame at the TOS that will save all callee saves
  *sp = thread->CalleeSaveMethod();
  thread->SetTopOfStack(sp, 0);
  thread->ThrowNewException("Ljava/lang/NullPointerException;", "unexpected null reference");
  art::Throwable* exception = thread->GetException();
  thread->DeliverException(exception);
}

// Called by generated code to throw an ArithmeticException for division by zero.
extern "C" void artThrowDivZeroFromCodeHelper(art::Thread* thread,
                                              art::Method** sp) {
  // Place a special frame at the TOS that will save all callee saves
  *sp = thread->CalleeSaveMethod();
  thread->SetTopOfStack(sp, 0);
  thread->ThrowNewException("Ljava/lang/ArithmeticException;", "divide by zero");
  art::Throwable* exception = thread->GetException();
  thread->DeliverException(exception);
}

// Called by generated code to throw an ArrayIndexOutOfBoundsException.
extern "C" void artThrowArrayBoundsFromCodeHelper(int index, int limit,
                                                  art::Thread* thread,
                                                  art::Method** sp) {
  // Place a special frame at the TOS that will save all callee saves
  *sp = thread->CalleeSaveMethod();
  thread->SetTopOfStack(sp, 0);
  thread->ThrowNewException("Ljava/lang/ArrayIndexOutOfBoundsException;",
                            "length=%d; index=%d", limit, index);
  art::Throwable* exception = thread->GetException();
  thread->DeliverException(exception);
}

namespace art {

// TODO: placeholder.  Helper function to initialize a type.
Class* InitializeTypeFromCode(uint32_t type_idx, Method* method) {
  /*
   * Should initialize & fix up method->dex_cache_resolved_types_[].
   * Returns the initialized type.  Does not return normally if an exception
   * is thrown, but instead initiates the catch.  Should be similar to
   * ClassLinker::InitializeStaticStorageFromCode.
   */
  UNIMPLEMENTED(FATAL);
  return NULL;
}

// TODO: placeholder.  Helper function to resolve a virtual method.
void ResolveMethodFromCode(Method* method, uint32_t method_idx) {
  /*
   * Slow-path handler on the invoke-virtual path, for the case in which the
   * base method is unresolved at compile time.  Doesn't need to
   * return anything - just either ensure that
   * method->dex_cache_resolved_methods_(method_idx) != NULL or
   * throw and unwind.  The caller will restart the call sequence
   * from the beginning.
   */
}

// TODO: placeholder.  Helper function to alloc array for OP_FILLED_NEW_ARRAY
Array* CheckAndAllocFromCode(uint32_t type_index, Method* method, int32_t component_count) {
  /*
   * Just a wrapper around Array::AllocFromCode() that additionally
   * throws a runtime exception "bad Filled array req" for 'D' and 'J'.
   */
  UNIMPLEMENTED(WARNING) << "Need check that not 'D' or 'J'";
  return Array::AllocFromCode(type_index, method, component_count);
}

// TODO: placeholder (throw on failure)
void CheckCastFromCode(const Class* a, const Class* b) {
  DCHECK(a->IsClass());
  DCHECK(b->IsClass());
  if (b->IsAssignableFrom(a)) {
    return;
  }
  UNIMPLEMENTED(FATAL);
}

void UnlockObjectFromCode(Thread* thread, Object* obj) {
  // TODO: throw and unwind if lock not held
  // TODO: throw and unwind on NPE
  obj->MonitorExit(thread);
}

void LockObjectFromCode(Thread* thread, Object* obj) {
  obj->MonitorEnter(thread);
  // TODO: throw and unwind on failure.
}

void CheckSuspendFromCode(Thread* thread) {
  Runtime::Current()->GetThreadList()->FullSuspendCheck(thread);
}

// TODO: placeholder
void StackOverflowFromCode(Method* method) {
  Thread::Current()->SetTopOfStackPC(reinterpret_cast<uintptr_t>(__builtin_return_address(0)));
  Thread::Current()->Dump(std::cerr);
  // NOTE: to save code space, this handler needs to look up its own Thread*
  UNIMPLEMENTED(FATAL) << "Stack overflow: " << PrettyMethod(method);
}

// TODO: placeholder
void ThrowNullPointerFromCode() {
  Thread::Current()->SetTopOfStackPC(reinterpret_cast<uintptr_t>(__builtin_return_address(0)));
  Thread::Current()->Dump(std::cerr);
  // NOTE: to save code space, this handler must look up caller's Method*
  UNIMPLEMENTED(FATAL) << "Null pointer exception";
}

// TODO: placeholder
void ThrowDivZeroFromCode() {
  UNIMPLEMENTED(FATAL) << "Divide by zero";
}

// TODO: placeholder
void ThrowArrayBoundsFromCode(int32_t index, int32_t limit) {
  UNIMPLEMENTED(FATAL) << "Bounds check exception, idx: " << index << ", limit: " << limit;
}

// TODO: placeholder
void ThrowVerificationErrorFromCode(int32_t src1, int32_t ref) {
  UNIMPLEMENTED(FATAL) << "Verification error, src1: " << src1 << " ref: " << ref;
}

// TODO: placeholder
void ThrowNegArraySizeFromCode(int32_t index) {
  UNIMPLEMENTED(FATAL) << "Negative array size: " << index;
}

// TODO: placeholder
void ThrowInternalErrorFromCode(int32_t errnum) {
  UNIMPLEMENTED(FATAL) << "Internal error: " << errnum;
}

// TODO: placeholder
void ThrowRuntimeExceptionFromCode(int32_t errnum) {
  UNIMPLEMENTED(FATAL) << "Runtime exception: " << errnum;
}

// TODO: placeholder
void ThrowNoSuchMethodFromCode(int32_t method_idx) {
  UNIMPLEMENTED(FATAL) << "No such method, idx: " << method_idx;
}

void ThrowAbstractMethodErrorFromCode(Method* method, Thread* thread) {
  thread->ThrowNewException("Ljava/lang/AbstractMethodError;",
                            "abstract method \"%s\"",
                            PrettyMethod(method).c_str());
  thread->DeliverException(thread->GetException());
}

/*
 * Temporary placeholder.  Should include run-time checks for the size
 * of the fill data <= size of the array.  If not, throw an
 * ArrayIndexOutOfBoundsException.  As with other new "FromCode" routines, this
 * should return to the caller only if no exception has been thrown.
 *
 * NOTE: When dealing with a raw dex file, the data to be copied uses
 * little-endian ordering.  Require that dex2oat do any required swapping
 * so this routine can get by with a memcpy().
 *
 * Format of the data:
 *  ushort ident = 0x0300   magic value
 *  ushort width            width of each element in the table
 *  uint   size             number of elements in the table
 *  ubyte  data[size*width] table of data values (may contain a single-byte
 *                          padding at the end)
 */
void HandleFillArrayDataFromCode(Array* array, const uint16_t* table) {
  uint32_t size = static_cast<uint32_t>(table[2]) | (static_cast<uint32_t>(table[3]) << 16);
  uint32_t size_in_bytes = size * table[1];
  if (static_cast<int32_t>(size) > array->GetLength()) {
    ThrowArrayBoundsFromCode(array->GetLength(), size);
  }
  memcpy(reinterpret_cast<byte*>(array) + art::Array::DataOffset().Int32Value(),
         reinterpret_cast<const byte*>(&table[4]), size_in_bytes);
}
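
// Illustrative example (not part of the original source): for
// "new int[] {1, 2, 3}" the payload, viewed as uint16_t words, would be:
//   0x0300,          // ident
//   0x0004,          // width: 4 bytes per int element
//   0x0003, 0x0000,  // size: 3 (32-bit little-endian value split over two words)
//   0x0001, 0x0000,  // element 0 == 1
//   0x0002, 0x0000,  // element 1 == 2
//   0x0003, 0x0000,  // element 2 == 3
// so size_in_bytes == 3 * 4 == 12 bytes are copied into the array's data area.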

/*
 * TODO: placeholder for a method that can be called by the
 * invoke-interface trampoline to unwind and handle an exception.  The
 * trampoline will arrange it so that the caller appears to be the
 * callsite of the failed invoke-interface.  See comments in
 * runtime_support.S
 */
extern "C" void artFailedInvokeInterface() {
  UNIMPLEMENTED(FATAL) << "Unimplemented exception throw";
}

// See comments in runtime_support.S
extern "C" uint64_t artFindInterfaceMethodInCache(uint32_t method_idx,
                                                  Object* this_object, Method* caller_method) {
  if (this_object == NULL) {
    ThrowNullPointerFromCode();
  }
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  Method* interface_method = class_linker->ResolveMethod(method_idx, caller_method, false);
  if (interface_method == NULL) {
    UNIMPLEMENTED(FATAL) << "Could not resolve interface method. Throw error and unwind";
  }
  Method* method = this_object->GetClass()->FindVirtualMethodForInterface(interface_method);
  const void* code = method->GetCode();

  uint32_t method_uint = reinterpret_cast<uint32_t>(method);
  uint64_t code_uint = reinterpret_cast<uint32_t>(code);
  uint64_t result = ((code_uint << 32) | method_uint);
  return result;
}
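
// Illustrative note (not part of the original source): the 64-bit result packs
// the resolved Method* into the low 32 bits and its code entry point into the
// high 32 bits, so the trampoline in runtime_support.S can split it roughly as:
//   uint64_t packed = artFindInterfaceMethodInCache(idx, receiver, caller);
//   Method* resolved = reinterpret_cast<Method*>(static_cast<uint32_t>(packed));
//   const void* entry =
//       reinterpret_cast<const void*>(static_cast<uint32_t>(packed >> 32));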

// TODO: move to more appropriate location
/*
 * Float/double conversion requires clamping to min and max of integer form.  If
 * the target doesn't support this normally, use these.
 */
int64_t D2L(double d) {
  static const double kMaxLong = static_cast<double>(static_cast<int64_t>(0x7fffffffffffffffULL));
  static const double kMinLong = static_cast<double>(static_cast<int64_t>(0x8000000000000000ULL));
  if (d >= kMaxLong) {
    return static_cast<int64_t>(0x7fffffffffffffffULL);
  } else if (d <= kMinLong) {
    return static_cast<int64_t>(0x8000000000000000ULL);
  } else if (d != d) {  // NaN case
    return 0;
  } else {
    return static_cast<int64_t>(d);
  }
}

int64_t F2L(float f) {
  static const float kMaxLong = static_cast<float>(static_cast<int64_t>(0x7fffffffffffffffULL));
  static const float kMinLong = static_cast<float>(static_cast<int64_t>(0x8000000000000000ULL));
  if (f >= kMaxLong) {
    return static_cast<int64_t>(0x7fffffffffffffffULL);
  } else if (f <= kMinLong) {
    return static_cast<int64_t>(0x8000000000000000ULL);
  } else if (f != f) {  // NaN case
    return 0;
  } else {
    return static_cast<int64_t>(f);
  }
}
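
// Illustrative examples (not part of the original source) of the Java-style
// d2l semantics implemented above: D2L(1e30) == INT64_MAX, D2L(-1e30) ==
// INT64_MIN, D2L(NAN) == 0, and D2L(42.9) == 42 (truncation toward zero).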

// Return value helper for jobject return types
static Object* DecodeJObjectInThread(Thread* thread, jobject obj) {
  return thread->DecodeJObject(obj);
}

void Thread::InitFunctionPointers() {
#if defined(__arm__)
  pShlLong = art_shl_long;
  pShrLong = art_shr_long;
  pUshrLong = art_ushr_long;
  pIdiv = __aeabi_idiv;
  pIdivmod = __aeabi_idivmod;
  pI2f = __aeabi_i2f;
  pF2iz = __aeabi_f2iz;
  pD2f = __aeabi_d2f;
  pF2d = __aeabi_f2d;
  pD2iz = __aeabi_d2iz;
  pL2f = __aeabi_l2f;
  pL2d = __aeabi_l2d;
  pFadd = __aeabi_fadd;
  pFsub = __aeabi_fsub;
  pFdiv = __aeabi_fdiv;
  pFmul = __aeabi_fmul;
  pFmodf = fmodf;
  pDadd = __aeabi_dadd;
  pDsub = __aeabi_dsub;
  pDdiv = __aeabi_ddiv;
  pDmul = __aeabi_dmul;
  pFmod = fmod;
  pLdivmod = __aeabi_ldivmod;
  pLmul = __aeabi_lmul;
  pThrowNullPointerFromCode = art_throw_null_pointer_exception_from_code;
  pThrowArrayBoundsFromCode = art_throw_array_bounds_from_code;
  pThrowDivZeroFromCode = art_throw_div_zero_from_code;
  pInvokeInterfaceTrampoline = art_invoke_interface_trampoline;
#endif
  pDeliverException = art_deliver_exception;
  pF2l = F2L;
  pD2l = D2L;
  pAllocFromCode = Array::AllocFromCode;
  pCheckAndAllocFromCode = CheckAndAllocFromCode;
  pAllocObjectFromCode = Class::AllocObjectFromCode;
  pMemcpy = memcpy;
  pHandleFillArrayDataFromCode = HandleFillArrayDataFromCode;
  pGet32Static = Field::Get32StaticFromCode;
  pSet32Static = Field::Set32StaticFromCode;
  pGet64Static = Field::Get64StaticFromCode;
  pSet64Static = Field::Set64StaticFromCode;
  pGetObjStatic = Field::GetObjStaticFromCode;
  pSetObjStatic = Field::SetObjStaticFromCode;
  pCanPutArrayElementFromCode = Class::CanPutArrayElementFromCode;
  pInitializeTypeFromCode = InitializeTypeFromCode;
  pResolveMethodFromCode = ResolveMethodFromCode;
  pInitializeStaticStorage = ClassLinker::InitializeStaticStorageFromCode;
  pInstanceofNonTrivialFromCode = Object::InstanceOf;
  pCheckCastFromCode = CheckCastFromCode;
  pLockObjectFromCode = LockObjectFromCode;
  pUnlockObjectFromCode = UnlockObjectFromCode;
  pFindInstanceFieldFromCode = Field::FindInstanceFieldFromCode;
  pCheckSuspendFromCode = CheckSuspendFromCode;
  pStackOverflowFromCode = StackOverflowFromCode;
  pThrowVerificationErrorFromCode = ThrowVerificationErrorFromCode;
  pThrowNegArraySizeFromCode = ThrowNegArraySizeFromCode;
  pThrowRuntimeExceptionFromCode = ThrowRuntimeExceptionFromCode;
  pThrowInternalErrorFromCode = ThrowInternalErrorFromCode;
  pThrowNoSuchMethodFromCode = ThrowNoSuchMethodFromCode;
  pThrowAbstractMethodErrorFromCode = ThrowAbstractMethodErrorFromCode;
  pFindNativeMethod = FindNativeMethod;
  pDecodeJObjectInThread = DecodeJObjectInThread;
  pDebugMe = DebugMe;
}

void Frame::Next() {
  size_t frame_size = GetMethod()->GetFrameSizeInBytes();
  DCHECK_NE(frame_size, 0u);
  DCHECK_LT(frame_size, 1024u);
  byte* next_sp = reinterpret_cast<byte*>(sp_) + frame_size;
  sp_ = reinterpret_cast<Method**>(next_sp);
  DCHECK(*sp_ == NULL ||
         (*sp_)->GetClass()->GetDescriptor()->Equals("Ljava/lang/reflect/Method;"));
}

bool Frame::HasMethod() const {
  return GetMethod() != NULL && (!GetMethod()->IsPhony());
}

uintptr_t Frame::GetReturnPC() const {
  byte* pc_addr = reinterpret_cast<byte*>(sp_) + GetMethod()->GetReturnPcOffsetInBytes();
  return *reinterpret_cast<uintptr_t*>(pc_addr);
}

uintptr_t Frame::LoadCalleeSave(int num) const {
  // Callee saves are held at the top of the frame
  Method* method = GetMethod();
  DCHECK(method != NULL);
  size_t frame_size = method->GetFrameSizeInBytes();
  byte* save_addr = reinterpret_cast<byte*>(sp_) + frame_size - ((num + 1) * kPointerSize);
#if defined(__i386__)
  save_addr -= kPointerSize;  // account for return address
#endif
  return *reinterpret_cast<uintptr_t*>(save_addr);
}
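
// Illustrative layout (not part of the original source), assuming the ARM
// callee-save frame built by Thread::CalleeSaveMethod() below
// (frame_size == 64, kPointerSize == 4):
//   sp_ + 60 : LoadCalleeSave(0), which is also the return PC slot (offset 60)
//   sp_ + 56 : LoadCalleeSave(1)
//   ...
//   sp_ +  0 : the Method* for this frame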

Method* Frame::NextMethod() const {
  byte* next_sp = reinterpret_cast<byte*>(sp_) + GetMethod()->GetFrameSizeInBytes();
  return *reinterpret_cast<Method**>(next_sp);
}

void* Thread::CreateCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  Runtime* runtime = Runtime::Current();

  self->Attach(runtime);

  String* thread_name = reinterpret_cast<String*>(gThread_name->GetObject(self->peer_));
  if (thread_name != NULL) {
    SetThreadName(thread_name->ToModifiedUtf8().c_str());
  }

  // Wait until it's safe to start running code. (There may have been a suspend-all
  // in progress while we were starting up.)
  runtime->GetThreadList()->WaitForGo();

  // TODO: say "hi" to the debugger.
  //if (gDvm.debuggerConnected) {
  //  dvmDbgPostThreadStart(self);
  //}

  // Invoke the 'run' method of our java.lang.Thread.
  CHECK(self->peer_ != NULL);
  Object* receiver = self->peer_;
  Method* m = receiver->GetClass()->FindVirtualMethodForVirtualOrInterface(gThread_run);
  m->Invoke(self, receiver, NULL, NULL);

  // Detach.
  runtime->GetThreadList()->Unregister();

  return NULL;
}

void SetVmData(Object* managed_thread, Thread* native_thread) {
  gThread_vmData->SetInt(managed_thread, reinterpret_cast<uintptr_t>(native_thread));
}

Thread* Thread::FromManagedThread(JNIEnv* env, jobject java_thread) {
  Object* thread = Decode<Object*>(env, java_thread);
  return reinterpret_cast<Thread*>(static_cast<uintptr_t>(gThread_vmData->GetInt(thread)));
}

void Thread::Create(Object* peer, size_t stack_size) {
  CHECK(peer != NULL);

  if (stack_size == 0) {
    stack_size = Runtime::Current()->GetDefaultStackSize();
  }

  Thread* native_thread = new Thread;
  native_thread->peer_ = peer;

  // Thread.start is synchronized, so we know that vmData is 0,
  // and know that we're not racing to assign it.
  SetVmData(peer, native_thread);

  pthread_attr_t attr;
  CHECK_PTHREAD_CALL(pthread_attr_init, (&attr), "new thread");
  CHECK_PTHREAD_CALL(pthread_attr_setdetachstate, (&attr, PTHREAD_CREATE_DETACHED), "PTHREAD_CREATE_DETACHED");
  CHECK_PTHREAD_CALL(pthread_attr_setstacksize, (&attr, stack_size), stack_size);
  CHECK_PTHREAD_CALL(pthread_create, (&native_thread->pthread_, &attr, Thread::CreateCallback, native_thread), "new thread");
  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attr), "new thread");

  // Let the child know when it's safe to start running.
  Runtime::Current()->GetThreadList()->SignalGo(native_thread);
}

void Thread::Attach(const Runtime* runtime) {
  InitCpu();
  InitFunctionPointers();

  thin_lock_id_ = Runtime::Current()->GetThreadList()->AllocThreadId();

  tid_ = ::art::GetTid();
  pthread_ = pthread_self();

  InitStackHwm();

  CHECK_PTHREAD_CALL(pthread_setspecific, (Thread::pthread_key_self_, this), "attach");

  jni_env_ = new JNIEnvExt(this, runtime->GetJavaVM());

  runtime->GetThreadList()->Register(this);
}

Thread* Thread::Attach(const Runtime* runtime, const char* name, bool as_daemon) {
  Thread* self = new Thread;
  self->Attach(runtime);

  self->SetState(Thread::kRunnable);

  SetThreadName(name);

  // If we're the main thread, ClassLinker won't be created until after we're attached,
  // so that thread needs a two-stage attach. Regular threads don't need this hack.
  if (self->thin_lock_id_ != ThreadList::kMainId) {
    self->CreatePeer(name, as_daemon);
  }

  return self;
}

jobject GetWellKnownThreadGroup(JNIEnv* env, const char* field_name) {
  jclass thread_group_class = env->FindClass("java/lang/ThreadGroup");
  jfieldID fid = env->GetStaticFieldID(thread_group_class, field_name, "Ljava/lang/ThreadGroup;");
  jobject thread_group = env->GetStaticObjectField(thread_group_class, fid);
  // This will be null in the compiler (and tests), but never in a running system.
  //CHECK(thread_group != NULL) << "java.lang.ThreadGroup." << field_name << " not initialized";
  return thread_group;
}

void Thread::CreatePeer(const char* name, bool as_daemon) {
  Thread* self = Thread::Current();
  ScopedThreadStateChange tsc(self, Thread::kNative);

  JNIEnv* env = jni_env_;

  const char* field_name = (GetThinLockId() == ThreadList::kMainId) ? "mMain" : "mSystem";
  jobject thread_group = GetWellKnownThreadGroup(env, field_name);
  jobject thread_name = env->NewStringUTF(name);
  jint thread_priority = GetNativePriority();
  jboolean thread_is_daemon = as_daemon;

  jclass c = env->FindClass("java/lang/Thread");
  jmethodID mid = env->GetMethodID(c, "<init>", "(Ljava/lang/ThreadGroup;Ljava/lang/String;IZ)V");

  jobject peer = env->NewObject(c, mid, thread_group, thread_name, thread_priority, thread_is_daemon);
  peer_ = DecodeJObject(peer);
  SetVmData(peer_, self);

  // Because we mostly run without code available (in the compiler, in tests), we
  // manually assign the fields the constructor should have set.
  // TODO: lose this.
  gThread_daemon->SetBoolean(peer_, thread_is_daemon);
  gThread_group->SetObject(peer_, Decode<Object*>(env, thread_group));
  gThread_name->SetObject(peer_, Decode<Object*>(env, thread_name));
  gThread_priority->SetInt(peer_, thread_priority);
}

void Thread::InitStackHwm() {
  pthread_attr_t attributes;
  CHECK_PTHREAD_CALL(pthread_getattr_np, (pthread_, &attributes), __FUNCTION__);

  void* stack_base;
  size_t stack_size;
  CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, &stack_base, &stack_size), __FUNCTION__);

  if (stack_size <= kStackOverflowReservedBytes) {
    LOG(FATAL) << "attempt to attach a thread with a too-small stack (" << stack_size << " bytes)";
  }

  // stack_base is the "lowest addressable byte" of the stack.
  // Our stacks grow down, so we want stack_end_ to be near there, but reserving enough room
  // to throw a StackOverflowError.
  stack_end_ = reinterpret_cast<byte*>(stack_base) + kStackOverflowReservedBytes;

  // Sanity check.
  int stack_variable;
  CHECK_GT(&stack_variable, (void*) stack_end_);

  CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
}
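
// Illustrative sketch (not part of the original source): with stack_end_ set
// kStackOverflowReservedBytes above the lowest addressable byte, an overflow
// check can be a single comparison against the current stack pointer, e.g.:
//   if (sp < stack_end_) { /* deliver StackOverflowError */ }
// leaving the reserved region free for running the throw machinery itself.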

void Thread::Dump(std::ostream& os) const {
  DumpState(os);
  DumpStack(os);
}

std::string GetSchedulerGroup(pid_t tid) {
  // /proc/<pid>/cgroup looks like this:
  // 2:devices:/
  // 1:cpuacct,cpu:/
  // We want the third field from the line whose second field contains the "cpu" token.
  std::string cgroup_file;
  if (!ReadFileToString("/proc/self/cgroup", &cgroup_file)) {
    return "";
  }
  std::vector<std::string> cgroup_lines;
  Split(cgroup_file, '\n', cgroup_lines);
  for (size_t i = 0; i < cgroup_lines.size(); ++i) {
    std::vector<std::string> cgroup_fields;
    Split(cgroup_lines[i], ':', cgroup_fields);
    std::vector<std::string> cgroups;
    Split(cgroup_fields[1], ',', cgroups);
    for (size_t j = 0; j < cgroups.size(); ++j) {
      if (cgroups[j] == "cpu") {
        return cgroup_fields[2].substr(1);  // Skip the leading slash.
      }
    }
  }
  return "";
}
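
// Illustrative example (not part of the original source): if /proc/self/cgroup
// contains the line "1:cpuacct,cpu:/bg_non_interactive", GetSchedulerGroup()
// returns "bg_non_interactive"; for "1:cpuacct,cpu:/" it returns "", which
// DumpState() below replaces with "default".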

void Thread::DumpState(std::ostream& os) const {
  std::string thread_name("<native thread without managed peer>");
  std::string group_name;
  int priority;
  bool is_daemon = false;

  if (peer_ != NULL) {
    String* thread_name_string = reinterpret_cast<String*>(gThread_name->GetObject(peer_));
    thread_name = (thread_name_string != NULL) ? thread_name_string->ToModifiedUtf8() : "<null>";
    priority = gThread_priority->GetInt(peer_);
    is_daemon = gThread_daemon->GetBoolean(peer_);

    Object* thread_group = gThread_group->GetObject(peer_);
    if (thread_group != NULL) {
      String* group_name_string = reinterpret_cast<String*>(gThreadGroup_name->GetObject(thread_group));
      group_name = (group_name_string != NULL) ? group_name_string->ToModifiedUtf8() : "<null>";
    }
  } else {
    // This name may be truncated, but it's the best we can do in the absence of a managed peer.
    std::string stats;
    if (ReadFileToString(StringPrintf("/proc/self/task/%d/stat", GetTid()).c_str(), &stats)) {
      size_t start = stats.find('(') + 1;
      size_t length = stats.find(')') - start;
      thread_name = stats.substr(start, length);
    }
    priority = GetNativePriority();
  }

  int policy;
  sched_param sp;
  CHECK_PTHREAD_CALL(pthread_getschedparam, (pthread_, &policy, &sp), __FUNCTION__);

  std::string scheduler_group(GetSchedulerGroup(GetTid()));
  if (scheduler_group.empty()) {
    scheduler_group = "default";
  }

  os << '"' << thread_name << '"';
  if (is_daemon) {
    os << " daemon";
  }
  os << " prio=" << priority
     << " tid=" << GetThinLockId()
     << " " << GetState() << "\n";

  int debug_suspend_count = 0;  // TODO
  os << "  | group=\"" << group_name << "\""
     << " sCount=" << suspend_count_
     << " dsCount=" << debug_suspend_count
     << " obj=" << reinterpret_cast<void*>(peer_)
     << " self=" << reinterpret_cast<const void*>(this) << "\n";
  os << "  | sysTid=" << GetTid()
     << " nice=" << getpriority(PRIO_PROCESS, GetTid())
     << " sched=" << policy << "/" << sp.sched_priority
     << " cgrp=" << scheduler_group
     << " handle=" << GetImpl() << "\n";

  // Grab the scheduler stats for this thread.
  std::string scheduler_stats;
  if (ReadFileToString(StringPrintf("/proc/self/task/%d/schedstat", GetTid()).c_str(), &scheduler_stats)) {
    scheduler_stats.resize(scheduler_stats.size() - 1);  // Lose the trailing '\n'.
  } else {
    scheduler_stats = "0 0 0";
  }

  int utime = 0;
  int stime = 0;
  int task_cpu = 0;
  std::string stats;
  if (ReadFileToString(StringPrintf("/proc/self/task/%d/stat", GetTid()).c_str(), &stats)) {
    // Skip the command, which may contain spaces.
    stats = stats.substr(stats.find(')') + 2);
    // Extract the three fields we care about.
    std::vector<std::string> fields;
    Split(stats, ' ', fields);
    utime = strtoull(fields[11].c_str(), NULL, 10);
    stime = strtoull(fields[12].c_str(), NULL, 10);
    task_cpu = strtoull(fields[36].c_str(), NULL, 10);
  }

  os << "  | schedstat=( " << scheduler_stats << " )"
     << " utm=" << utime
     << " stm=" << stime
     << " core=" << task_cpu
     << " HZ=" << sysconf(_SC_CLK_TCK) << "\n";
}
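
// Illustrative output (not part of the original source) in the format produced
// above:
//   "main" prio=5 tid=1 Runnable
//     | group="main" sCount=0 dsCount=0 obj=0x40a16460 self=0x1d8f50
//     | sysTid=1234 nice=0 sched=0/0 cgrp=default handle=1074173792
//     | schedstat=( 0 0 0 ) utm=2 stm=1 core=0 HZ=100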

struct StackDumpVisitor : public Thread::StackVisitor {
  StackDumpVisitor(std::ostream& os) : os(os) {
  }

  virtual ~StackDumpVisitor() {
  }

  void VisitFrame(const Frame& frame, uintptr_t pc) {
    if (!frame.HasMethod()) {
      return;
    }
    ClassLinker* class_linker = Runtime::Current()->GetClassLinker();

    Method* m = frame.GetMethod();
    Class* c = m->GetDeclaringClass();
    const DexFile& dex_file = class_linker->FindDexFile(c->GetDexCache());

    os << "  at " << PrettyMethod(m, false);
    if (m->IsNative()) {
      os << "(Native method)";
    } else {
      int line_number = dex_file.GetLineNumFromPC(m, m->ToDexPC(pc));
      os << "(" << c->GetSourceFile()->ToModifiedUtf8() << ":" << line_number << ")";
    }
    os << "\n";
  }

  std::ostream& os;
};

void Thread::DumpStack(std::ostream& os) const {
  StackDumpVisitor dumper(os);
  WalkStack(&dumper);
}

Thread::State Thread::SetState(Thread::State new_state) {
  Thread::State old_state = state_;
  if (old_state == new_state) {
    return old_state;
  }

  volatile void* raw = reinterpret_cast<volatile void*>(&state_);
  volatile int32_t* addr = reinterpret_cast<volatile int32_t*>(raw);

  if (new_state == Thread::kRunnable) {
    /*
     * Change our status to Thread::kRunnable.  The transition requires
     * that we check for pending suspension, because the VM considers
     * us to be "asleep" in all other states, and another thread could
     * be performing a GC now.
     *
     * The order of operations is very significant here.  One way to
     * do this wrong is:
     *
     *   GCing thread                   Our thread (in kNative)
     *   ------------                   ----------------------
     *                                  check suspend count (== 0)
     *   SuspendAllThreads()
     *   grab suspend-count lock
     *   increment all suspend counts
     *   release suspend-count lock
     *   check thread state (== kNative)
     *   all are suspended, begin GC
     *                                  set state to kRunnable
     *                                  (continue executing)
     *
     * We could correct this by grabbing the suspend-count lock and
     * performing both of our operations (check suspend count, set
     * state) while holding it, but then we would need to grab a mutex
     * on every transition to kRunnable.
     *
     * What we do instead is change the order of operations so that
     * the transition to kRunnable happens first.  If we then detect
     * that the suspend count is nonzero, we switch to kSuspended.
     *
     * Appropriate compiler and memory barriers are required to ensure
     * that the operations are observed in the expected order.
     *
     * This does create a small window of opportunity where a GC in
     * progress could observe what appears to be a running thread (if
     * it happens to look between when we set to kRunnable and when we
     * switch to kSuspended).  At worst this only affects assertions
     * and thread logging.  (We could work around it with some sort
     * of intermediate "pre-running" state that is generally treated
     * as equivalent to running, but that doesn't seem worthwhile.)
     *
     * We can also solve this by combining the "status" and "suspend
     * count" fields into a single 32-bit value.  This trades the
     * store/load barrier on transition to kRunnable for an atomic RMW
     * op on all transitions and all suspend count updates (also, all
     * accesses to status or the thread count require bit-fiddling).
     * It also eliminates the brief transition through kRunnable when
     * the thread is supposed to be suspended.  This is possibly faster
     * on SMP and slightly more correct, but less convenient.
     */
    android_atomic_acquire_store(new_state, addr);
    if (ANNOTATE_UNPROTECTED_READ(suspend_count_) != 0) {
      Runtime::Current()->GetThreadList()->FullSuspendCheck(this);
    }
  } else {
    /*
     * Not changing to Thread::kRunnable. No additional work required.
     *
     * We use a releasing store to ensure that, if we were runnable,
     * any updates we previously made to objects on the managed heap
     * will be observed before the state change.
     */
    android_atomic_release_store(new_state, addr);
  }

  return old_state;
}

void Thread::WaitUntilSuspended() {
  // TODO: dalvik dropped the waiting thread's priority after a while.
  // TODO: dalvik timed out and aborted.
  useconds_t delay = 0;
  while (GetState() == Thread::kRunnable) {
    useconds_t new_delay = delay * 2;
    CHECK_GE(new_delay, delay);
    delay = new_delay;
    if (delay == 0) {
      sched_yield();
      delay = 10000;
    } else {
      usleep(delay);
    }
  }
}
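
// Note (not part of the original source): the loop above yields once, then
// backs off exponentially; delay is set to 10000 after the yield and doubled
// before each sleep, so the actual sleeps are 20ms, 40ms, 80ms, and so on.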

void Thread::ThreadExitCallback(void* arg) {
  Thread* self = reinterpret_cast<Thread*>(arg);
  LOG(FATAL) << "Native thread exited without calling DetachCurrentThread: " << *self;
}

void Thread::Startup() {
  // Allocate a TLS slot.
  CHECK_PTHREAD_CALL(pthread_key_create, (&Thread::pthread_key_self_, Thread::ThreadExitCallback), "self key");

  // Double-check the TLS slot allocation.
  if (pthread_getspecific(pthread_key_self_) != NULL) {
    LOG(FATAL) << "newly-created pthread TLS slot is not NULL";
  }
}

void Thread::FinishStartup() {
  // Now that the ClassLinker is ready, we can find the various Class*, Field*, and Method*s we need.
  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
  Class* boolean_class = class_linker->FindPrimitiveClass('Z');
  Class* int_class = class_linker->FindPrimitiveClass('I');
  Class* String_class = class_linker->FindSystemClass("Ljava/lang/String;");
  Class* Thread_class = class_linker->FindSystemClass("Ljava/lang/Thread;");
  Class* ThreadGroup_class = class_linker->FindSystemClass("Ljava/lang/ThreadGroup;");
  Class* ThreadLock_class = class_linker->FindSystemClass("Ljava/lang/ThreadLock;");
  Class* UncaughtExceptionHandler_class = class_linker->FindSystemClass("Ljava/lang/Thread$UncaughtExceptionHandler;");
  gThrowable = class_linker->FindSystemClass("Ljava/lang/Throwable;");
  gThread_daemon = Thread_class->FindDeclaredInstanceField("daemon", boolean_class);
  gThread_group = Thread_class->FindDeclaredInstanceField("group", ThreadGroup_class);
  gThread_lock = Thread_class->FindDeclaredInstanceField("lock", ThreadLock_class);
  gThread_name = Thread_class->FindDeclaredInstanceField("name", String_class);
  gThread_priority = Thread_class->FindDeclaredInstanceField("priority", int_class);
  gThread_run = Thread_class->FindVirtualMethod("run", "()V");
  gThread_uncaughtHandler = Thread_class->FindDeclaredInstanceField("uncaughtHandler", UncaughtExceptionHandler_class);
  gThread_vmData = Thread_class->FindDeclaredInstanceField("vmData", int_class);
  gThreadGroup_name = ThreadGroup_class->FindDeclaredInstanceField("name", String_class);
  gThreadGroup_removeThread = ThreadGroup_class->FindVirtualMethod("removeThread", "(Ljava/lang/Thread;)V");
  gUncaughtExceptionHandler_uncaughtException =
      UncaughtExceptionHandler_class->FindVirtualMethod("uncaughtException", "(Ljava/lang/Thread;Ljava/lang/Throwable;)V");

  // Finish attaching the main thread.
  Thread::Current()->CreatePeer("main", false);
}

void Thread::Shutdown() {
  CHECK_PTHREAD_CALL(pthread_key_delete, (Thread::pthread_key_self_), "self key");
}

Thread::Thread()
    : peer_(NULL),
      wait_mutex_(new Mutex("Thread wait mutex")),
      wait_cond_(new ConditionVariable("Thread wait condition variable")),
      wait_monitor_(NULL),
      interrupted_(false),
      wait_next_(NULL),
      card_table_(0),
      stack_end_(NULL),
      top_of_managed_stack_(),
      top_of_managed_stack_pc_(0),
      native_to_managed_record_(NULL),
      top_sirt_(NULL),
      jni_env_(NULL),
      state_(Thread::kUnknown),
      self_(NULL),
      runtime_(NULL),
      exception_(NULL),
      suspend_count_(0),
      class_loader_override_(NULL),
      long_jump_context_(NULL) {
}

void MonitorExitVisitor(const Object* object, void*) {
  Object* entered_monitor = const_cast<Object*>(object);
  entered_monitor->MonitorExit(Thread::Current());
}

Thread::~Thread() {
  // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
  if (jni_env_ != NULL) {
    jni_env_->monitors.VisitRoots(MonitorExitVisitor, NULL);
  }

  if (peer_ != NULL) {
    Object* group = gThread_group->GetObject(peer_);

    // Handle any pending exception.
    if (IsExceptionPending()) {
      // Get and clear the exception.
      Object* exception = GetException();
      ClearException();

      // If the thread has its own handler, use that.
      Object* handler = gThread_uncaughtHandler->GetObject(peer_);
      if (handler == NULL) {
        // Otherwise use the thread group's default handler.
        handler = group;
      }

      // Call the handler.
      Method* m = handler->GetClass()->FindVirtualMethodForVirtualOrInterface(gUncaughtExceptionHandler_uncaughtException);
      Object* args[2];
      args[0] = peer_;
      args[1] = exception;
      m->Invoke(this, handler, reinterpret_cast<byte*>(&args), NULL);

      // If the handler threw, clear that exception too.
      ClearException();
    }

    // this.group.removeThread(this);
    // group can be null if we're in the compiler or a test.
    if (group != NULL) {
      Method* m = group->GetClass()->FindVirtualMethodForVirtualOrInterface(gThreadGroup_removeThread);
      Object* args = peer_;
      m->Invoke(this, group, reinterpret_cast<byte*>(&args), NULL);
    }

    // this.vmData = 0;
    SetVmData(peer_, NULL);

    // TODO: say "bye" to the debugger.
    //if (gDvm.debuggerConnected) {
    //  dvmDbgPostThreadDeath(self);
    //}

    // Thread.join() is implemented as an Object.wait() on the Thread.lock
    // object. Signal anyone who is waiting.
    Thread* self = Thread::Current();
    Object* lock = gThread_lock->GetObject(peer_);
    // (This conditional is only needed for tests, where Thread.lock won't have been set.)
    if (lock != NULL) {
      lock->MonitorEnter(self);
      lock->NotifyAll();
      lock->MonitorExit(self);
    }
  }

  delete jni_env_;
  jni_env_ = NULL;

  SetState(Thread::kTerminated);

  delete wait_cond_;
  delete wait_mutex_;

  delete long_jump_context_;
}

size_t Thread::NumSirtReferences() {
  size_t count = 0;
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->Link()) {
    count += cur->NumberOfReferences();
  }
  return count;
}

bool Thread::SirtContains(jobject obj) {
  Object** sirt_entry = reinterpret_cast<Object**>(obj);
  for (StackIndirectReferenceTable* cur = top_sirt_; cur; cur = cur->Link()) {
    size_t num_refs = cur->NumberOfReferences();
    // A SIRT should always hold at least one reference, since a native method
    // is always passed a this pointer (jobject) or a class (jclass).
    DCHECK_GT(num_refs, 0u);
    if ((&cur->References()[0] <= sirt_entry) &&
        (sirt_entry <= (&cur->References()[num_refs - 1]))) {
      return true;
    }
  }
  return false;
}

void Thread::PopSirt() {
  CHECK(top_sirt_ != NULL);
  top_sirt_ = top_sirt_->Link();
}

Object* Thread::DecodeJObject(jobject obj) {
  DCHECK(CanAccessDirectReferences());
  if (obj == NULL) {
    return NULL;
  }
  IndirectRef ref = reinterpret_cast<IndirectRef>(obj);
  IndirectRefKind kind = GetIndirectRefKind(ref);
  Object* result;
  switch (kind) {
  case kLocal:
    {
      IndirectReferenceTable& locals = jni_env_->locals;
      result = const_cast<Object*>(locals.Get(ref));
      break;
    }
  case kGlobal:
    {
      JavaVMExt* vm = Runtime::Current()->GetJavaVM();
      IndirectReferenceTable& globals = vm->globals;
      MutexLock mu(vm->globals_lock);
      result = const_cast<Object*>(globals.Get(ref));
      break;
    }
  case kWeakGlobal:
    {
      JavaVMExt* vm = Runtime::Current()->GetJavaVM();
      IndirectReferenceTable& weak_globals = vm->weak_globals;
      MutexLock mu(vm->weak_globals_lock);
      result = const_cast<Object*>(weak_globals.Get(ref));
      if (result == kClearedJniWeakGlobal) {
        // This is a special case where it's okay to return NULL.
        return NULL;
      }
      break;
    }
  case kSirtOrInvalid:
  default:
    // TODO: make stack indirect reference table lookup more efficient
    // Check if this is a local reference in the SIRT
    if (SirtContains(obj)) {
      result = *reinterpret_cast<Object**>(obj);  // Read from SIRT
    } else if (jni_env_->work_around_app_jni_bugs) {
      // Assume an invalid local reference is actually a direct pointer.
      result = reinterpret_cast<Object*>(obj);
    } else {
      result = kInvalidIndirectRefObject;
    }
  }

  if (result == NULL) {
    LOG(ERROR) << "JNI ERROR (app bug): use of deleted " << kind << ": " << obj;
    JniAbort(NULL);
  } else {
    if (result != kInvalidIndirectRefObject) {
      Heap::VerifyObject(result);
    }
  }
  return result;
}

class CountStackDepthVisitor : public Thread::StackVisitor {
 public:
  CountStackDepthVisitor() : depth_(0), skip_depth_(0), skipping_(true) {}

  virtual void VisitFrame(const Frame& frame, uintptr_t pc) {
    // We want to skip frames up to and including the exception's constructor.
    // Note we also skip the frame if it doesn't have a method (namely the
    // callee-save frame).
    DCHECK(gThrowable != NULL);
    if (skipping_ && frame.HasMethod() && !gThrowable->IsAssignableFrom(frame.GetMethod()->GetDeclaringClass())) {
      skipping_ = false;
    }
    if (!skipping_) {
      ++depth_;
    } else {
      ++skip_depth_;
    }
  }

  int GetDepth() const {
    return depth_;
  }

  int GetSkipDepth() const {
    return skip_depth_;
  }

 private:
  uint32_t depth_;
  uint32_t skip_depth_;
  bool skipping_;
};

class BuildInternalStackTraceVisitor : public Thread::StackVisitor {
 public:
  explicit BuildInternalStackTraceVisitor(int depth, int skip_depth, ScopedJniThreadState& ts)
      : skip_depth_(skip_depth), count_(0) {
    // Allocate the method trace with an extra slot that will hold the PC trace.
    method_trace_ = Runtime::Current()->GetClassLinker()->AllocObjectArray<Object>(depth + 1);
    // Register a local reference as IntArray::Alloc may trigger GC.
    local_ref_ = AddLocalReference<jobject>(ts.Env(), method_trace_);
    pc_trace_ = IntArray::Alloc(depth);
#ifdef MOVING_GARBAGE_COLLECTOR
    // Re-read after potential GC
    method_trace_ = Decode<ObjectArray<Object>*>(ts.Env(), local_ref_);
#endif
    // Save the PC trace in the last element of the method trace; this also
    // places it into the object graph.
    method_trace_->Set(depth, pc_trace_);
  }

  virtual ~BuildInternalStackTraceVisitor() {}

  virtual void VisitFrame(const Frame& frame, uintptr_t pc) {
    if (skip_depth_ > 0) {
      skip_depth_--;
      return;
    }
    method_trace_->Set(count_, frame.GetMethod());
    pc_trace_->Set(count_, pc);
    ++count_;
  }

  jobject GetInternalStackTrace() const {
    return local_ref_;
  }

 private:
  // How many more frames to skip.
  int32_t skip_depth_;
  // Current position down the stack trace.
  uint32_t count_;
  // Array of return PC values.
  IntArray* pc_trace_;
  // An array of the methods on the stack; the last entry is a reference to the
  // PC trace.
  ObjectArray<Object>* method_trace_;
  // Local indirect reference table entry for the method trace.
  jobject local_ref_;
};

void Thread::WalkStack(StackVisitor* visitor) const {
  Frame frame = GetTopOfStack();
  uintptr_t pc = top_of_managed_stack_pc_;
  // TODO: enable this CHECK after native_to_managed_record_ is initialized during startup.
  // CHECK(native_to_managed_record_ != NULL);
  NativeToManagedRecord* record = native_to_managed_record_;

  while (frame.GetSP() != 0) {
    for ( ; frame.GetMethod() != 0; frame.Next()) {
      DCHECK(frame.GetMethod()->IsWithinCode(pc));
      visitor->VisitFrame(frame, pc);
      pc = frame.GetReturnPC();
    }
    if (record == NULL) {
      break;
    }
    // TODO: should last_top_of_managed_stack_ be a Frame instead of a raw sp?
    frame.SetSP(reinterpret_cast<art::Method**>(record->last_top_of_managed_stack_));
    pc = record->last_top_of_managed_stack_pc_;
    record = record->link_;
  }
}
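
// Illustrative picture (not part of the original source) of the walk above,
// with the stack growing downward:
//   managed frame       <- top_of_managed_stack_ (pc == top_of_managed_stack_pc_)
//   managed frame(s)       advanced via frame.Next()
//   Method* == NULL     -> end of this managed segment; jump to
//                          record->last_top_of_managed_stack_
//   native frame(s)        not visited
//   managed frame(s)       previous segment, continuing down the record chain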

void Thread::WalkStackUntilUpCall(StackVisitor* visitor, bool include_upcall) const {
  Frame frame = GetTopOfStack();
  uintptr_t pc = top_of_managed_stack_pc_;

  if (frame.GetSP() != 0) {
    for ( ; frame.GetMethod() != 0; frame.Next()) {
      DCHECK(frame.GetMethod()->IsWithinCode(pc));
      visitor->VisitFrame(frame, pc);
      pc = frame.GetReturnPC();
    }
    if (include_upcall) {
      visitor->VisitFrame(frame, pc);
    }
  }
}

jobject Thread::CreateInternalStackTrace(JNIEnv* env) const {
  // Compute the depth of the stack.
  CountStackDepthVisitor count_visitor;
  WalkStack(&count_visitor);
  int32_t depth = count_visitor.GetDepth();
  int32_t skip_depth = count_visitor.GetSkipDepth();

  // Transition into runnable state to work on Object*/Array*.
  ScopedJniThreadState ts(env);

  // Build the internal stack trace.
  BuildInternalStackTraceVisitor build_trace_visitor(depth, skip_depth, ts);
  WalkStack(&build_trace_visitor);

  return build_trace_visitor.GetInternalStackTrace();
}

jobjectArray Thread::InternalStackTraceToStackTraceElementArray(JNIEnv* env, jobject internal,
    jobjectArray output_array, int* stack_depth) {
  // Transition into runnable state to work on Object*/Array*.
  ScopedJniThreadState ts(env);

  // Decode the internal stack trace into the depth, method trace and PC trace.
  ObjectArray<Object>* method_trace =
      down_cast<ObjectArray<Object>*>(Decode<Object*>(ts.Env(), internal));
  int32_t depth = method_trace->GetLength() - 1;
  IntArray* pc_trace = down_cast<IntArray*>(method_trace->Get(depth));

  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();

  jobjectArray result;
  ObjectArray<StackTraceElement>* java_traces;
  if (output_array != NULL) {
    // Reuse the array we were given.
    result = output_array;
    java_traces = reinterpret_cast<ObjectArray<StackTraceElement>*>(Decode<Array*>(env,
        output_array));
    // ...adjusting the number of frames we'll write to not exceed the array length.
    depth = std::min(depth, java_traces->GetLength());
  } else {
    // Create the java_traces array and place it in the local reference table.
    java_traces = class_linker->AllocStackTraceElementArray(depth);
    result = AddLocalReference<jobjectArray>(ts.Env(), java_traces);
  }

  if (stack_depth != NULL) {
    *stack_depth = depth;
  }

  for (int32_t i = 0; i < depth; ++i) {
    // Prepare parameters for StackTraceElement(String cls, String method, String file, int line)
    Method* method = down_cast<Method*>(method_trace->Get(i));
    uint32_t native_pc = pc_trace->Get(i);
    Class* klass = method->GetDeclaringClass();
    const DexFile& dex_file = class_linker->FindDexFile(klass->GetDexCache());
    std::string class_name(PrettyDescriptor(klass->GetDescriptor()));

    // Allocate the element, potentially triggering GC.
    StackTraceElement* obj =
        StackTraceElement::Alloc(String::AllocFromModifiedUtf8(class_name.c_str()),
                                 method->GetName(),
                                 klass->GetSourceFile(),
                                 dex_file.GetLineNumFromPC(method,
                                     method->ToDexPC(native_pc)));
#ifdef MOVING_GARBAGE_COLLECTOR
    // Re-read after potential GC
    java_traces = Decode<ObjectArray<Object>*>(ts.Env(), result);
    method_trace = down_cast<ObjectArray<Object>*>(Decode<Object*>(ts.Env(), internal));
    pc_trace = down_cast<IntArray*>(method_trace->Get(depth));
#endif
    java_traces->Set(i, obj);
  }
  return result;
}

void Thread::ThrowNewException(const char* exception_class_descriptor, const char* fmt, ...) {
  std::string msg;
  va_list args;
  va_start(args, fmt);
  StringAppendV(&msg, fmt, args);
  va_end(args);

  // Convert "Ljava/lang/Exception;" into JNI-style "java/lang/Exception".
  CHECK_EQ('L', exception_class_descriptor[0]);
  std::string descriptor(exception_class_descriptor + 1);
  CHECK_EQ(';', descriptor[descriptor.length() - 1]);
  descriptor.erase(descriptor.length() - 1);

  JNIEnv* env = GetJniEnv();
  jclass exception_class = env->FindClass(descriptor.c_str());
  CHECK(exception_class != NULL) << "descriptor=\"" << descriptor << "\"";
  int rc = env->ThrowNew(exception_class, msg.c_str());
  CHECK_EQ(rc, JNI_OK);
}
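
// Example (derived from the call sites above):
//   ThrowNewException("Ljava/lang/ArrayIndexOutOfBoundsException;",
//                     "length=%d; index=%d", 3, 7);
// formats the message, converts the descriptor to the JNI-style
// "java/lang/ArrayIndexOutOfBoundsException", and throws via FindClass/ThrowNew.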

void Thread::ThrowOutOfMemoryError() {
  UNIMPLEMENTED(FATAL);
}

Method* Thread::CalleeSaveMethod() const {
  // TODO: we should only allocate this once
  Method* method = Runtime::Current()->GetClassLinker()->AllocMethod();
#if defined(__arm__)
  method->SetCode(NULL, art::kThumb2, NULL);
  method->SetFrameSizeInBytes(64);
  method->SetReturnPcOffsetInBytes(60);
  method->SetCoreSpillMask((1 << art::arm::R1) |
                           (1 << art::arm::R2) |
                           (1 << art::arm::R3) |
                           (1 << art::arm::R4) |
                           (1 << art::arm::R5) |
                           (1 << art::arm::R6) |
                           (1 << art::arm::R7) |
                           (1 << art::arm::R8) |
                           (1 << art::arm::R9) |
                           (1 << art::arm::R10) |
                           (1 << art::arm::R11) |
                           (1 << art::arm::LR));
  method->SetFpSpillMask(0);
#elif defined(__i386__)
  method->SetCode(NULL, art::kX86, NULL);
  method->SetFrameSizeInBytes(32);
  method->SetReturnPcOffsetInBytes(28);
  method->SetCoreSpillMask((1 << art::x86::EBX) |
                           (1 << art::x86::EBP) |
                           (1 << art::x86::ESI) |
                           (1 << art::x86::EDI));
  method->SetFpSpillMask(0);
#else
  UNIMPLEMENTED(FATAL);
#endif
  return method;
}

class CatchBlockStackVisitor : public Thread::StackVisitor {
 public:
  CatchBlockStackVisitor(Class* to_find, Context* ljc)
      : found_(false), to_find_(to_find), long_jump_context_(ljc), native_method_count_(0) {
#ifndef NDEBUG
    handler_pc_ = 0xEBADC0DE;
    handler_frame_.SetSP(reinterpret_cast<Method**>(0xEBADF00D));
#endif
  }

  virtual void VisitFrame(const Frame& fr, uintptr_t pc) {
    if (!found_) {
      Method* method = fr.GetMethod();
      if (method == NULL) {
        // This is the upcall; remember the frame and last pc so that we can
        // long-jump to them.
        handler_pc_ = pc;
        handler_frame_ = fr;
        return;
      }
      uint32_t dex_pc = DexFile::kDexNoIndex;
      if (method->IsPhony()) {
        // Ignore callee-save method.
      } else if (method->IsNative()) {
        native_method_count_++;
      } else {
        // Move the PC back 2 bytes as a call will frequently terminate the
        // decoding of a particular instruction and we want to make sure we
        // get the Dex PC of the instruction with the call and not the
        // instruction following.
        pc -= 2;
        dex_pc = method->ToDexPC(pc);
      }
      if (dex_pc != DexFile::kDexNoIndex) {
        uint32_t found_dex_pc = method->FindCatchBlock(to_find_, dex_pc);
        if (found_dex_pc != DexFile::kDexNoIndex) {
          found_ = true;
          handler_pc_ = method->ToNativePC(found_dex_pc);
          handler_frame_ = fr;
        }
      }
      if (!found_) {
        // The caller may be the handler, so fill in the callee saves in the context.
        long_jump_context_->FillCalleeSaves(fr);
      }
    }
  }

  // Did we find a catch block yet?
  bool found_;
  // The type of the exception catch block to find.
  Class* to_find_;
  // Frame with the found handler, or the last frame if no handler was found.
  Frame handler_frame_;
  // PC to branch to for the handler.
  uintptr_t handler_pc_;
  // Context that will be the target of the long jump.
  Context* long_jump_context_;
  // Number of native methods passed in the crawl (equates to number of SIRTs to pop).
  uint32_t native_method_count_;
};

void Thread::DeliverException(Throwable* exception) {
  SetException(exception);  // Set exception on thread.

  Context* long_jump_context = GetLongJumpContext();
  CatchBlockStackVisitor catch_finder(exception->GetClass(), long_jump_context);
  WalkStackUntilUpCall(&catch_finder, true);

  // Pop any SIRT.
  if (catch_finder.native_method_count_ == 1) {
    PopSirt();
  } else {
    // We only expect the stack crawl to have passed at most one native method,
    // as it's terminated by an upcall.
    DCHECK_EQ(catch_finder.native_method_count_, 0u);
  }
  long_jump_context->SetSP(reinterpret_cast<intptr_t>(catch_finder.handler_frame_.GetSP()));
  long_jump_context->SetPC(catch_finder.handler_pc_);
  long_jump_context->DoLongJump();
}

Context* Thread::GetLongJumpContext() {
  Context* result = long_jump_context_;
  if (result == NULL) {
    result = Context::Create();
    long_jump_context_ = result;
  }
  return result;
}

bool Thread::HoldsLock(Object* object) {
  if (object == NULL) {
    return false;
  }
  return object->GetLockOwner() == thin_lock_id_;
}

bool Thread::IsDaemon() {
  return gThread_daemon->GetBoolean(peer_);
}

void Thread::VisitRoots(Heap::RootVisitor* visitor, void* arg) const {
  if (exception_ != NULL) {
    visitor(exception_, arg);
  }
  if (peer_ != NULL) {
    visitor(peer_, arg);
  }
  jni_env_->locals.VisitRoots(visitor, arg);
  jni_env_->monitors.VisitRoots(visitor, arg);
  // visitThreadStack(visitor, thread, arg);
  UNIMPLEMENTED(WARNING) << "some per-Thread roots not visited";
}

static const char* kStateNames[] = {
  "Terminated",
  "Runnable",
  "TimedWaiting",
  "Blocked",
  "Waiting",
  "Initializing",
  "Starting",
  "Native",
  "VmWait",
  "Suspended",
};
std::ostream& operator<<(std::ostream& os, const Thread::State& state) {
  int int_state = static_cast<int>(state);
  if (state >= Thread::kTerminated && state <= Thread::kSuspended) {
    os << kStateNames[int_state];
  } else {
    os << "State[" << int_state << "]";
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const Thread& thread) {
  os << "Thread[" << &thread
     << ",pthread_t=" << thread.GetImpl()
     << ",tid=" << thread.GetTid()
     << ",id=" << thread.GetThinLockId()
     << ",state=" << thread.GetState()
     << ",peer=" << thread.GetPeer()
     << "]";
  return os;
}

}  // namespace art